sparkJobProgressMonitor.py
|
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.utils.template import PixiedustTemplateEnvironment
from IPython.core.getipython import *
from IPython.display import display, HTML, Javascript
from pixiedust.utils.shellAccess import ShellAccess
from pixiedust.utils.environment import Environment
from functools import reduce
import uuid
import json
import sys
import traceback
import pixiedust
from IPython.core.getipython import get_ipython
from collections import OrderedDict
from threading import Thread, Lock, Event
import time
myLogger = pixiedust.getLogger(__name__)
_env = PixiedustTemplateEnvironment()
progressMonitor = None
loadingProgressMonitor = False
def enableSparkJobProgressMonitor():
if Environment.isRunningOnDSX:
print("Spark Job Progress Monitoring cannot be started on DSX")
return
global progressMonitor, loadingProgressMonitor
if progressMonitor is None and not loadingProgressMonitor:
loadingProgressMonitor = True
def startSparkJobProgressMonitor():
global progressMonitor
progressMonitor = SparkJobProgressMonitor()
t = Thread(target=startSparkJobProgressMonitor)
t.daemon = True
t.start()
print("Successfully enabled Spark Job Progress Monitor")
else:
print("Spark Job Progress Monitor already enabled")
class SparkJobProgressMonitorOutput(Thread):
class Java:
implements = ["com.ibm.pixiedust.PixiedustOutputListener"]
def __init__(self):
super(SparkJobProgressMonitorOutput,self).__init__()
self.prefix = str(uuid.uuid4())[:8]
self.lock = Lock()
self.triggerEvent = Event()
self.daemon = True
self.progressData = OrderedDict()
def getUpdaterId(self):
return "updaterId{0}".format(self.prefix)
def getProgressHTMLId(self):
return "progress{0}".format(self.prefix)
def run(self):
while True:
self.triggerEvent.wait()
with self.lock:
self.triggerEvent.clear()
if bool(self.progressData):
progressData = self.progressData
self.progressData = OrderedDict()
else:
progressData = OrderedDict()
if bool(progressData):
js = ""
for data in progressData.values():
channel = data["channel"]
if channel=="jobStart":
js += _env.getTemplate("sparkJobProgressMonitor/addJobTab.js").render(
prefix=self.prefix, data=data, overalNumTasks=reduce(lambda x,y:x+y["numTasks"], data["stageInfos"], 0)
)
elif channel=="stageSubmitted":
js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Submitted", host=None
)
elif channel=="taskStart":
js += _env.getTemplate("sparkJobProgressMonitor/taskStart.js").render(
prefix=self.prefix, data=data, increment = data["increment"],
totalCores=data["totalCores"], numExecutors=data["numExecutors"] )
js += "\n"
js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
prefix=self.prefix, stageId=data["stageId"], status="Running",
host="{0}({1})".format(data["taskInfo"]["executorId"],data["taskInfo"]["host"] ),
totalCores=data["totalCores"], numExecutors=data["numExecutors"]
)
elif channel=="stageCompleted":
js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Completed", host=None,
totalCores=data["totalCores"], numExecutors=data["numExecutors"]
)
elif channel=="jobEnd":
js += _env.getTemplate("sparkJobProgressMonitor/jobEnded.js").render(
prefix=self.prefix, jobId=data["jobId"], totalCores=data["totalCores"], numExecutors=data["numExecutors"]
)
elif channel=="executorAdded":
js += _env.getTemplate("sparkJobProgressMonitor/updateExecutor.js").render(
prefix=self.prefix, totalCores=data["totalCores"], numExecutors=data["numExecutors"]
)
elif channel=="executorRemoved":
js += _env.getTemplate("sparkJobProgressMonitor/updateExecutor.js").render(
prefix=self.prefix, totalCores=data["totalCores"], numExecutors=data["numExecutors"]
)
elif channel=="executorMetricsUpdate":
js += _env.getTemplate("sparkJobProgressMonitor/updateExecutor.js").render(
prefix=self.prefix, totalCores= data["executorMetricsInfo"]["totalCores"]
)
js += "\n"
display(Javascript(js))
time.sleep(0.5)
def display_with_id(self, obj, display_id, update=False):
"""Create a new display with an id"""
ip = get_ipython()
if hasattr(ip, "kernel"):
data, md = ip.display_formatter.format(obj)
content = {
'data': data,
'metadata': md,
'transient': {'display_id': display_id},
}
msg_type = 'update_display_data' if update else 'display_data'
ip.kernel.session.send(ip.kernel.iopub_socket, msg_type, content, parent=ip.parent_header)
else:
display(obj)
def printOutput(self, s):
print(s)
def sendChannel(self, channel, data):
self.printStuff(channel, data)
def onRunCell(self):
self.prefix = str(uuid.uuid4())[:8]
#Create the place holder area for the progress monitor
self.display_with_id(
HTML( _env.getTemplate("sparkJobProgressMonitor/pmLayout.html").render( prefix = self.prefix)),self.getProgressHTMLId()
)
def printStuff(self,channel, s):
try:
data = json.loads(s)
data["channel"] = channel
data["increment"] = 1
key = None
if channel=="jobStart":
key = "{0}-{1}".format(channel,data["jobId"])
elif channel=="stageSubmitted":
key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
elif channel=="taskStart":
key = "{0}-{1}".format(channel,data["stageId"])
elif channel=="stageCompleted":
key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
elif channel=="jobEnd":
key = "{0}-{1}".format(channel,data["jobId"])
elif channel=="executorAdded":
key = "{0}-{1}".format(channel,data["executorId"])
elif channel=="executorRemoved":
key = "{0}-{1}".format(channel,data["executorId"])
if key:
with self.lock:
if key in self.progressData:
data["increment"] = self.progressData[key]["increment"] + 1
self.progressData[key] = data
self.triggerEvent.set()
except:
print("Unexpected error: {0} - {1} : {2}".format(channel, s, sys.exc_info()[0]))
traceback.print_exc()
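# --------------------------------------------------------------------------------
# Note on the design above (added commentary): printStuff keys each incoming event
# by channel plus job/stage/executor id, so a burst of listener callbacks for the
# same stage collapses into a single entry in progressData, with "increment"
# counting how many raw events were folded in. The run() loop then drains the
# dictionary, emits one Javascript update per surviving entry, and sleeps half a
# second between passes, which keeps the notebook front end from being flooded
# with display messages.
# --------------------------------------------------------------------------------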
class SparkJobProgressMonitor(object):
def __init__(self):
self.monitorOutput = None
self.addSparkListener()
self.displayRuns={}
self.newDisplayRun = False
def onDisplayRun(self, contextId):
if contextId is None or self.monitorOutput is None:
self.newDisplayRun=True
return
cellContext = self.displayRuns.get( contextId )
if cellContext and cellContext != self.monitorOutput.prefix:
#switch the cell context if not a new display Run
if self.newDisplayRun:
self.displayRuns.pop( contextId, None )
else:
self.monitorOutput.prefix = cellContext
elif cellContext is None:
self.displayRuns[contextId] = self.monitorOutput.prefix
if cellContext:
display(Javascript(_env.getTemplate("sparkJobProgressMonitor/emptyTabs.js").render(prefix=cellContext)))
self.newDisplayRun=False
def addSparkListener(self):
try:
get_ipython().run_cell_magic(
"scala",
"cl=sparkProgressMonitor noSqlContext",
_env.getTemplate("sparkJobProgressMonitor/addSparkListener.scala").render()
)
listener = get_ipython().user_ns.get("__pixiedustSparkListener")
#access the listener object from the namespace
if listener:
self.monitorOutput = SparkJobProgressMonitorOutput()
self.monitorOutput.start()
#Add pre_run_cell event handler
get_ipython().events.register('pre_run_cell',lambda: self.monitorOutput.onRunCell() )
listener.setChannelListener( self.monitorOutput )
except:
myLogger.exception("Unexpected error while adding Spark Listener")
raise
|
train.py
|
import os
os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import torch
from src.env import MultipleEnvironments
from src.model import PPO
from src.utils import eval
import torch.multiprocessing as _mp
from torch.distributions import Categorical
import torch.nn.functional as F
import numpy as np
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--game", type=str, default="SuperMarioBros-Nes", help='游戏名称')
parser.add_argument("--trained_model", type=str, default=None, help='预训练模型')
parser.add_argument('--lr', type=float, default=1e-4, help='模型的学习率')
parser.add_argument('--gamma', type=float, default=0.9, help='奖励折扣率')
parser.add_argument('--tau', type=float, default=1.0, help='GAE参数')
parser.add_argument('--beta', type=float, default=0.01, help='熵权')
parser.add_argument('--epsilon', type=float, default=0.2, help='剪切替代目标参数')
parser.add_argument('--batch_size', type=int, default=16, help='训练数据的批量大小')
parser.add_argument('--num_epochs', type=int, default=10, help='每次采样训练多少轮')
parser.add_argument("--num_local_steps", type=int, default=512, help='每次采样的次数')
parser.add_argument("--num_processes", type=int, default=16, help='使用多少条线程启动游戏')
parser.add_argument("--saved_path", type=str, default="models", help='保存模型的路径')
parser.add_argument("--show_play", type=bool, default=False, help='是否显示评估游戏的界面,终端无法使用')
args = parser.parse_args()
return args
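# Example invocation (added illustration; the flags are the ones defined above, the
# values are arbitrary):
#
#   python train.py --game SuperMarioBros-Nes --num_processes 8 --num_local_steps 512 --lr 1e-4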
def train(args):
# Fix the random seed so the initial state is reproducible
if torch.cuda.is_available():
torch.cuda.manual_seed(123)
else:
torch.manual_seed(123)
# Create the directory for saving models
if not os.path.isdir(args.saved_path):
os.makedirs(args.saved_path)
# Create the multi-process game environments
envs = MultipleEnvironments(args.game, args.num_processes)
# Create the model
model = PPO(envs.num_states, envs.num_actions)
# Load a pretrained model if one was given
if args.trained_model is not None:
model.load_state_dict(torch.load(args.trained_model))
# Train on the GPU if available
if torch.cuda.is_available():
model.cuda()
model.share_memory()
# Start a separate process for game evaluation
mp = _mp.get_context("spawn")
process = mp.Process(target=eval, args=(args, model, envs.num_states, envs.num_actions))
process.start()
# Create the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# Reset the game in every environment process at the start
[agent_conn.send(("reset", None)) for agent_conn in envs.agent_conns]
# Collect the initial game frames
curr_states = [agent_conn.recv() for agent_conn in envs.agent_conns]
curr_states = torch.from_numpy(np.concatenate(curr_states, 0))
if torch.cuda.is_available():
curr_states = curr_states.cuda()
curr_episode = 0
while True:
curr_episode += 1
old_log_policies = []
actions = []
values = []
states = []
rewards = []
dones = []
# Step the games to collect rollout data
for _ in range(args.num_local_steps):
states.append(curr_states)
# Run the model forward
logits, value = model(curr_states)
# Compute the probability of each action
policy = F.softmax(logits, dim=1)
# Sample an action according to the predicted probabilities
old_m = Categorical(policy)
action = old_m.sample()
# Record the predictions
actions.append(action)
values.append(value.squeeze())
# Kept for computing the loss later
old_log_policy = old_m.log_prob(action)
old_log_policies.append(old_log_policy)
# Send the chosen action to each game process
if torch.cuda.is_available():
[agent_conn.send(("step", act)) for agent_conn, act in zip(envs.agent_conns, action.cpu())]
else:
[agent_conn.send(("step", act)) for agent_conn, act in zip(envs.agent_conns, action)]
# Gather the step results from all game processes
state, reward, done, info = zip(*[agent_conn.recv() for agent_conn in envs.agent_conns])
# Convert the data
state = torch.from_numpy(np.concatenate(state, 0))
# Convert to PyTorch tensors
if torch.cuda.is_available():
state = state.cuda()
reward = torch.cuda.FloatTensor(reward)
done = torch.cuda.FloatTensor(done)
else:
reward = torch.FloatTensor(reward)
done = torch.FloatTensor(done)
# Record the step results
rewards.append(reward)
dones.append(done)
curr_states = state
# Bootstrap the value from the final frames above
_, next_value, = model(curr_states)
next_value = next_value.squeeze()
old_log_policies = torch.cat(old_log_policies).detach()
actions = torch.cat(actions)
values = torch.cat(values).detach()
states = torch.cat(states)
gae = 0
R = []
for value, reward, done in list(zip(values, rewards, dones))[::-1]:
gae = gae * args.gamma * args.tau
gae = gae + reward + args.gamma * next_value.detach() * (1 - done) - value.detach()
next_value = value
R.append(gae + value)
R = R[::-1]
R = torch.cat(R).detach()
advantages = R - values
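# What the loop above computes (added commentary): for each step t the TD error is
# delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t), and the estimate is
# accumulated backwards as gae_t = delta_t + gamma * tau * gae_{t+1}. R stores
# gae_t + V(s_t), i.e. the return target for the critic, and advantages = R - values
# recovers the advantage estimates used by the actor loss below.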
total_losses = []
for i in range(args.num_epochs):
indice = torch.randperm(args.num_local_steps * args.num_processes)
for j in range(args.batch_size):
batch_indices = indice[
int(j * (args.num_local_steps * args.num_processes / args.batch_size)): int((j + 1) * (
args.num_local_steps * args.num_processes / args.batch_size))]
# Run the model on the sampled batch of frames
logits, value = model(states[batch_indices])
# Compute the probability of each action
new_policy = F.softmax(logits, dim=1)
new_m = Categorical(new_policy)
# Compute the losses
new_log_policy = new_m.log_prob(actions[batch_indices])
ratio = torch.exp(new_log_policy - old_log_policies[batch_indices])
actor_loss = -torch.mean(torch.min(ratio * advantages[batch_indices],
torch.clamp(ratio, 1.0 - args.epsilon, 1.0 + args.epsilon) *
advantages[batch_indices]))
critic_loss = F.smooth_l1_loss(R[batch_indices], value.squeeze())
entropy_loss = torch.mean(new_m.entropy())
total_loss = actor_loss + critic_loss - args.beta * entropy_loss
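# Loss breakdown (added commentary): ratio is pi_new(a|s) / pi_old(a|s); the actor
# term is the clipped surrogate objective -E[min(ratio * A, clip(ratio, 1-eps, 1+eps) * A)],
# the critic term is a smooth L1 loss between the return target R and the predicted
# value, and the entropy bonus weighted by beta is subtracted to encourage exploration.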
# Compute gradients and update the parameters
optimizer.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
total_losses.append(float(total_loss))
print("Episode: {}. Total loss: {:.4f}".format(curr_episode, np.mean(total_losses)))
torch.save(model.state_dict(), "{}/model_{}.pth".format(args.saved_path, args.game))
if __name__ == "__main__":
args = get_args()
train(args)
|
extract_geoip.py
|
#!/usr/bin/env python3
import argparse
import multiprocessing
from geoip2 import database
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from pymongo.errors import CursorNotFound
from geoip2.errors import AddressNotFoundError
def connect(host):
return MongoClient('mongodb://{}:27017'.format(host))
def retrieve_domains(db, skip, limit):
# each worker reads its own window of `limit` documents starting at offset `skip`
return db.dns.find({'a_record.0': {'$exists': True},
'country_code': {'$exists': False}})[skip:skip + limit]
def update_data(db, ip, post):
try:
res = db.dns.update_one({'a_record': {'$in': [ip]}}, {'$set': post}, upsert=False)
if res.modified_count > 0:
print('INFO: updated ip {} country code {} with {} documents'.format(ip, post['country_code'], res.modified_count))
except DuplicateKeyError:
pass
def extract_geodata(db, ip, input):
reader = database.Reader(input)
try:
data = reader.country(ip)
except AddressNotFoundError:
return
country_code = data.registered_country.iso_code
if country_code:
update_data(db, ip, {'country_code': country_code})
def worker(host, skip, limit):
args = argparser()
client = connect(host)
db = client.ip_data
try:
domains = retrieve_domains(db, skip, limit)
for domain in domains:
for ip in domain['a_record']:
extract_geodata(db, ip, args.input)
client.close()
except CursorNotFound:
return
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--worker', help='set worker count', type=int, required=True)
parser.add_argument('--input', help='set the input file', type=str, required=True)
parser.add_argument('--host', help='set the host', type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = argparser()
client = connect(args.host)
db = client.ip_data
jobs = []
threads = args.worker
amount = round(db.dns.estimated_document_count() / threads)
skip = 0
for f in range(threads):
j = multiprocessing.Process(target=worker, args=(args.host, skip, amount))
jobs.append(j)
j.start()
skip = skip + amount
for j in jobs:
j.join()
client.close()
print('exitcode = {}'.format(j.exitcode))
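# Example run (added illustration; the GeoLite2 database filename is an assumption,
# the flags are the ones defined in argparser above):
#
#   python3 extract_geoip.py --worker 4 --input GeoLite2-Country.mmdb --host localhost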
|
multitester.py
|
"""
Certbot Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies certbot repo and puts it on the instances
- Runs certbot tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances; if more
are needed, they must be requested via an online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_letsencrypt_auto_venv_only.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import sys, os, time, argparse, socket
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
import yaml
import boto3
from botocore.exceptions import ClientError
import fabric
from fabric.api import run, execute, local, env, sudo, cd, lcd
from fabric.operations import get, put
from fabric.context_managers import shell_env
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
help='profile for AWS (i.e. as in ~/.aws/credentials)')
parser.add_argument('test_script',
default='test_letsencrypt_auto_certonly_standalone.sh',
help='path of bash script to deploy and run')
#parser.add_argument('--script_args',
# nargs='+',
# help='space-delimited list of arguments to pass to the bash test script',
# required=False)
parser.add_argument('--repo',
default='https://github.com/letsencrypt/letsencrypt.git',
help='certbot git repo to use')
parser.add_argument('--branch',
default='~',
help='certbot git branch to trial')
parser.add_argument('--pull_request',
default='~',
help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
action='store_true',
help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
action='store_true',
help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
default='',
help="server from which to pull candidate release packages")
parser.add_argument('--killboulder',
action='store_true',
help="do not leave a persistent boulder server running")
parser.add_argument('--boulderonly',
action='store_true',
help="only make a boulder server")
parser.add_argument('--fast',
action='store_true',
help="use larger instance types to run faster (saves about a minute, probably not worth it)")
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-5f490b35' # premade shared boulder AMI 14.04LTS us-east-1
LOGDIR = "" #points to logging / working directory
# boto3/AWS api globals
AWS_SESSION = None
EC2 = None
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def make_security_group():
# will fail if security group of GroupName already exists
# cannot have duplicate SGs of the same name
mysg = EC2.create_security_group(GroupName="letsencrypt_test",
Description='security group for automated testing')
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=80, ToPort=80)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=443, ToPort=443)
# for boulder wfe (http) server
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=4000, ToPort=4000)
# for mosh
mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
return mysg
def make_instance(instance_name,
ami_id,
keyname,
machine_type='t2.micro',
security_groups=['letsencrypt_test'],
userdata=""): #userdata contains bash or cloud-init script
new_instance = EC2.create_instances(
BlockDeviceMappings=_get_block_device_mappings(ami_id),
ImageId=ami_id,
SecurityGroups=security_groups,
KeyName=keyname,
MinCount=1,
MaxCount=1,
UserData=userdata,
InstanceType=machine_type)[0]
# brief pause to prevent rare error on EC2 delay, should block until ready instead
time.sleep(1.0)
# give instance a name
try:
new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
except ClientError as e:
if "InvalidInstanceID.NotFound" in str(e):
# This seems to be ephemeral... retry
time.sleep(1)
new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
else:
raise
return new_instance
def _get_block_device_mappings(ami_id):
"""Returns the list of block device mappings to ensure cleanup.
This list sets connected EBS volumes to be deleted when the EC2
instance is terminated.
"""
# Not all devices use EBS, but the default value for DeleteOnTermination
# when the device does use EBS is true. See:
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-mapping.html
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
return [{'DeviceName': mapping['DeviceName'],
'Ebs': {'DeleteOnTermination': True}}
for mapping in EC2.Image(ami_id).block_device_mappings
if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
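# Added illustration: for an AMI whose root mapping (e.g. '/dev/sda1') currently has
# DeleteOnTermination set to False, this returns an override list such as
# [{'DeviceName': '/dev/sda1', 'Ebs': {'DeleteOnTermination': True}}]; mappings that
# already delete on termination (the default) are left untouched.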
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
"Blocks until server at urlstring can respond to http requests"
server_ready = False
t_elapsed = 0
while not server_ready and t_elapsed < timeout:
try:
sys.stdout.write('.')
sys.stdout.flush()
req = urllib2.Request(urlstring)
response = urllib2.urlopen(req)
#if response.code == 200:
server_ready = True
except urllib2.URLError:
pass
time.sleep(wait_time)
t_elapsed += wait_time
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
"Blocks until server at ipstring has an open port 22"
reached = False
t_elapsed = 0
while not reached and t_elapsed < timeout:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ipstring, 22))
reached = True
except socket.error as err:
time.sleep(wait_time)
t_elapsed += wait_time
sock.close()
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
"Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
# the reinstantiation from id is necessary to force boto3
# to correctly update the 'state' variable during init
_id = booting_instance.id
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
while _state != 'running' or _ip is None:
time.sleep(wait_time)
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
block_until_ssh_open(_ip)
time.sleep(extra_wait_time)
return _instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(repo_url):
"clones master of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('tar czf le.tar.gz letsencrypt')
def local_git_branch(repo_url, branch_name):
"clones branch <branch_name> of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt --branch %s --single-branch'%(repo_url, branch_name))
local('tar czf le.tar.gz letsencrypt')
def local_git_PR(repo_url, PRnumstr, merge_master=True):
"clones specified pull request from repo_url and optionally merges into master"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr)
local('cd letsencrypt && git checkout lePRtest')
if merge_master:
local('cd letsencrypt && git remote update origin')
local('cd letsencrypt && git merge origin/master -m "testmerge"')
local('tar czf le.tar.gz letsencrypt')
def local_repo_to_remote():
"copies local tarball of repo to remote"
with lcd(LOGDIR):
put(local_path='le.tar.gz', remote_path='')
run('tar xzf le.tar.gz')
def local_repo_clean():
"delete tarball"
with lcd(LOGDIR):
local('rm le.tar.gz')
def deploy_script(scriptpath, *args):
"copies to remote and executes local script"
#with lcd('scripts'):
put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
scriptfile = os.path.split(scriptpath)[1]
args_str = ' '.join(args)
run('./'+scriptfile+' '+args_str)
def run_boulder():
with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
run('go run cmd/rabbitmq-setup/main.go -server amqp://localhost')
run('nohup ./start.py >& /dev/null < /dev/null &')
def config_and_launch_boulder(instance):
execute(deploy_script, 'scripts/boulder_config.sh')
execute(run_boulder)
def install_and_launch_certbot(instance, boulder_url, target):
execute(local_repo_to_remote)
with shell_env(BOULDER_URL=boulder_url,
PUBLIC_IP=instance.public_ip_address,
PRIVATE_IP=instance.private_ip_address,
PUBLIC_HOSTNAME=instance.public_dns_name,
PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
OS_TYPE=target['type']):
execute(deploy_script, cl_args.test_script)
def grab_certbot_log():
"grabs letsencrypt.log via cat into logged stdout"
sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
# fallback file if /var/log is unwriteable...? correct?
sudo('if [ -f ./certbot.log ]; then \
cat ./certbot.log; else echo "[nolocallog]"; fi')
def create_client_instances(targetlist):
"Create a fleet of client instances"
instances = []
print("Creating instances: ", end="")
for target in targetlist:
if target['virt'] == 'hvm':
machine_type = 't2.medium' if cl_args.fast else 't2.micro'
else:
# 32 bit systems
machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
if 'userdata' in target.keys():
userdata = target['userdata']
else:
userdata = ''
name = 'le-%s'%target['name']
print(name, end=" ")
instances.append(make_instance(name,
target['ami'],
KEYNAME,
machine_type=machine_type,
userdata=userdata))
print()
return instances
def test_client_process(inqueue, outqueue):
cur_proc = mp.current_process()
for inreq in iter(inqueue.get, SENTINEL):
ii, target = inreq
#save all stdout to log file
sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')
print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
instances[ii] = block_until_instance_ready(instances[ii])
print("server %s at %s"%(instances[ii], instances[ii].public_ip_address))
env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address)
print(env.host_string)
try:
install_and_launch_certbot(instances[ii], boulder_url, target)
outqueue.put((ii, target, 'pass'))
print("%s - %s SUCCESS"%(target['ami'], target['name']))
except:
outqueue.put((ii, target, 'fail'))
print("%s - %s FAIL"%(target['ami'], target['name']))
pass
# append server certbot.log to each per-machine output log
print("\n\ncertbot.log\n" + "-"*80 + "\n")
try:
execute(grab_certbot_log)
except:
print("log fail\n")
pass
def cleanup(cl_args, instances, targetlist):
print('Logs in ', LOGDIR)
if not cl_args.saveinstances:
print('Terminating EC2 Instances')
if cl_args.killboulder:
boulder_server.terminate()
for instance in instances:
instance.terminate()
else:
# print login information for the boxes for debugging
for ii, target in enumerate(targetlist):
print(target['name'],
target['ami'],
"%s@%s"%(target['user'], instances[ii].public_ip_address))
#-------------------------------------------------------------------------------
# SCRIPT BEGINS
#-------------------------------------------------------------------------------
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
pass
env['abort_exception'] = FabricException
# Set up local copy of git repo
#-------------------------------------------------------------------------------
LOGDIR = "letest-%d"%int(time.time())
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)
# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
if cl_args.pull_request != '~':
print('Testing PR %s '%cl_args.pull_request,
"MERGING into master" if cl_args.merge_master else "")
execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
elif cl_args.branch != '~':
print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
execute(local_git_branch, cl_args.repo, cl_args.branch)
else:
print('Testing master of %s'%cl_args.repo)
execute(local_git_clone, cl_args.repo)
except FabricException:
print("FAIL: trouble with git repo")
exit()
# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
print(target['ami'], target['name'])
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
AWS_SESSION = boto3.session.Session(profile_name=PROFILE)
EC2 = AWS_SESSION.resource('ec2')
print("Making Security Group")
sg_exists = False
for sg in EC2.security_groups.all():
if sg.group_name == 'letsencrypt_test':
sg_exists = True
print(" %s already exists"%'letsencrypt_test')
if not sg_exists:
make_security_group()
time.sleep(30)
boulder_preexists = False
boulder_servers = EC2.instances.filter(Filters=[
{'Name': 'tag:Name', 'Values': ['le-boulderserver']},
{'Name': 'instance-state-name', 'Values': ['running']}])
boulder_server = next(iter(boulder_servers), None)
print("Requesting Instances...")
if boulder_server:
print("Found existing boulder server:", boulder_server)
boulder_preexists = True
else:
print("Can't find a boulder server, starting one...")
boulder_server = make_instance('le-boulderserver',
BOULDER_AMI,
KEYNAME,
machine_type='t2.micro',
#machine_type='t2.medium',
security_groups=['letsencrypt_test'])
try:
if not cl_args.boulderonly:
instances = create_client_instances(targetlist)
# Configure and launch boulder server
#-------------------------------------------------------------------------------
print("Waiting on Boulder Server")
boulder_server = block_until_instance_ready(boulder_server)
print(" server %s"%boulder_server)
# env.host_string defines the ssh user and host for connection
env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
print("Boulder Server at (SSH):", env.host_string)
if not boulder_preexists:
print("Configuring and Launching Boulder")
config_and_launch_boulder(boulder_server)
# blocking often unnecessary, but cheap EC2 VMs can get very slow
block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
wait_time=10, timeout=500)
boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
print("Boulder Server at (EC2 private ip): %s"%boulder_url)
if cl_args.boulderonly:
sys.exit(0)
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%LOGDIR)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue;
# the latter has implementation flaws that deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
SENTINEL = None #queue kill signal
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
# initiate process execution
for i in range(num_processes):
p = mp.Process(target=test_client_process, args=(inqueue, outqueue))
jobs.append(p)
p.daemon = True # kills subprocesses if parent is killed
p.start()
# fill up work queue
for ii, target in enumerate(targetlist):
inqueue.put((ii, target))
# add SENTINELs to end client processes
for i in range(num_processes):
inqueue.put(SENTINEL)
# wait on termination of client processes
for p in jobs:
p.join()
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# clean up
execute(local_repo_clean)
# print and save summary results
results_file = open(LOGDIR+'/results', 'w')
outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
outputs.sort(key=lambda x: x[0])
for outq in outputs:
ii, target, status = outq
print('%d %s %s'%(ii, target['name'], status))
results_file.write('%d %s %s\n'%(ii, target['name'], status))
results_file.close()
finally:
cleanup(cl_args, instances, targetlist)
# kill any connections
fabric.network.disconnect_all()
|
TapClient.py
|
from tkinter import messagebox
import socket,asyncore
from CStructures import *
from init import *
from Constants import *
import threading,time
from CQueue import *
from threading import *
from CStructures import *
import MainWindow
from tkinter import ttk
import asyncio
import MainWindowGUI
from array import *
from profile import *
from logging_config import *
from Exception import *
import pandas as pd
tcp_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connectDone=threading.Event()
processFlag=False
flag=False
Queue=CQueue()
SendData=CQueue()
InData=CQueue()
LoginSendData=CQueue()
QueueTrade=CQueue()
lastAckTimeData=CQueue()
logOutputQueue=CQueue()
requestSentQueue=CQueue()
queueAmbi=CQueue()
queueProcessAmbi=CQueue()
tempSendData=CQueue()
b=MessageHeader(0)
class TapClient():
receiveData=False
ProcessData=False
isConnection = False
bytesToRemove=0
TAPSequenceNumber=0
DataReceived=0
NewOrderRejected=0
ModifyOrderRejected = 0
CancelOrderRejected = 0
OrderAckReceived = 0
SpreadAckReceived = 0
TradeReceived = 0
bolLogin = True
bolConnected = False
data={}
dicseg=set()
Symlist=set()
def __init__(self):
pass
def connect(self):
try:
TapClient.connection(constants.TapIp, constants.TapPort)
return True
except:
logger.exception(PrintException())
return False
def connection(host_ip,server_port):
try:
try:
threading.Thread(target=TapClient.ConnectCallback, args=(host_ip,server_port,),daemon=True).start()
connectDone.wait()
except:
logger.exception(PrintException())
messagebox.showwarning('error','''Can't Connect''')
return True
except:
logger.exception(PrintException())
return False
def ConnectCallback(host_ip, server_port):
try:
while 1:
if TapClient.isconnected()==False:
tcp_client.connect((host_ip, server_port))
logOutputQueue.Enqueue("TT Connected...")
logger.info("Connection started On IP : " + str(host_ip) + " Port: " + str(server_port))
connectDone.set()
TapClient.receiveData = True
tapClient.OnConnected(None,None)
if(TapClient.isconnected()==True):
TapClient.Receive()
except socket.timeout as exc:
logger.exception(exc)
except:
logger.exception(PrintException())
def OnConnected(self,status,description):
try:
TapClient.TAPSequenceNumber = 1
InData.PacketsCanSend = 1
SendData.PacketsCanSend = 0
LoginSendData.PacketsCanSend = 0
TapClient.DataReceived = 0
TapClient.NewOrderRejected = 0
TapClient.ModifyOrderRejected = 0
TapClient.CancelOrderRejected = 0
TapClient.OrderAckReceived = 0
TapClient.SpreadAckReceived = 0
TapClient.TradeReceived = 0
TapClient.ProcessData=True
TapClient.bolLogin = True
TapClient.bolConnected = False
tapClient.StartThreads()
except:
logger.exception(PrintException())
def isconnected():
try:
# probe the socket with an empty payload; a send failure means the connection is down
data=array('B')
tcp_client.send(data)
return True
except:
return False
def Receive():
try:
state=StateObject()
state.worksocket=tcp_client
if(TapClient.isconnected()==True):
TapClient.ReceiveCallback()
except:
logger.exception(PrintException())
def ReceiveCallback():
try:
stateObj=StateObject()
while 1:
bytesRead=tcp_client.recv(9999)
InData.Enqueue(bytesRead)
InData.AddCounter(1)
TapClient.DataReceived+=1
except:
logger.exception(PrintException())
def SendLoginRequest(self):
try:
Header=MessageHeader(constants.TranCode.LoginRequest)
Header.Prop01MessageLength=constants.LoginRequestSize
Header.Prop02TransactionCode=constants.TranCode.LoginRequest.value
cLoginRequest=SignOn()
cLoginRequest.Prop01Header=[Header.cMessageLength,Header.cTransactionCode]
cLoginRequest.Prop02LoginId = constants.LoginId
cLoginRequest.Prop03MemberPassword = constants.MemberPassword
cLoginRequest.Prop04TradingPassword = constants.TradingPassword
cLoginRequest.Prop05IP = constants.TapIp
cLoginRequest.Prop06Reserved =""
TapClient.SendRequest(cLoginRequest.StructToByte())
requestSentQueue.Enqueue(cLoginRequest.ToString())
except:
logger.exception(PrintException())
print("no acoount or internet")
return False
def SendRequest(loginCred):
try:
SendData.Enqueue(loginCred)
except:
logger.exception(PrintException())
def SendScripMasterDownload(self):
header=MessageHeader(1)
header.Prop01MessageLength=constants.ScripMasterRequest
header.Prop02TransactionCode=constants.TranCode.ScripMasterRequest
# One scrip-master download request is sent per supported exchange code
for exchangeCode in [constants.NFExCode,constants.NCExcode,constants.BCExcode,constants.RNExCode,constants.RMExcode,constants.MXExcode,constants.NXExcode]:
scripMaster=ScripMasterRequest(1)
scripMaster.Prop01Header=[header.cMessageLength,header.cTransactionCode]
scripMaster.Prop02ExchangeCode=exchangeCode
scripMaster.Prop03Reserved=''
TapClient.SendRequest(scripMaster.StructToByte())
requestSentQueue.Enqueue(scripMaster.ToString())
return True
def StartThreads(self):
try:
tapClient.StartThread( ['SendData','ListenSocket','InData'])
except:
logger.exception(PrintException())
def StartThread(self,threadName):
try:
# Map the logical thread names to their worker methods and start a daemon thread
# for each requested name. (threading.Thread has no stop() method and a freshly
# created thread is never alive, so the previous isAlive()/stop() checks were dead code.)
targets={
"ProcessXml": tapClient.ThreadProcessXml,
"ListenSocket": tapClient.ListenSocket,
"InData": tapClient.ThreadParseData,
"SendData": tapClient.ThreadSendData,
"SendLoginData": tapClient.ThreadSendLoginData,
}
for threads in threadName:
if threads in targets:
Thread(target=targets[threads],daemon=True).start()
except:
logger.exception(PrintException())
def ThreadProcessXml(self):
lock=threading.Condition()
while 1:
try:
TradeConfirm=QueueTrade.DeQueue()
MainWindow.logOutPutQueue.Enqueue("Queue XML:"+TradeConfirm)
if TradeConfirm in MainWindow.dicReqIDTrack:
with lock:
pass
except:
logger.exception(PrintException())
def ListenSocket(self):
try:
while 1:
if tcp_client.fileno() != -1:
isConnection = True
else:
isConnection=False
except:
logger.warning(PrintException())
def ThreadParseData(self):
data=array('B')
PartialBuffer=array('B')
TotalBuffer=array('B')
try:
while TapClient.ProcessData:
if TapClient.bytesToRemove>0 or len(PartialBuffer)>0:
TotalBuffer=array('B')
TotalBuffer.extend(PartialBuffer)
bytesToRemove=0
PartialBuffer=array('B')
if tapClient.CheckCompletePacket(TotalBuffer) == False:
data=InData.DeQueue()
TotalBuffer.extend(data)
else:
messageHeader=array('B')
messageHeader.extend(TotalBuffer[0:constants.MsgHeaderSize])
header=MessageHeader(0)
header.ByteToStruct(messageHeader)
transactioncode=header.cTransactionCode[0]
if transactioncode==1:
loginResponse=LoginResponse(1)
loginResponse.ByteToStruct(TotalBuffer)
if(loginResponse.cStatusCode==0):
isSuccess=True
else:
isSuccess=False
logOutputQueue.Enqueue('\n'+loginResponse.ToString())
elif transactioncode==2:
MainWindow.MainWindow.logOutputQueue.Enqueue("\n" + "LogOff done successfully")
elif transactioncode==21:
scripMasterResponse=ScripMasterResponse(1)
scripMasterResponse.ByteToStruct(TotalBuffer)
numberOfScrips=0
sourceIndex = 0
if scripMasterResponse.cExchangeCode[0:]=='NF':
nfData=array('B')
nfData.extend(TotalBuffer[8:header.cMessageLength[0]])
numberOfScrips=len(nfData)/constants.DerivativeMasterItemSize
dicNfScripMaster={}
dicTokenvsNF={}
for i in range(int(numberOfScrips)):
nfScripMaster=NFScripMaster(True)
nfScripData=array('B')
nfScripData.extend(nfData[sourceIndex:sourceIndex+constants.DerivativeMasterItemSize])
nfScripMaster.ByteToStruct(nfScripData)
if nfScripMaster.cScripShortName.strip(' \x00') not in dicNfScripMaster:
dicNfScripMaster.update({nfScripMaster.cScripShortName.strip(' \x00'):[nfScripMaster.cDataLength[0:],nfScripMaster.cDerivativeType[0:].strip(' \x00'),nfScripMaster.cScripCode[0:].strip(' \x00'),nfScripMaster.cScripShortName[0:].strip(' \x00'),nfScripMaster.cExpiryDate[0:].strip(' \x00'),nfScripMaster.cFutOption[0:].strip(' \x00'),nfScripMaster.cStrikePrice[0:],nfScripMaster.cLotSize[0:]]})
sourceIndex+=constants.DerivativeMasterItemSize
MainWindowGUI.FeedRequest.LoadScripmaster(dicNfScripMaster,scripMasterResponse.Prop02ExchangeCode)
logOutputQueue.Enqueue('\n'+scripMasterResponse.ToString()+'||'+str(next(iter(dicNfScripMaster.values()))))
elif scripMasterResponse.cExchangeCode[0:]=='NC':
ncData=array('B')
ncData.extend(TotalBuffer[8:header.cMessageLength[0]])
numberOfScrips=len(ncData)/constants.CashcripMasterSize
sourceIndex = 0
dicNcScripMaster={}
dicBcScripMaster={}
for i in range(int(numberOfScrips)):
ncScripMaster=NCScripMaster(True)
ncScripData=array('B')
ncScripData.extend(ncData[sourceIndex:sourceIndex+constants.CashcripMasterSize])
ncScripMaster.ByteToStruct(ncScripData)
if ncScripMaster.cScripShortName.strip(' \x00') not in dicNcScripMaster:
dicNcScripMaster.update({ncScripMaster.cScripShortName.strip(' \x00'):[ncScripMaster.cDataLength[0:],ncScripMaster.CSegment[0:].strip(' \x00'),ncScripMaster.cScripCode[0:].strip(' \x00'),ncScripMaster.cScripShortName[0:].strip(' \x00')]})
sourceIndex+=constants.CashcripMasterSize
MainWindowGUI.FeedRequest.LoadScripmaster(dicNcScripMaster,scripMasterResponse.Prop02ExchangeCode)
logOutputQueue.Enqueue('\n'+scripMasterResponse.ToString()+'||'+str(next(iter(dicNcScripMaster.values()))))
elif scripMasterResponse.cExchangeCode[0:]=='BC':
ncData=array('B')
ncData.extend(TotalBuffer[8:header.cMessageLength[0]])
numberOfScrips=len(ncData)/constants.CashcripMasterSize
sourceIndex = 0
dicNcScripMaster={}
dicBcScripMaster={}
for i in range(int(numberOfScrips)):
ncScripMaster=NCScripMaster(True)
ncScripData=array('B')
ncScripData.extend(ncData[sourceIndex:sourceIndex+constants.CashcripMasterSize])
ncScripMaster.ByteToStruct(ncScripData)
if ncScripMaster.cScripShortName.strip(' \x00') not in dicBcScripMaster:
dicBcScripMaster.update({ncScripMaster.cScripShortName.strip(' \x00'):[ncScripMaster.cDataLength[0:],ncScripMaster.CSegment[0:].strip(' \x00'),ncScripMaster.cScripCode[0:].strip(' \x00'),ncScripMaster.cScripShortName[0:].strip(' \x00')]})
sourceIndex+=constants.CashcripMasterSize
MainWindowGUI.FeedRequest.LoadScripmaster(dicBcScripMaster,scripMasterResponse.Prop02ExchangeCode)
logOutputQueue.Enqueue('\n'+scripMasterResponse.ToString()+'||'+str(next(iter(dicBcScripMaster.values()))))
elif scripMasterResponse.cExchangeCode[0:]=='RN':
rnData=array('B')
rnData.extend(TotalBuffer[8:header.cMessageLength[0]])
numberOfScrips=len(rnData)/constants.CurrencycripMasterSize
sourceIndex = 0
dicRNScripMaster={}
dicRMScripMaster={}
for i in range(int(numberOfScrips)):
currencycripMaster=RNScripMaster(True)
scripData=array('B')
scripData.extend(rnData[sourceIndex:sourceIndex+constants.CurrencycripMasterSize])
currencycripMaster.ByteToStruct(scripData)
if currencycripMaster.cScripShortName.strip(' \x00') not in dicRNScripMaster:
dicRNScripMaster.update({currencycripMaster.cScripShortName.strip(' \x00'):[currencycripMaster.cDataLength[0:],currencycripMaster.cCurrencyType[0:].strip(' \x00'),currencycripMaster.cScripCode[0:].strip(' \x00'),currencycripMaster.cScripShortName[0:].strip(' \x00'),currencycripMaster.cExpiryDate[0:].strip(' \x00'),currencycripMaster.cFutOption[0:].strip(' \x00'),currencycripMaster.cStrikePrice[0:],
currencycripMaster.cLotSize[0:],currencycripMaster.cDisplayLotSize[0:],currencycripMaster.cLotType[0:].strip(' \x00'),currencycripMaster.cDisplayLotType[0:].strip(' \x00'),currencycripMaster.cOFType[0:].strip(' \x00'),currencycripMaster.cMinimumTradeQty[0:],currencycripMaster.cPriceTick[0:].strip(' \x00'),currencycripMaster.cMultipler[0:]]})
sourceIndex+=constants.CurrencycripMasterSize
MainWindowGUI.FeedRequest.LoadScripmaster(dicRNScripMaster,scripMasterResponse.Prop02ExchangeCode)
logOutputQueue.Enqueue('\n'+scripMasterResponse.ToString()+'||'+str(next(iter(dicRNScripMaster.values()))))
elif scripMasterResponse.cExchangeCode[0:]=='RM':
rnData=array('B')
rnData.extend(TotalBuffer[8:header.cMessageLength[0]])
numberOfScrips=len(rnData)/constants.CurrencycripMasterSize
sourceIndex = 0
dicRNScripMaster={}
dicRMScripMaster={}
for i in range(int(numberOfScrips)):
currencycripMaster=RNScripMaster(True)
scripData=array('B')
scripData.extend(rnData[sourceIndex:sourceIndex+constants.CurrencycripMasterSize])
currencycripMaster.ByteToStruct(scripData)
if currencycripMaster.cScripShortName.strip(' \x00') not in dicRMScripMaster:
dicRMScripMaster.update({currencycripMaster.cScripShortName.strip(' \x00'):[currencycripMaster.cDataLength[0:],currencycripMaster.cCurrencyType[0:].strip(' \x00'),currencycripMaster.cScripCode[0:].strip(' \x00'),currencycripMaster.cScripShortName[0:].strip(' \x00'),currencycripMaster.cExpiryDate[0:].strip(' \x00'),currencycripMaster.cFutOption[0:].strip(' \x00'),currencycripMaster.cStrikePrice[0:],
currencycripMaster.cLotSize[0:],currencycripMaster.cDisplayLotSize[0:],currencycripMaster.cLotType[0:].strip(' \x00'),currencycripMaster.cDisplayLotType[0:].strip(' \x00'),currencycripMaster.cOFType[0:].strip(' \x00'),currencycripMaster.cMinimumTradeQty[0:],currencycripMaster.cPriceTick[0:].strip(' \x00'),currencycripMaster.cMultipler[0:]]})
sourceIndex+=constants.CurrencycripMasterSize
MainWindowGUI.FeedRequest.LoadScripmaster(dicRMScripMaster,scripMasterResponse.Prop02ExchangeCode)
logOutputQueue.Enqueue('\n'+scripMasterResponse.ToString()+'||'+str(next(iter(dicRMScripMaster.values()))))
elif transactioncode==22:
feedResponse=FeedResponse(1)
feedResponse.ByteToStruct(TotalBuffer)
token=feedResponse.cScripToken
if token not in MainWindowGUI.FeedRequest.DicFeedsRespone:
MainWindowGUI.FeedRequest.DicFeedsRespone.update({token:[feedResponse.cHeader[0:],feedResponse.cScripToken[0:],feedResponse.cLTPrice,feedResponse.cLTQuantity,feedResponse.cLTDate[0:],feedResponse.cBidPrice,feedResponse.cBidQuantity,feedResponse.cOfferPrice,feedResponse.cOfferQuantity,feedResponse.cTotalTradedQty,feedResponse.cTotalTradedQty,feedResponse.cTradedQuantity,feedResponse.cAverageTradePrice]})
attribut=[str(attr)+':'+str(val) for (attr,val) in feedResponse.__dict__.items()]
MainWindowGUI.FeedRequest.DicFeedsRespone.update({token:attribut})
feedResponsestr=feedResponse.ToString()
logOutputQueue.Enqueue('Feed Response :'+feedResponsestr)
strsss=TapClient.getKeysByValues(token)
TapClient.data.update({strsss:[feedResponse.cLTDate.strip('\x00 '),feedResponse.cLTPrice/100,feedResponse.cBidPrice/100,feedResponse.cBidQuantity,feedResponse.cOfferPrice/100,feedResponse.cOfferQuantity]})
init.init.treeview.delete(*init.init.treeview.get_children())
for k,v in TapClient.data.items():
if k not in TapClient.Symlist:
init.init.treeview.insert('','end',text=(k),values=(v[0:]))
if k in TapClient.Symlist:
init.init.treeview.insert('','end',text=(k),values=(v[0:]))
children =init.init.treeview.get_children()
for child in children:
TapClient.Symlist.add(init.init.treeview.item(child)['text'])
elif transactioncode==26:
depthResponse=MarketDepthResponse(1)
depthResponse.ByteToStruct(TotalBuffer)
token=depthResponse.cScripCode
if token not in MainWindowGUI.FeedRequest.DicMarketDepthResponse:
attribut=[str(attr)+':'+str(val) for (attr,val) in depthResponse.__dict__.items()]
MainWindowGUI.FeedRequest.DicMarketDepthResponse.update({token:attribut})
logOutputQueue.Enqueue('Depth Response : '+depthResponse.ToString())
elif transactioncode==11: #soc
soc=SharekhanOrderConfirmation(1)
soc.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue("SharekhanOrderConfirmation : "+soc.ToString())
if soc.cRequestID not in MainWindowGUI.FeedRequest.DicSharekhanOrderResponse:
MainWindowGUI.FeedRequest.DicSharekhanOrderResponse.update({soc.cRequestID[0:]:[MessageHeader.ToString(soc.cHeader),soc.cRequestID[0:],soc.cExchangeCode[0:],soc.cCount,soc.cOrderConfirmationItems[0:]]})
logger.info("SharekhanOrderConfirmation :"+soc.ToString())
self.diclock=threading.Condition()
elif transactioncode==13:
orderConfirmation=ExchangeTradeConfirmation(1)
orderConfirmation.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue('Exchange Order Confirmation :'+orderConfirmation.ToString())
if orderConfirmation.cSharekhanOrderID.replace('\0','').strip(' ') in MainWindowGUI.FeedRequest.dicSharekhanIDvsAPIReqID:
APIReqID=MainWindowGUI.FeedRequest.dicSharekhanIDvsAPIReqID[orderConfirmation.cSharekhanOrderID.replace('\0','').strip(' ')]
self.diclock=threading.Condition()
with self.diclock:
order=MainWindow.orders()
order=MainWindowGUI.FeedRequest.dicReqIDTrack[APIReqID]
order.ExchangeOrdID=orderConfirmation.cExchangeOrderId.replace('\0','').strip(' ')
order.ExchangeSignal=orderConfirmation.cBuySell
order.ConfrmType="ExchangeConfirmation"
logger.info("Exchange Order Confirmation :"+order.ToString())
elif transactioncode==14:
tradeConfirmation=ExchangeTradeConfirmation(1)
tradeConfirmation.ByteToStruct(TotalBuffer)
self.diclock=threading.Condition()
with self.diclock:
attribut=[str(attr)+':'+str(val) for (attr,val) in tradeConfirmation.__dict__.items()]
if tradeConfirmation.cSharekhanOrderID.replace('\0','').strip(' ') not in MainWindowGUI.FeedRequest.dicTradeConfirm:
MainWindowGUI.FeedRequest.dicTradeConfirm.update({tradeConfirmation.cExchangeCode.replace('\0','').strip(' '):attribut})
else:
MainWindowGUI.FeedRequest.dicTradeConfirm[tradeConfirmation.cSharekhanOrderID.replace('\0','').strip(' ')]=attribut
QueueTrade.Enqueue(tradeConfirmation.cSharekhanOrderID.replace('\0','').strip(' '))
elif transactioncode==31:
objTransCode31=ReportResponse(1)
objTransCode31.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode31.ToString())
for i in range(objTransCode31.cRecordCount[0]):
logOutputQueue.Enqueue("Record Number : " + str(i))
objReportResponse=EquityOrderReportItem(1)
objReportResponse.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objReportResponse.ToString())
elif transactioncode==32:
objTransCode32=ReportResponse(1)
objTransCode32.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode32.ToString())
for i in range(objTransCode32.cRecordCount[0]):
logOutputQueue.Enqueue("Record Number : " + str(i))
DPSRReport=DPSRReportItem(1)
DPSRReport.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(DPSRReport.ToString())
elif transactioncode==33:
objTransCode33=ReportResponse(1)
objTransCode33.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode33.ToString())
cashOrderStart=0
cashOrderEnd=0
if objTransCode33.cRecordCount[0]>0:
for i in range(objTransCode33.cRecordCount[0]):
FixSize=461
if i==0:
cashOrderStart=10
cashOrderEnd=10+FixSize
else:
cashOrderStart=cashOrderEnd
cashOrderEnd=cashOrderStart+FixSize
Report=array('B')
Report.extend(TotalBuffer[0:FixSize])
logOutputQueue.Enqueue('Record Number :'+str(i))
CashOrderDetailsReportItemResponse=CashOrderDetailsReportItem(1)
CashOrderDetailsReportItemResponse.ByteToStruct(Report)
logOutputQueue.Enqueue(CashOrderDetailsReportItemResponse.ToString())
elif transactioncode==34:
objTransCode34=ReportResponse(1)
objTransCode34.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode34.ToString())
cashOrderStart=0
cashOrderEnd=0
if objTransCode34.cRecordCount[0]>0:
for i in range(objTransCode34.cRecordCount[0]):
FixSize=294
if i==0:
cashOrderStart=10
cashOrderEnd=10+FixSize
else:
cashOrderStart=cashOrderEnd+100
cashOrderEnd=cashOrderStart+FixSize+100
Report=array('B')
Report.extend(TotalBuffer[0:FixSize])
logOutputQueue.Enqueue('Record Number :'+str(i))
CashTradeDetailsReport=CashTradeDetailsReportItem(1)
CashTradeDetailsReport.ByteToStruct(Report)
logOutputQueue.Enqueue(CashTradeDetailsReport.ToString())
elif transactioncode==35:
objTransCode35=ReportResponse(1)
objTransCode35.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode35.ToString())
for i in range(objTransCode35.cRecordCount[0]):
logOutputQueue.Enqueue("Record Number : " + str(i))
cashLimitReportItem=CashLimitReportItem(1)
cashLimitReportItem.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(cashLimitReportItem.ToString())
elif transactioncode==36:
objTransCode36=ReportResponse(1)
objTransCode36.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode36.ToString())
for i in range(objTransCode36.cRecordCount[0]):
logOutputQueue.Enqueue("Record Number : " + str(i))
cashNetPositionReportItem=CashNetPositionReportItem(1)
cashNetPositionReportItem.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(cashNetPositionReportItem.ToString())
elif transactioncode==41:
objTransCode41=ReportResponse(1)
objTransCode41.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode41.ToString())
for i in range(objTransCode41.cRecordCount[0]):
logOutputQueue.Enqueue("Record Number : " + str(i))
derivativeOrderReportItem=DerivativeOrderReportItem(1)
derivativeOrderReportItem.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(derivativeOrderReportItem.ToString())
elif transactioncode==42:
objTransCode42=ReportResponse(1)
objTransCode42.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode42.ToString())
for i in range(objTransCode42.cRecordCount[0]):
logOutputQueue.Enqueue("Record Number : " + str(i))
turnOverReportItem=TurnOverReportItem(1)
turnOverReportItem.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(turnOverReportItem.ToString())
elif transactioncode==43:
objTransCode43=ReportResponse(1)
objTransCode43.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode43.ToString())
Reportstartind=0
Reportendind=0
if objTransCode43.cRecordCount[0]>0:
for i in range(objTransCode43.cRecordCount[0]):
FixSize=764
if i==0:
Reportstartind=10
Reportendind=10+FixSize
else:
Reportstartind=Reportendind
Reportendind=Reportstartind+FixSize
Report=array('B')
Report.extend(TotalBuffer[0:FixSize])
logOutputQueue.Enqueue('Record Number :'+str(i))
objDerOrdDetailReport=DerivativeOrderDetailReportItem(1)
objDerOrdDetailReport.ByteToStruct(Report)
logOutputQueue.Enqueue(objDerOrdDetailReport.ToString())
elif transactioncode==44:
objTransCode44=ReportResponse(1)
objTransCode44.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode44.ToString())
fnotradeStart=0
fnotradeEnd=0
if objTransCode44.cRecordCount[0]>0:
for i in range(objTransCode44.cRecordCount[0]):
FixSize=364
if i==0:
fnotradeStart=10
fnotradeEnd=10+FixSize
else:
fnotradeStart=fnotradeEnd+100
fnotradeEnd=fnotradeStart+FixSize+100
Report=array('B')
Report.extend(TotalBuffer[fnotradeStart:fnotradeEnd])
logOutputQueue.Enqueue('Record Number :'+str(i))
objDerTradeDetail=DerivativeTradeDetailsReportItem(1)
objDerTradeDetail.ByteToStruct(Report)
logOutputQueue.Enqueue(objDerTradeDetail.ToString())
elif transactioncode==54:
objTransCode54=ReportResponse(1)
objTransCode54.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objTransCode54.ToString())
for i in range(objTransCode54.cRecordCount[0]):
logOutputQueue.Enqueue("Record Number : " + str(i))
objcmdLimit=CommodityLimitReportItem(1)
objcmdLimit.ByteToStruct(TotalBuffer)
logOutputQueue.Enqueue(objcmdLimit.ToString())
if header.cMessageLength[0] <len(TotalBuffer):
bytesToRemove=header.cMessageLength[0]
PartialBuffer=TotalBuffer[bytesToRemove:]
TotalBuffer=array('B')
except:
logger.exception(PrintException())
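# Reverse lookup helper: given a raw token, search the scrip master that matches
# the selected segment (NC/NF/RN/RM/BC/NX/MX) and return the key whose third
# field equals the token.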
def getKeysByValues(token):
try:
l = ""
seg=init.init.selectedSegment
if seg+':'+token not in TapClient.dicseg:
TapClient.dicseg.add(seg+':'+token)
dicsegval=TapClient.dicseg
for segment in dicsegval:
ss=segment.split(':')
if ss[0]=='NC':
for k, v in MainWindowGUI.FeedRequest.ncScripMaster.items():
if token.strip('\x00 ') == v[2]:
l=k
return l
elif ss[0]=='NF':
for k, v in MainWindowGUI.FeedRequest.nfScripMaster.items():
if token.strip('\x00 ') == v[2]:
l=k
return l
elif ss[0]=='RN':
for k, v in MainWindowGUI.FeedRequest.rnScripMaster.items():
if token.strip('\x00 ') == v[2]:
l=k
return l
elif ss[0]=='RM':
for k, v in MainWindowGUI.FeedRequest.rmScripMaster.items():
if token.strip('\x00 ') == v[2]:
l=k
return l
elif ss[0]=='BC':
for k, v in MainWindowGUI.FeedRequest.bcScripMaster.items():
if token.strip('\x00 ') == v[2]:
l=k
return l
elif ss[0]=='NX':
for k, v in MainWindowGUI.FeedRequest.nxScripMaster.items():
if token.strip('\x00 ') == v[2]:
l=k
return l
elif ss[0]=='MX':
for k, v in MainWindowGUI.FeedRequest.mxScripMaster.items():
l=k
if token.strip('\x00 ') == v[2]:
return l
except:
logger.exception(PrintException())
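# Returns True when TotalBuffer begins with a complete message, i.e. it contains
# at least the cMessageLength bytes declared in the message header.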
def CheckCompletePacket(self,TotalBuffer):
try:
if len(TotalBuffer)>0:
messageHeader=array('B')
messageHeader=TotalBuffer[0:constants.MsgHeaderSize]
header=MessageHeader(0)
header.ByteToStruct(messageHeader)
if header.cTransactionCode==0:
return True
else:
if header.cMessageLength[0] <= len(TotalBuffer):
return True
else:
return False
else:
return False
except:
logger.exception(PrintException())
return False
def ThreadSendData(self):
while TapClient.ProcessData:
try:
if processFalg==False:
data=SendData.DeQueue(True)
TapClient.Send(tcp_client,data)
except:
logger.exception(PrintException())
def Send(socket,data):
try:
processFalg = True
tcp_client.send(data)
processFalg = False
except:
logger.exception(PrintException())
def ThreadSendLoginData(self):
while TapClient.ProcessData:
try:
data=LoginSendData.DeQueue(True)
TapClient.Send(tcp_client,data)
except:
logger.exception(PrintException())
def SubscribeforFeeds(self,request):
TapClient.SendRequest(request)
def SendOrderReportRequest(self,p):
TapClient.SendRequest(p)
class StateObject():
def __init__(self):
self.worksocket=None
self.buffer=array('B')
self.data=array('B')
tapClient=TapClient()
|
29.asyncEventThreadsafe.py
|
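# Minimal cross-thread asyncio signalling example: a producer coroutine running
# on a background event loop sets an Event that a consumer coroutine on the main
# loop waits for. Because asyncio primitives are not thread-safe, set()/clear()
# are marshalled onto the Event's owning loop with call_soon_threadsafe.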
import asyncio
from threading import Thread
class Event_ts(asyncio.Event):
# set() and clear() are marshalled onto the Event's owning loop so they can be
# called safely from other threads.
def set(self):
self._loop.call_soon_threadsafe(super().set)
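# Thread-safe counterpart to set(); note that, like set(), this relies on the
# private _loop attribute of asyncio.Event.
def clear(self):
self._loop.call_soon_threadsafe(super().clear)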
def start_loop(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
async def producer(event):
while True:
print('producer emit signal')
event.set()
await asyncio.sleep(1)
async def consumer(event):
while True:
await event.wait()
event.clear()
print('consumer get signal')
new_loop = asyncio.new_event_loop()
t = Thread(target=start_loop, args=(new_loop,))
t.start()
e = Event_ts()
asyncio.run_coroutine_threadsafe(producer(e), new_loop)
asyncio.get_event_loop().run_until_complete(consumer(e))
|
aprs2_graphite.py
|
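# Background Graphite forwarder (Python 2 code: Queue module, graphitesend
# client). GraphiteSender queues metrics and a daemon GraphiteThread transmits
# them, so callers never block on the network.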
import Queue
import thread
import threading
import graphitesend
# don't hold a massive backlog, just momentary spikes
max_queue_size = 500
g_thread = None
class GraphiteThread(object):
def __init__(self, log):
self.log = log
self.graphite = None
self.thr = None
self.queue = Queue.Queue(max_queue_size)
self.stopping = threading.Event()
self.check_connect()
t = threading.Thread(target = self.__consume)
t.daemon = True
t.start()
def check_connect(self):
if self.graphite == None or self.graphite.socket == None:
try:
self.log.info("Connecting to Graphite")
self.graphite = graphitesend.GraphiteClient(fqdn_squash=True, graphite_server='t2graph.aprs2.net', graphite_port=2003)
except Exception as e:
self.log.error("Failed to connect to Graphite: %r" % e)
return
def __consume(self):
while not self.stopping.is_set():
try:
item = self.queue.get(block = True, timeout = 1)
self.check_connect()
metric, value = item
self.transmit(metric, value)
self.queue.task_done()
except Queue.Empty:
pass
except Exception as e:
import traceback
self.log.error("GraphiteThread: %s", traceback.format_exc())
self.queue.task_done()
self.log.debug("GraphiteThread stopping")
def transmit(self, metric, value):
if self.graphite == None:
return
try:
self.graphite.send(metric, value)
except graphitesend.GraphiteSendException:
self.log.exception("Graphite send failed")
try:
self.graphite.disconnect()
except Exception:
pass
self.graphite = None
class GraphiteSender(object):
def __init__(self, log, fqdn):
self.log = log
global g_thread
if g_thread == None:
g_thread = GraphiteThread(log)
# remove domain from fqdn
hostname = fqdn
#i = hostname.find('.')
#if i >= 0:
# hostname = hostname[0:i]
self.hostname = hostname
def send(self, metric, value):
global g_thread
if g_thread.graphite == None:
# don't even queue
return
try:
g_thread.queue.put(('aprs2.%s.%s' % (self.hostname, metric), value), block = True, timeout = 0.1)
return True
except Queue.Full:
self.log.error("GraphiteSender: queue full")
return False
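# Illustrative usage (hypothetical logger and host name):
#   sender = GraphiteSender(log, 'server1.example.com')
#   sender.send('clients.connected', 42)
# queues the metric as 'aprs2.server1.example.com.clients.connected' for the
# shared GraphiteThread to transmit.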
|
bazel_build.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bridge between Xcode and Bazel for the "build" action."""
import atexit
import errno
import fcntl
import hashlib
import inspect
import io
import json
import os
import pipes
import plistlib
import re
import shutil
import signal
import subprocess
import sys
import textwrap
import threading
import time
import zipfile
from apfs_clone_copy import CopyOnWrite
import bazel_build_events
import bazel_build_settings
import bazel_options
from bootstrap_lldbinit import BootstrapLLDBInit
from bootstrap_lldbinit import TULSI_LLDBINIT_FILE
import tulsi_logging
from update_symbol_cache import UpdateSymbolCache
# List of frameworks that Xcode injects into test host targets that should be
# re-signed when running the tests on devices.
XCODE_INJECTED_FRAMEWORKS = [
'libXCTestBundleInject.dylib',
'libXCTestSwiftSupport.dylib',
'IDEBundleInjection.framework',
'XCTAutomationSupport.framework',
'XCTest.framework',
]
_logger = None
def _PrintUnbuffered(msg):
sys.stdout.write('%s\n' % msg)
sys.stdout.flush()
def _PrintXcodeWarning(msg):
sys.stdout.write(':: warning: %s\n' % msg)
sys.stdout.flush()
def _PrintXcodeError(msg):
sys.stderr.write(':: error: %s\n' % msg)
sys.stderr.flush()
def _Fatal(msg, fatal_frame=None):
"""Print a fatal error pointing to the failure line inside the script."""
if not fatal_frame:
fatal_frame = inspect.currentframe().f_back
filename, line_number, _, _, _ = inspect.getframeinfo(fatal_frame)
_PrintUnbuffered('%s:%d: error: %s' % (os.path.abspath(filename),
line_number, msg))
CLEANUP_BEP_FILE_AT_EXIT = False
# Function to be called atexit to clean up the BEP file if one is present.
# This is especially useful in cases of abnormal termination (such as what
# happens when Xcode is killed).
def _BEPFileExitCleanup(bep_file_path):
if not CLEANUP_BEP_FILE_AT_EXIT:
return
try:
os.remove(bep_file_path)
except OSError as e:
_PrintXcodeWarning('Failed to remove BEP file from %s. Error: %s' %
(bep_file_path, e.strerror))
def _InterruptHandler(signum, frame):
"""Gracefully exit on SIGINT."""
del signum, frame # Unused.
_PrintUnbuffered('Caught interrupt signal. Exiting...')
sys.exit(0)
def _FindDefaultLldbInit():
"""Returns the path to the primary lldbinit file that Xcode would load or None when no file exists."""
for lldbinit_shortpath in ['~/.lldbinit-Xcode', '~/.lldbinit']:
lldbinit_path = os.path.expanduser(lldbinit_shortpath)
if os.path.isfile(lldbinit_path):
return lldbinit_path
return None
class Timer(object):
"""Simple profiler."""
def __init__(self, action_name, action_id):
"""Creates a new Timer object.
Args:
action_name: A human-readable action name, shown in the build log.
action_id: A machine-readable action identifier, can be used for metrics.
Returns:
A Timer instance.
Raises:
RuntimeError: if Timer is created without initializing _logger.
"""
if _logger is None:
raise RuntimeError('Attempted to create Timer without a logger.')
self.action_name = action_name
self.action_id = action_id
self._start = None
def Start(self):
self._start = time.time()
return self
def End(self, log_absolute_times=False):
end = time.time()
seconds = end - self._start
if log_absolute_times:
_logger.log_action(self.action_name, self.action_id, seconds,
self._start, end)
else:
_logger.log_action(self.action_name, self.action_id, seconds)
def _LockFileCreate():
# This relies on this script running at the root of the bazel workspace.
cwd = os.environ['PWD']
cwd_hash = hashlib.sha256(cwd.encode()).hexdigest()
return '/tmp/tulsi_bazel_build_{}.lock'.format(cwd_hash)
# Function to be called atexit to release the file lock on script termination.
def _LockFileExitCleanup(lock_file_handle):
lock_file_handle.close()
def _LockFileAcquire(lock_path):
"""Force script to wait on file lock to serialize build target actions.
Args:
lock_path: Path to the lock file.
"""
_PrintUnbuffered('Queuing Tulsi build...')
lockfile = open(lock_path, 'w')
# Register "fclose(...)" as early as possible, before acquiring lock.
atexit.register(_LockFileExitCleanup, lockfile)
while True:
try:
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
else:
time.sleep(0.1)
class CodesignBundleAttributes(object):
"""Wrapper class for codesigning attributes of a signed bundle."""
# List of codesigning attributes that this script requires.
_ATTRIBUTES = ['Authority', 'Identifier', 'TeamIdentifier']
def __init__(self, codesign_output):
self.attributes = {}
pending_attributes = list(self._ATTRIBUTES)
for line in codesign_output.split('\n'):
if not pending_attributes:
break
for attribute in pending_attributes:
if line.startswith(attribute):
value = line[len(attribute) + 1:]
self.attributes[attribute] = value
pending_attributes.remove(attribute)
break
for attribute in self._ATTRIBUTES:
if attribute not in self.attributes:
_PrintXcodeError(
'Failed to extract %s from %s.\n' % (attribute, codesign_output))
def Get(self, attribute):
"""Returns the value for the given attribute, or None if it wasn't found."""
value = self.attributes.get(attribute)
if attribute not in self._ATTRIBUTES:
_PrintXcodeError(
'Attribute %s not declared to be parsed. ' % attribute +
'Available attributes are %s.\n' % self._ATTRIBUTES)
return value
class _OptionsParser(object):
"""Handles parsing script options."""
def __init__(self, build_settings, sdk_version, platform_name, arch):
self.targets = []
self.build_settings = build_settings
self.common_build_options = [
'--verbose_failures',
'--bes_outerr_buffer_size=0', # Don't buffer Bazel output.
]
self.sdk_version = sdk_version
self.platform_name = platform_name
if self.platform_name.startswith('watch'):
config_platform = 'watchos'
elif self.platform_name.startswith('iphone'):
config_platform = 'ios'
elif self.platform_name.startswith('macos'):
config_platform = 'macos'
elif self.platform_name.startswith('appletv'):
config_platform = 'tvos'
else:
self._WarnUnknownPlatform()
config_platform = 'ios'
self.bazel_build_config = '{}_{}'.format(config_platform, arch)
if self.bazel_build_config not in build_settings.platformConfigFlags:
_PrintXcodeError('Unknown active compilation target of "{}". '
'Please report a Tulsi bug.'
.format(self.bazel_build_config))
sys.exit(1)
self.verbose = 0
self.bazel_bin_path = 'bazel-bin'
self.bazel_executable = None
@staticmethod
def _UsageMessage():
"""Returns a usage message string."""
usage = textwrap.dedent("""\
Usage: %s <target> [<target2> ...] --bazel <bazel_binary_path> [options]
Where options are:
--verbose [-v]
Increments the verbosity of the script by one level. This argument
may be provided multiple times to enable additional output levels.
--bazel_bin_path <path>
Path at which Bazel-generated artifacts may be retrieved.
""" % sys.argv[0])
return usage
def ParseOptions(self, args):
"""Parses arguments, returning (message, exit_code)."""
bazel_executable_index = args.index('--bazel')
self.targets = args[:bazel_executable_index]
if not self.targets or len(args) < bazel_executable_index + 2:
return (self._UsageMessage(), 10)
self.bazel_executable = args[bazel_executable_index + 1]
return self._ParseVariableOptions(args[bazel_executable_index + 2:])
def GetBaseFlagsForTargets(self, config):
is_debug = config == 'Debug'
return self.build_settings.flags_for_target(
self.targets[0],
is_debug,
self.bazel_build_config)
def GetEnabledFeatures(self):
"""Returns a list of enabled Bazel features for the active target."""
return self.build_settings.features_for_target(self.targets[0])
def GetBazelOptions(self, config):
"""Returns the full set of build options for the given config."""
bazel, start_up, build = self.GetBaseFlagsForTargets(config)
all_build = []
all_build.extend(self.common_build_options)
all_build.extend(build)
xcode_version_flag = self._ComputeXcodeVersionFlag()
if xcode_version_flag:
all_build.append('--xcode_version=%s' % xcode_version_flag)
return bazel, start_up, all_build
def _WarnUnknownPlatform(self):
_PrintUnbuffered('Warning: unknown platform "%s" will be treated as '
'iOS' % self.platform_name)
def _ParseVariableOptions(self, args):
"""Parses flag-based args, returning (message, exit_code)."""
verbose_re = re.compile('-(v+)$')
while args:
arg = args[0]
args = args[1:]
if arg == '--bazel_bin_path':
if not args:
return ('Missing required parameter for %s' % arg, 2)
self.bazel_bin_path = args[0]
args = args[1:]
elif arg == '--verbose':
self.verbose += 1
else:
match = verbose_re.match(arg)
if match:
self.verbose += len(match.group(1))
else:
return ('Unknown option "%s"\n%s' % (arg, self._UsageMessage()), 1)
return (None, 0)
@staticmethod
def _GetXcodeBuildVersionString():
"""Returns Xcode build version from the environment as a string."""
return os.environ['XCODE_PRODUCT_BUILD_VERSION']
@staticmethod
def _GetXcodeVersionString():
# """Returns Xcode version info from the environment as a string."""
# xcodebuild_bin = os.path.join(os.environ["SYSTEM_DEVELOPER_BIN_DIR"], "xcodebuild")
# # Expect something like this
# # ['Xcode 11.2.1', 'Build version 11B500', '']
# # This command is a couple hundred MS to run and should be removed. On
# # Xcode 11.2.1 Xcode uses the wrong # version, although version.plist is
# # correct.
# process = subprocess.Popen([xcodebuild_bin, "-version"], stdout=subprocess.PIPE)
# process.wait()
#
# if process.returncode != 0:
# _PrintXcodeWarning('Can\'t find xcode version')
# return None
#
# output = process.stdout.read()
# lines = output.split("\n")
# return lines[0].split(" ")[1]
"""Returns Xcode version info from the Xcode's version.plist.
Just reading XCODE_VERSION_ACTUAL from the environment seems like
a more reasonable implementation, but has shown to be unreliable,
at least when using Xcode 11.3.1 and opening the project within an
Xcode workspace.
"""
developer_dir = os.environ['DEVELOPER_DIR']
app_dir = developer_dir.split('.app')[0] + '.app'
version_plist_path = os.path.join(app_dir, 'Contents', 'version.plist')
try:
# python2 API to plistlib - needs updating if/when Tulsi bumps to python3
plist = plistlib.readPlist(version_plist_path)
except IOError:
_PrintXcodeWarning('Tulsi cannot determine Xcode version, error '
'reading from {}'.format(version_plist_path))
return None
try:
# Example: "11.3.1", "11.3", "11.0"
key = 'CFBundleShortVersionString'
version_string = plist[key]
except KeyError:
_PrintXcodeWarning('Tulsi cannot determine Xcode version from {}, no '
'"{}" key'.format(version_plist_path, key))
return None
# But we need to normalize to major.minor.patch, e.g. 11.3.0 or
# 11.0.0, so add one or two ".0" if needed (two just in case
# there is ever just a single version number like "12")
dots_count = version_string.count('.')
dot_zeroes_to_add = 2 - dots_count
version_string += '.0' * dot_zeroes_to_add
return version_string
@staticmethod
def _ComputeXcodeVersionFlag():
"""Returns a string for the --xcode_version build flag, if any.
The flag should be used if the active Xcode version was not the same one
used during project generation.
Note this a best-attempt only; this may not be accurate as Bazel itself
caches the active DEVELOPER_DIR path and the user may have changed their
installed Xcode version.
"""
xcode_version = _OptionsParser._GetXcodeVersionString()
build_version = _OptionsParser._GetXcodeBuildVersionString()
if not xcode_version or not build_version:
return None
# Of the form Major.Minor.Fix.Build (new Bazel form) or Major.Min.Fix (old).
full_bazel_version = os.environ.get('TULSI_XCODE_VERSION')
if not full_bazel_version: # Unexpected: Tulsi gen didn't set the flag.
return xcode_version
# Newer Bazel versions specify the version as Major.Minor.Fix.Build.
if full_bazel_version.count('.') == 3:
components = full_bazel_version.rsplit('.', 1)
bazel_xcode_version = components[0]
bazel_build_version = components[1]
if (xcode_version != bazel_xcode_version
or build_version != bazel_build_version):
return '{}.{}'.format(xcode_version, build_version)
else:
return None
else: # Old version of Bazel. We need to use form Major.Minor.Fix.
return xcode_version if xcode_version != full_bazel_version else None
class BazelBuildBridge(object):
"""Handles invoking Bazel and unpacking generated binaries."""
BUILD_EVENTS_FILE = 'build_events.json'
def __init__(self, build_settings):
self.build_settings = build_settings
self.verbose = 0
self.bazel_bin_path = None
self.codesign_attributes = {}
self.codesigning_folder_path = os.environ['CODESIGNING_FOLDER_PATH']
self.xcode_action = os.environ['ACTION'] # The Xcode build action.
# When invoked as an external build system script, Xcode will set ACTION to
# an empty string.
if not self.xcode_action:
self.xcode_action = 'build'
if int(os.environ['XCODE_VERSION_MAJOR']) < 900:
xcode_build_version = os.environ['XCODE_PRODUCT_BUILD_VERSION']
_PrintXcodeWarning('Tulsi officially supports Xcode 9+. You are using an '
'earlier Xcode, build %s.' % xcode_build_version)
self.tulsi_version = os.environ.get('TULSI_VERSION', 'UNKNOWN')
self.custom_lldbinit = os.environ.get('TULSI_LLDBINIT_FILE')
# TODO(b/69857078): Remove this when wrapped_clang is updated.
self.direct_debug_prefix_map = False
self.normalized_prefix_map = False
self.update_symbol_cache = UpdateSymbolCache()
# Target architecture. Must be defined for correct setting of
# the --cpu flag. Note that Xcode will set multiple values in
# ARCHS when building for a Generic Device.
archs = os.environ.get('ARCHS')
if not archs:
_PrintXcodeError('Tulsi requires env variable ARCHS to be '
'set. Please file a bug against Tulsi.')
sys.exit(1)
self.arch = archs.split()[-1]
# Path into which generated artifacts should be copied.
self.built_products_dir = os.environ['BUILT_PRODUCTS_DIR']
# Path where Xcode expects generated sources to be placed.
self.derived_sources_folder_path = os.environ.get('DERIVED_SOURCES_DIR')
# Full name of the target artifact (e.g., "MyApp.app" or "Test.xctest").
self.full_product_name = os.environ['FULL_PRODUCT_NAME']
# Whether to generate runfiles for this target.
self.gen_runfiles = os.environ.get('GENERATE_RUNFILES')
# Target SDK version.
self.sdk_version = os.environ.get('SDK_VERSION')
# TEST_HOST for unit tests.
self.test_host_binary = os.environ.get('TEST_HOST')
# Whether this target is a test or not.
self.is_test = os.environ.get('WRAPPER_EXTENSION') == 'xctest'
# Target platform.
self.platform_name = os.environ['PLATFORM_NAME']
# Type of the target artifact.
self.product_type = os.environ['PRODUCT_TYPE']
# Path to the parent of the xcodeproj bundle.
self.project_dir = os.environ['PROJECT_DIR']
# Path to the xcodeproj bundle.
self.project_file_path = os.environ['PROJECT_FILE_PATH']
# Path to the directory containing the WORKSPACE file.
self.workspace_root = os.path.abspath(os.environ['TULSI_WR'])
# Set to the name of the generated bundle for bundle-type targets, None for
# single file targets (like static libraries).
self.wrapper_name = os.environ.get('WRAPPER_NAME')
self.wrapper_suffix = os.environ.get('WRAPPER_SUFFIX', '')
# Path where Xcode expects the artifacts to be written to. This is not the
# codesigning_path as device vs simulator builds have different signing
# requirements, so Xcode expects different paths to be signed. This is
# mostly apparent on XCUITests where simulator builds set the codesigning
# path to be the .xctest bundle, but for device builds it is actually the
# UI runner app (since it needs to be codesigned to run on the device.) The
# FULL_PRODUCT_NAME variable is a stable path on where to put the expected
# artifacts. For static libraries (objc_library, swift_library),
# FULL_PRODUCT_NAME corresponds to the .a file name, which coincides with
# the expected location for a single artifact output.
# TODO(b/35811023): Check these paths are still valid.
self.artifact_output_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])
# Path to where Xcode expects the binary to be placed.
self.binary_path = os.path.join(
os.environ['TARGET_BUILD_DIR'], os.environ['EXECUTABLE_PATH'])
self.is_simulator = self.platform_name.endswith('simulator')
# Check to see if code signing actions should be skipped or not.
if self.is_simulator:
self.codesigning_allowed = False
else:
self.codesigning_allowed = os.environ.get('CODE_SIGNING_ALLOWED') == 'YES'
if self.codesigning_allowed:
platform_prefix = 'iOS'
if self.platform_name.startswith('macos'):
platform_prefix = 'macOS'
entitlements_filename = '%sXCTRunner.entitlements' % platform_prefix
self.runner_entitlements_template = os.path.join(self.project_file_path,
'.tulsi',
'Resources',
entitlements_filename)
self.bazel_executable = None
def Run(self, args):
"""Executes a Bazel build based on the environment and given arguments."""
if self.xcode_action != 'build':
sys.stderr.write('Xcode action is %s, ignoring.' % self.xcode_action)
return 0
parser = _OptionsParser(self.build_settings,
self.sdk_version,
self.platform_name,
self.arch)
timer = Timer('Parsing options', 'parsing_options').Start()
message, exit_code = parser.ParseOptions(args[1:])
timer.End()
if exit_code:
_PrintXcodeError('Option parsing failed: %s' % message)
return exit_code
self.verbose = parser.verbose
self.bazel_bin_path = os.path.abspath(parser.bazel_bin_path)
self.bazel_executable = parser.bazel_executable
self.bazel_exec_root = self.build_settings.bazelExecRoot
# Update feature flags.
features = parser.GetEnabledFeatures()
self.direct_debug_prefix_map = 'DirectDebugPrefixMap' in features
self.normalized_prefix_map = 'DebugPathNormalization' in features
# Path to the Build Events JSON file uses pid and is removed if the
# build is successful.
filename = '%d_%s' % (os.getpid(), BazelBuildBridge.BUILD_EVENTS_FILE)
self.build_events_file_path = os.path.join(
self.project_file_path,
'.tulsi',
filename)
(command, retval) = self._BuildBazelCommand(parser)
if retval:
return retval
timer = Timer('Running Bazel', 'running_bazel').Start()
exit_code, outputs = self._RunBazelAndPatchOutput(command)
timer.End()
if exit_code:
_Fatal('Bazel build failed with exit code %d. Please check the build '
'log in Report Navigator (⌘9) for more information.'
% exit_code)
return exit_code
post_bazel_timer = Timer('Total Tulsi Post-Bazel time', 'total_post_bazel')
post_bazel_timer.Start()
if not os.path.exists(self.bazel_exec_root):
_Fatal('No Bazel execution root was found at %r. Debugging experience '
'will be compromised. Please report a Tulsi bug.'
% self.bazel_exec_root)
return 404
# This needs to run after `bazel build`, since it depends on the Bazel
# workspace directory
exit_code = self._LinkTulsiWorkspace()
if exit_code:
return exit_code
exit_code, outputs_data = self._ExtractAspectOutputsData(outputs)
if exit_code:
return exit_code
# Generated headers are installed on a thread since we are launching
# a separate process to do so. This gives us clean timings.
install_thread = threading.Thread(
target=self._InstallGeneratedHeaders, args=(outputs,))
install_thread.start()
timer = Timer('Installing artifacts', 'installing_artifacts').Start()
exit_code = self._InstallArtifact(outputs_data)
timer.End()
install_thread.join()
if exit_code:
return exit_code
exit_code, dsym_paths = self._InstallDSYMBundles(
self.built_products_dir, outputs_data)
if exit_code:
return exit_code
if not dsym_paths:
# Clean any bundles from a previous build that can interfere with
# debugging in LLDB.
self._CleanExistingDSYMs()
else:
for path in dsym_paths:
# Starting with Xcode 9.x, a plist based remapping exists for dSYM
# bundles that works with Swift as well as (Obj-)C(++).
#
# This solution also works for Xcode 8.x for (Obj-)C(++) but not
# for Swift.
timer = Timer('Adding remappings as plists to dSYM',
'plist_dsym').Start()
exit_code = self._PlistdSYMPaths(path)
timer.End()
if exit_code:
_PrintXcodeError('Remapping dSYMs process returned %i, please '
'report a Tulsi bug and attach a full Xcode '
'build log.' % exit_code)
return exit_code
# Starting with Xcode 7.3, XCTests inject several supporting frameworks
# into the test host that need to be signed with the same identity as
# the host itself.
if (self.is_test and not self.platform_name.startswith('macos') and
self.codesigning_allowed):
exit_code = self._ResignTestArtifacts()
if exit_code:
return exit_code
# Starting with Xcode 8, .lldbinit files are honored during Xcode debugging
# sessions. This allows use of the target.source-map field to remap the
# debug symbol paths encoded in the binary to the paths expected by Xcode.
#
# This will not work with dSYM bundles, or a direct -fdebug-prefix-map from
# the Bazel-built locations to Xcode-visible sources.
timer = Timer('Updating .lldbinit', 'updating_lldbinit').Start()
clear_source_map = dsym_paths or self.direct_debug_prefix_map
exit_code = self._UpdateLLDBInit(clear_source_map)
timer.End()
if exit_code:
_PrintXcodeWarning('Updating .lldbinit action failed with code %d' %
exit_code)
post_bazel_timer.End(log_absolute_times=True)
return 0
def _BuildBazelCommand(self, options):
"""Builds up a commandline string suitable for running Bazel."""
configuration = os.environ['CONFIGURATION']
# Treat the special testrunner build config as a Debug compile.
test_runner_config_prefix = '__TulsiTestRunner_'
if configuration.startswith(test_runner_config_prefix):
configuration = configuration[len(test_runner_config_prefix):]
elif os.environ.get('TULSI_TEST_RUNNER_ONLY') == 'YES':
_PrintXcodeError('Building test targets with configuration "%s" is not '
'allowed. Please use the "Test" action or "Build for" > '
'"Testing" instead.' % configuration)
return (None, 1)
bazel, start_up, build = options.GetBazelOptions(configuration)
bazel_command = [bazel]
bazel_command.extend(start_up)
bazel_command.append('build')
bazel_command.extend(build)
bazel_command.extend([
# The following flags are used by Tulsi to identify itself and read
# build information from Bazel. They should not affect Bazel analysis
# caching.
'--tool_tag=tulsi:bazel_build',
'--build_event_json_file=%s' % self.build_events_file_path,
'--noexperimental_build_event_json_file_path_conversion',
'--aspects', '@tulsi//:tulsi/tulsi_aspects.bzl%tulsi_outputs_aspect'])
if self.is_test and self.gen_runfiles:
bazel_command.append('--output_groups=+tulsi_outputs')
else:
bazel_command.append('--output_groups=tulsi_outputs,default')
bazel_command.extend(options.targets)
extra_options = bazel_options.BazelOptions(os.environ)
bazel_command.extend(extra_options.bazel_feature_flags())
return (bazel_command, 0)
def _RunBazelAndPatchOutput(self, command):
"""Runs subprocess command, patching output as it's received."""
self._PrintVerbose('Running "%s", patching output for workspace root at '
'"%s" with project path at "%s".' %
(' '.join([pipes.quote(x) for x in command]),
self.workspace_root,
self.project_dir))
# Clean up bazel output to make it look better in Xcode.
bazel_line_regex = re.compile(
r'(INFO|DEBUG|WARNING|ERROR|FAILED): ([^:]+:\d+:(?:\d+:)?)\s+(.+)')
bazel_generic_regex = re.compile(r'(INFO|DEBUG|WARNING|ERROR|FAILED): (.*)')
def PatchBazelDiagnosticStatements(output_line):
"""Make Bazel output more Xcode friendly."""
def BazelLabelToXcodeLabel(bazel_label):
"""Map Bazel labels to xcode labels for build output."""
xcode_labels = {
'INFO': 'note',
'DEBUG': 'note',
'WARNING': 'warning',
'ERROR': 'error',
'FAILED': 'error'
}
return xcode_labels.get(bazel_label, bazel_label)
match = bazel_line_regex.match(output_line)
if match:
xcode_label = BazelLabelToXcodeLabel(match.group(1))
output_line = '%s %s: %s' % (match.group(2), xcode_label,
match.group(3))
else:
match = bazel_generic_regex.match(output_line)
if match:
xcode_label = BazelLabelToXcodeLabel(match.group(1))
output_line = '%s: %s' % (xcode_label, match.group(2))
return output_line
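# For example, a (hypothetical) Bazel line
#   'ERROR: some/pkg/BUILD:12:3: compilation failed'
# becomes
#   'some/pkg/BUILD:12:3: error: compilation failed'
# and, being a relative path, is then prefixed with the workspace root by
# PatchOutputLine below so Xcode can link the diagnostic to the source file.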
patch_xcode_parsable_line = None
# Always patch outputs for XCHammer.
# if self.workspace_root != self.project_dir:
if True:
# Match (likely) filename:line_number: lines.
xcode_parsable_line_regex = re.compile(r'([^/][^:]+):\d+:')
def PatchOutputLine(output_line):
output_line = PatchBazelDiagnosticStatements(output_line)
if xcode_parsable_line_regex.match(output_line):
output_line = '%s/%s' % (self.workspace_root, output_line)
return output_line
patch_xcode_parsable_line = PatchOutputLine
else:
patch_xcode_parsable_line = PatchBazelDiagnosticStatements
def HandleOutput(output):
for line in output.splitlines():
_logger.log_bazel_message(patch_xcode_parsable_line(line))
def WatcherUpdate(watcher):
"""Processes any new events in the given watcher.
Args:
watcher: a BazelBuildEventsWatcher object.
Returns:
A list of new tulsiout file names seen.
"""
new_events = watcher.check_for_new_events()
new_outputs = []
for build_event in new_events:
if build_event.stderr:
HandleOutput(build_event.stderr)
if build_event.stdout:
HandleOutput(build_event.stdout)
if build_event.files:
outputs = [x for x in build_event.files if x.endswith('.tulsiouts')]
new_outputs.extend(outputs)
return new_outputs
def ReaderThread(file_handle, out_buffer):
out_buffer.append(file_handle.read())
file_handle.close()
# Make sure the BEP JSON file exists and is empty. We do this to prevent
# any sort of race between the watcher, bazel, and the old file contents.
open(self.build_events_file_path, 'w').close()
# Capture the stderr and stdout from Bazel. We only display it if we're
# unable to read any BEP events.
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1)
# Register atexit function to clean up BEP file.
atexit.register(_BEPFileExitCleanup, self.build_events_file_path)
global CLEANUP_BEP_FILE_AT_EXIT
CLEANUP_BEP_FILE_AT_EXIT = True
# Start capturing output from Bazel.
reader_buffer = []
reader_thread = threading.Thread(target=ReaderThread,
args=(process.stdout, reader_buffer))
reader_thread.daemon = True
reader_thread.start()
with io.open(self.build_events_file_path, 'r', -1, 'utf-8', 'ignore'
) as bep_file:
watcher = bazel_build_events.BazelBuildEventsWatcher(bep_file,
_PrintXcodeWarning)
output_locations = []
while process.returncode is None:
output_locations.extend(WatcherUpdate(watcher))
time.sleep(0.1)
process.poll()
output_locations.extend(WatcherUpdate(watcher))
# If BEP JSON parsing failed, we should display the raw stdout and
# stderr from Bazel.
reader_thread.join()
if not watcher.has_read_events():
HandleOutput(reader_buffer[0])
if process.returncode == 0 and not output_locations:
CLEANUP_BEP_FILE_AT_EXIT = False
_PrintXcodeError('Unable to find location of the .tulsiouts file. '
'Please report this as a Tulsi bug, including the '
'contents of %s.' % self.build_events_file_path)
return 1, output_locations
return process.returncode, output_locations
def _ExtractAspectOutputsData(self, output_files):
"""Converts aspect output from paths to json to a list of dictionaries.
Args:
output_files: A list of strings to files representing Bazel aspect output
in UTF-8 JSON format.
Returns:
return_code, [dict]: A tuple with a return code as its first argument and
for its second argument, a list of dictionaries for
each output_file that could be interpreted as valid
JSON, representing the returned Bazel aspect
information.
return_code, None: If an error occurred while converting the list of
files into JSON.
"""
outputs_data = []
for output_file in output_files:
try:
output_data = json.load(open(output_file))
except (ValueError, IOError) as e:
_PrintXcodeError('Failed to load output map "%s". '
'%s' % (output_file, e))
return 600, None
outputs_data.append(output_data)
return 0, outputs_data
def _GetBundleSourceLocation(self, artifact_archive_root, bundle_subpath):
if not artifact_archive_root or not bundle_subpath:
return None
source_location = os.path.join(artifact_archive_root, bundle_subpath)
return source_location if os.path.isdir(source_location) else None
def _InstallArtifact(self, outputs_data):
"""Installs Bazel-generated artifacts into the Xcode output directory."""
xcode_artifact_path = self.artifact_output_path
if not outputs_data:
_PrintXcodeError('Failed to load top level output file.')
return 600
primary_output_data = outputs_data[0]
if 'artifact' not in primary_output_data:
_PrintXcodeError(
'Failed to find an output artifact for target %s in output map %r' %
(xcode_artifact_path, primary_output_data))
return 601
primary_artifact = primary_output_data['artifact']
artifact_archive_root = primary_output_data.get('archive_root')
bundle_name = primary_output_data.get('bundle_name')
# The PRODUCT_NAME used by the Xcode project is not trustable as it may be
# modified by the user and, more importantly, may have been modified by
# Tulsi to disambiguate multiple targets with the same name.
self.bazel_product_name = bundle_name
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = primary_artifact.endswith('.ipa')
is_zip = primary_artifact.endswith('.zip')
if is_ipa or is_zip:
expected_bundle_name = bundle_name + self.wrapper_suffix
# The directory structure within the IPA is then determined based on
# Bazel's package and/or product type.
if is_ipa:
bundle_subpath = os.path.join('Payload', expected_bundle_name)
else:
# If the artifact is a ZIP, assume that the bundle is the top-level
# directory (this is the way in which Skylark rules package artifacts
# that are not standalone IPAs).
bundle_subpath = expected_bundle_name
# Prefer to copy over files from the archive root instead of unzipping the
# ipa/zip in order to help preserve timestamps. Note that the archive root
# is only present for local builds; for remote builds we must extract from
# the zip file.
source_location = self._GetBundleSourceLocation(artifact_archive_root, bundle_subpath)
if source_location:
exit_code = self._RsyncBundle(os.path.basename(primary_artifact),
source_location,
xcode_artifact_path)
elif self._IsValidArtifactArchiveRoot(artifact_archive_root, bundle_name):
# source_location was not resolved above, so rsync from the bundle's
# expected location under the archive root instead.
source_location = os.path.join(artifact_archive_root, bundle_subpath)
exit_code = self._RsyncBundle(os.path.basename(primary_artifact),
source_location,
xcode_artifact_path)
else:
exit_code = self._UnpackTarget(primary_artifact,
xcode_artifact_path,
bundle_subpath)
if exit_code:
return exit_code
elif os.path.isfile(primary_artifact):
# Remove the old artifact before copying.
if os.path.isfile(xcode_artifact_path):
try:
os.remove(xcode_artifact_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale output file "%s". '
'%s' % (xcode_artifact_path, e))
return 600
exit_code = self._CopyFile(os.path.basename(primary_artifact),
primary_artifact,
xcode_artifact_path)
if exit_code:
return exit_code
else:
self._RsyncBundle(os.path.basename(primary_artifact),
primary_artifact,
xcode_artifact_path)
# When the rules output a tree artifact, Tulsi will copy the bundle as is
# into the expected Xcode output location. But because they're copied as
# is from the bazel output, they come with bazel's permissions, which are
# read only. Here we set them to write as well, so Xcode can modify the
# bundle too (for example, for codesigning).
chmod_timer = Timer('Modifying permissions of output bundle',
'bundle_chmod').Start()
self._PrintVerbose('Spawning subprocess to add write permissions to '
'copied bundle...')
process = subprocess.Popen(['chmod', '-R', 'uga+w', xcode_artifact_path])
process.wait()
chmod_timer.End()
# No return code check as this is not an essential operation.
self._InstallEmbeddedBundlesIfNecessary(primary_output_data)
return 0
def _IsValidArtifactArchiveRoot(self, archive_root, bundle_name):
"""Returns true if the archive root is valid for use."""
if not archive_root or not os.path.isdir(archive_root):
return False
# The archive root will not be updated for any remote builds, but will be
# valid for local builds. We detect this by using an implementation detail
# of the rules_apple bundler: archives will always be transformed from
# <name>.unprocessed.zip (locally or remotely) to <name>.archive-root.
#
# Thus if the mod time on the archive root is not greater than the mod
# time on the zip, the archive root is not valid. Remote builds
# will end up copying the <name>.unprocessed.zip but not the
# <name>.archive-root, making this a valid temporary solution.
#
# In the future, it would be better to have this handled by the rules;
# until then this should suffice as a work around to improve build times.
unprocessed_zip = os.path.join(os.path.dirname(archive_root),
'%s.unprocessed.zip' % bundle_name)
if not os.path.isfile(unprocessed_zip):
return False
return os.path.getmtime(archive_root) > os.path.getmtime(unprocessed_zip)
def _InstallEmbeddedBundlesIfNecessary(self, output_data):
"""Install embedded bundles next to the current target's output."""
# In order to find and load symbols for the binary installed on device,
# Instruments needs to "see" it in Spotlight index somewhere on the local
# filesystem. This is only needed for on-device instrumentation.
#
# Unfortunately, it does not seem to be possible to detect when a build is
# being made for profiling, thus we can't exclude this step for on-device
# non-profiling builds.
if self.is_simulator or ('embedded_bundles' not in output_data):
return
timer = Timer('Installing embedded bundles',
'installing_embedded_bundles').Start()
for bundle_info in output_data['embedded_bundles']:
bundle_name = bundle_info['bundle_name']
bundle_extension = bundle_info['bundle_extension']
full_name = bundle_name + bundle_extension
output_path = os.path.join(self.built_products_dir, full_name)
# TODO(b/68936732): See if copying just the binary (not the whole bundle)
# is enough to make Instruments work.
if self._IsValidArtifactArchiveRoot(bundle_info['archive_root'],
bundle_name):
source_path = os.path.join(bundle_info['archive_root'], full_name)
self._RsyncBundle(full_name, source_path, output_path)
else:
# Try to find the embedded bundle within the installed main bundle.
bundle_path = self._FindEmbeddedBundleInMain(bundle_name,
bundle_extension)
if bundle_path:
self._RsyncBundle(full_name, bundle_path, output_path)
else:
_PrintXcodeWarning('Could not find bundle %s in main bundle. ' %
(full_name) +
'Device-level Instruments debugging will be '
'disabled for this bundle. Please report a '
'Tulsi bug and attach a full Xcode build log.')
timer.End()
# Maps extensions to anticipated subfolders.
_EMBEDDED_BUNDLE_PATHS = {
'.appex': 'PlugIns',
'.framework': 'Frameworks'
}
def _FindEmbeddedBundleInMain(self, bundle_name, bundle_extension):
"""Retrieves the first embedded bundle found within our main bundle."""
main_bundle = os.environ.get('EXECUTABLE_FOLDER_PATH')
if not main_bundle:
return None
main_bundle_path = os.path.join(self.built_products_dir,
main_bundle)
return self._FindEmbeddedBundle(bundle_name,
bundle_extension,
main_bundle_path)
def _FindEmbeddedBundle(self, bundle_name, bundle_extension, bundle_path):
"""Retrieves the first embedded bundle found within this bundle path."""
embedded_subfolder = self._EMBEDDED_BUNDLE_PATHS.get(bundle_extension)
if not embedded_subfolder:
return None
projected_bundle_path = os.path.join(bundle_path,
embedded_subfolder,
bundle_name + bundle_extension)
if os.path.isdir(projected_bundle_path):
return projected_bundle_path
# For frameworks not in the main app bundle, and possibly other executable
# bundle content in the future, we recurse through every .appex in PlugIns
# to find those frameworks.
#
# This won't support frameworks that could potentially have the same name
# but are different between the app and extensions, but we intentionally
# choose not to handle that case. Xcode build system only supports
# uniquely named frameworks, and we shouldn't confuse the dynamic loader
# with frameworks that have the same image names but different content.
appex_root_path = os.path.join(bundle_path, 'PlugIns')
if not os.path.isdir(appex_root_path):
return None
# Find each directory within appex_root_path and attempt to find a bundle.
# If one can't be found, return None.
appex_dirs = os.listdir(appex_root_path)
for appex_dir in appex_dirs:
appex_path = os.path.join(appex_root_path, appex_dir)
path = self._FindEmbeddedBundle(bundle_name,
bundle_extension,
appex_path)
if path:
return path
return None
def _InstallGeneratedHeaders(self, outputs):
"""Invokes install_genfiles.py to install generated Bazel files."""
genfiles_timer = Timer('Installing generated headers',
'installing_generated_headers').Start()
# Resolve the path to the install_genfiles.py script.
# It should be in the same directory as this script.
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'install_genfiles.py')
args = [path, self.bazel_exec_root]
args.extend(outputs)
self._PrintVerbose('Spawning subprocess install_genfiles.py to copy '
'generated files in the background...')
process = subprocess.Popen(args)
process.wait()
genfiles_timer.End()
def _InstallBundle(self, source_path, output_path):
"""Copies the bundle at source_path to output_path."""
if not os.path.isdir(source_path):
return 0, None
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale bundle "%s". '
'%s' % (output_path, e))
return 700, None
exit_code = self._CopyBundle(os.path.basename(source_path),
source_path,
output_path)
return exit_code, output_path
def _RsyncBundle(self, source_path, full_source_path, output_path):
"""Rsyncs the given bundle to the given expected output path."""
self._PrintVerbose('Rsyncing %s to %s' % (source_path, output_path))
# rsync behavior changes based on presence of a trailing slash.
if not full_source_path.endswith('/'):
full_source_path += '/'
try:
# Use -c to check differences by checksum, -v for verbose
# The rest of the flags are the same as -a but without preserving
# timestamps, which is done intentionally so the timestamp will
# only change when the file is changed.
subprocess.check_output(['rsync',
'-vcrlpgoD',
full_source_path,
output_path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
_PrintXcodeError('Rsync failed. %s' % e)
return 650
return 0
def _CopyBundle(self, source_path, full_source_path, output_path):
"""Copies the given bundle to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
try:
CopyOnWrite(full_source_path, output_path, tree=True)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _CopyFile(self, source_path, full_source_path, output_path):
"""Copies the given file to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
output_path_dir = os.path.dirname(output_path)
if not os.path.exists(output_path_dir):
try:
os.makedirs(output_path_dir)
except OSError as e:
_PrintXcodeError('Failed to create output directory "%s". '
'%s' % (output_path_dir, e))
return 650
try:
CopyOnWrite(full_source_path, output_path)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _UnpackTarget(self, bundle_path, output_path, bundle_subpath):
"""Unpacks generated bundle into the given expected output path."""
self._PrintVerbose('Unpacking %s to %s' % (bundle_path, output_path))
if not os.path.isfile(bundle_path):
_PrintXcodeError('Generated bundle not found at "%s"' % bundle_path)
return 670
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale output directory "%s". '
'%s' % (output_path, e))
return 600
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = bundle_path.endswith('.ipa')
with zipfile.ZipFile(bundle_path, 'r') as zf:
for item in zf.infolist():
filename = item.filename
# Support directories do not seem to be needed by the debugger and are
# skipped.
basedir = filename.split(os.sep)[0]
if basedir.endswith('Support') or basedir.endswith('Support2'):
continue
if len(filename) < len(bundle_subpath):
continue
attributes = (item.external_attr >> 16) & 0o777
self._PrintVerbose('Extracting %s (%o)' % (filename, attributes),
level=1)
if not filename.startswith(bundle_subpath):
_PrintXcodeWarning('Mismatched extraction path. Bundle content '
'at "%s" expected to have subpath of "%s"' %
(filename, bundle_subpath))
dir_components = self._SplitPathComponents(filename)
# Get the file's path, ignoring the payload components if the archive
# is an IPA.
if is_ipa:
subpath = os.path.join(*dir_components[2:])
else:
subpath = os.path.join(*dir_components[1:])
target_path = os.path.join(output_path, subpath)
# Ensure the target directory exists.
try:
target_dir = os.path.dirname(target_path)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
except OSError as e:
_PrintXcodeError(
'Failed to create target path "%s" during extraction. %s' % (
target_path, e))
return 671
# If the archive item looks like a file, extract it.
if not filename.endswith(os.sep):
with zf.open(item) as src, open(target_path, 'wb') as dst:
shutil.copyfileobj(src, dst)
# Patch up the extracted file's attributes to match the zip content.
if attributes:
os.chmod(target_path, attributes)
return 0
def _InstallDSYMBundles(self, output_dir, outputs_data):
"""Copies any generated dSYM bundles to the given directory."""
dsym_to_process = set()
primary_output_data = outputs_data[0]
if primary_output_data['has_dsym']:
# Declares the Xcode-generated name of our main target's dSYM.
# This environment variable is always set, for any possible Xcode output
# that could generate a dSYM bundle.
#
# Note that this may differ from the Bazel name as Tulsi may modify the
# Xcode `BUNDLE_NAME`, so we need to make sure we use Bazel as the source
# of truth for Bazel's dSYM name, but copy it over to where Xcode expects.
xcode_target_dsym = os.environ.get('DWARF_DSYM_FILE_NAME')
if xcode_target_dsym:
dsym_path = primary_output_data.get('dsym_path')
if dsym_path:
dsym_to_process.add((dsym_path, xcode_target_dsym))
else:
_PrintXcodeWarning('Unable to resolve dSYM paths for main bundle %s' %
primary_output_data)
# Collect additional dSYM bundles generated by the dependencies of this
# build such as extensions or frameworks. Note that a main target may not
# have dSYMs while subtargets (like an xctest) still can have them.
child_dsyms = set()
for data in outputs_data:
for bundle_info in data.get('embedded_bundles', []):
if not bundle_info['has_dsym']:
continue
dsym_path = bundle_info.get('dsym_path')
if dsym_path:
child_dsyms.add((dsym_path, os.path.basename(dsym_path)))
else:
_PrintXcodeWarning(
'Unable to resolve dSYM paths for embedded bundle %s'
% bundle_info)
dsym_to_process.update(child_dsyms)
if not dsym_to_process:
return 0, None
# Start the timer now that we know we have dSYM bundles to install.
timer = Timer('Installing dSYM bundles', 'installing_dsym').Start()
dsyms_found = []
for input_dsym_full_path, xcode_dsym_name in dsym_to_process:
output_full_path = os.path.join(output_dir, xcode_dsym_name)
exit_code, path = self._InstallBundle(input_dsym_full_path,
output_full_path)
if exit_code:
_PrintXcodeWarning('Failed to install dSYM to "%s" (%s)'
% (input_dsym_full_path, exit_code))
elif path is None:
_PrintXcodeWarning('Did not find a dSYM bundle at %s'
% input_dsym_full_path)
else:
dsyms_found.append(path)
timer.End()
return 0, dsyms_found
def _ResignBundle(self, bundle_path, signing_identity, entitlements=None):
"""Re-signs the bundle with the given signing identity and entitlements."""
if not self.codesigning_allowed:
return 0
timer = Timer('\tSigning ' + bundle_path, 'signing_bundle').Start()
command = [
'xcrun',
'codesign',
'-f',
'--timestamp=none',
'-s',
signing_identity,
]
if entitlements:
command.extend(['--entitlements', entitlements])
else:
command.append('--preserve-metadata=entitlements')
command.append(bundle_path)
returncode, output = self._RunSubprocess(command)
timer.End()
if returncode:
_PrintXcodeError('Re-sign command %r failed. %s' % (command, output))
return 800 + returncode
return 0
def _ResignTestArtifacts(self):
"""Resign test related artifacts that Xcode injected into the outputs."""
if not self.is_test:
return 0
# Extract the signing identity from the bundle at the expected output path
# since that's where the signed bundle from bazel was placed.
signing_identity = self._ExtractSigningIdentity(self.artifact_output_path)
if not signing_identity:
return 800
exit_code = 0
timer = Timer('Re-signing injected test host artifacts',
'resigning_test_host').Start()
if self.test_host_binary:
# For Unit tests, we need to resign the frameworks that Xcode injected
# into the test host bundle.
test_host_bundle = os.path.dirname(self.test_host_binary)
exit_code = self._ResignXcodeTestFrameworks(
test_host_bundle, signing_identity)
else:
# For UI tests, we need to resign the UI test runner app and the
# frameworks that Xcode injected into the runner app. The UI Runner app
# also needs to be signed with entitlements.
exit_code = self._ResignXcodeTestFrameworks(
self.codesigning_folder_path, signing_identity)
if exit_code == 0:
entitlements_path = self._InstantiateUIRunnerEntitlements()
if entitlements_path:
exit_code = self._ResignBundle(
self.codesigning_folder_path,
signing_identity,
entitlements_path)
else:
_PrintXcodeError('Could not instantiate UI runner entitlements.')
exit_code = 800
timer.End()
return exit_code
def _ResignXcodeTestFrameworks(self, bundle, signing_identity):
"""Re-signs the support frameworks injected by Xcode in the given bundle."""
if not self.codesigning_allowed:
return 0
for framework in XCODE_INJECTED_FRAMEWORKS:
framework_path = os.path.join(
bundle, 'Frameworks', framework)
if os.path.isdir(framework_path) or os.path.isfile(framework_path):
exit_code = self._ResignBundle(framework_path, signing_identity)
if exit_code != 0:
return exit_code
return 0
def _InstantiateUIRunnerEntitlements(self):
"""Substitute team and bundle identifiers into UI runner entitlements.
This method throws an IOError exception if the template wasn't found in
its expected location, or an OSError if the expected output folder could
not be created.
Returns:
The path to where the entitlements file was generated.
"""
if not self.codesigning_allowed:
return None
if not os.path.exists(self.derived_sources_folder_path):
os.makedirs(self.derived_sources_folder_path)
output_file = os.path.join(
self.derived_sources_folder_path,
self.bazel_product_name + '_UIRunner.entitlements')
if os.path.exists(output_file):
os.remove(output_file)
with open(self.runner_entitlements_template, 'r') as template:
contents = template.read()
contents = contents.replace(
'$(TeamIdentifier)',
self._ExtractSigningTeamIdentifier(self.artifact_output_path))
contents = contents.replace(
'$(BundleIdentifier)',
self._ExtractSigningBundleIdentifier(self.artifact_output_path))
with open(output_file, 'w') as output:
output.write(contents)
return output_file
def _ExtractSigningIdentity(self, signed_bundle):
"""Returns the identity used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Authority')
def _ExtractSigningTeamIdentifier(self, signed_bundle):
"""Returns the team identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'TeamIdentifier')
def _ExtractSigningBundleIdentifier(self, signed_bundle):
"""Returns the bundle identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Identifier')
def _ExtractSigningAttribute(self, signed_bundle, attribute):
"""Returns the attribute used to sign the given bundle path."""
if not self.codesigning_allowed:
return '<CODE_SIGNING_ALLOWED=NO>'
cached = self.codesign_attributes.get(signed_bundle)
if cached:
return cached.Get(attribute)
timer = Timer('\tExtracting signature for ' + signed_bundle,
'extracting_signature').Start()
output = subprocess.check_output(['xcrun',
'codesign',
'-dvv',
signed_bundle],
stderr=subprocess.STDOUT)
timer.End()
bundle_attributes = CodesignBundleAttributes(output)
self.codesign_attributes[signed_bundle] = bundle_attributes
return bundle_attributes.Get(attribute)
def _UpdateLLDBInit(self, clear_source_map=False):
"""Updates lldbinit to enable debugging of Bazel binaries."""
# An additional lldbinit file that we should load in the lldbinit file
# we are about to write.
additional_lldbinit = None
if self.custom_lldbinit is None:
# Write our settings to the global ~/.lldbinit-tulsiproj file when no
# custom lldbinit is provided.
lldbinit_file = TULSI_LLDBINIT_FILE
# Make sure a reference to ~/.lldbinit-tulsiproj exists in ~/.lldbinit or
# ~/.lldbinit-Xcode. Priority is given to ~/.lldbinit-Xcode if it exists,
# otherwise the bootstrapping will be written to ~/.lldbinit.
BootstrapLLDBInit(True)
else:
# Remove any reference to ~/.lldbinit-tulsiproj if the global lldbinit was
# previously bootstrapped. This prevents the global lldbinit from having
# side effects on the custom lldbinit file.
BootstrapLLDBInit(False)
# When using a custom lldbinit, Xcode will directly load our custom file
# so write our settings to this custom file. Retain standard Xcode
# behavior by loading the default file in our custom file.
lldbinit_file = self.custom_lldbinit
additional_lldbinit = _FindDefaultLldbInit()
project_basename = os.path.basename(self.project_file_path)
workspace_root = self._NormalizePath(self.workspace_root)
with open(lldbinit_file, 'w') as out:
out.write('# This file is autogenerated by Tulsi and should not be '
'edited.\n')
if additional_lldbinit is not None:
out.write('# This loads the default lldbinit file to retain standard '
'Xcode behavior.\n')
out.write('command source "%s"\n' % additional_lldbinit)
out.write('# This sets lldb\'s working directory to the Bazel workspace '
'root used by %r.\n' % project_basename)
out.write('platform settings -w "%s"\n' % workspace_root)
if clear_source_map:
out.write('settings clear target.source-map\n')
return 0
if self.normalized_prefix_map:
source_map = ('./', workspace_root)
out.write('# This maps the normalized root to that used by '
'%r.\n' % project_basename)
else:
# NOTE: settings target.source-map is different from
# DBGSourcePathRemapping; the former is an LLDB target-level
# remapping API that rewrites breakpoints, the latter is an LLDB
# module-level remapping API that changes DWARF debug info in memory.
#
# If we had multiple remappings, it would not make sense for the
# two APIs to share the same mappings. They have very different
# side-effects in how they individually handle debug information.
source_map = self._ExtractTargetSourceMap()
out.write('# This maps Bazel\'s execution root to that used by '
'%r.\n' % project_basename)
out.write('settings set target.source-map "%s" "%s"\n' % source_map)
return 0
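# Illustrative sketch of what the generated lldbinit might contain, assuming a
# project named "MyApp.xcodeproj", a workspace root of "/Users/dev/myapp/", and
# a Bazel execution root of "/private/var/tmp/_bazel/execroot/myapp/" (all
# hypothetical paths, not taken from a real build):
#
#   # This file is autogenerated by Tulsi and should not be edited.
#   # This sets lldb's working directory to the Bazel workspace root used by 'MyApp.xcodeproj'.
#   platform settings -w "/Users/dev/myapp/"
#   # This maps Bazel's execution root to that used by 'MyApp.xcodeproj'.
#   settings set target.source-map "/private/var/tmp/_bazel/execroot/myapp/" "/Users/dev/myapp/"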
def _DWARFdSYMBinaries(self, dsym_bundle_path):
"""Returns an array of abs paths to DWARF binaries in the dSYM bundle.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
Returns:
str[]: a list of strings representing the absolute paths to each binary
found within the dSYM bundle.
"""
dwarf_dir = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'DWARF')
dsym_binaries = []
for f in os.listdir(dwarf_dir):
# Ignore hidden files, such as .DS_Store files.
if not f.startswith('.'):
# Append full path info.
dsym_binary = os.path.join(dwarf_dir, f)
dsym_binaries.append(dsym_binary)
return dsym_binaries
def _UUIDInfoForBinary(self, source_binary_path):
"""Returns exit code of dwarfdump along with every UUID + arch found.
Args:
source_binary_path: absolute path to the binary file.
Returns:
(Int, str[(str, str)]): a tuple containing the return code of dwarfdump
as its first element, and a list of strings
representing each UUID found for each given
binary slice found within the binary with its
given architecture, if no error has occurred.
"""
returncode, output = self._RunSubprocess([
'xcrun',
'dwarfdump',
'--uuid',
source_binary_path
])
if returncode:
_PrintXcodeWarning('dwarfdump returned %d while finding the UUID for %s'
% (returncode, source_binary_path))
return (returncode, [])
# Each binary slice's UUID appears as the second token from the left in
# dwarfdump's output; e.g. "UUID: D4DE5AA2-79EE-36FE-980C-755AED318308 (x86_64)
# /Applications/Calendar.app/Contents/MacOS/Calendar"
uuids_found = []
for dwarfdump_output in output.split('\n'):
if not dwarfdump_output:
continue
found_output = re.match(r'^(?:UUID: )([^ ]+) \(([^)]+)', dwarfdump_output)
if not found_output:
continue
found_uuid = found_output.group(1)
if not found_uuid:
continue
found_arch = found_output.group(2)
if not found_arch:
continue
uuids_found.append((found_uuid, found_arch))
return (0, uuids_found)
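# Minimal sketch (not called by the build): the regex above applied to the
# sample dwarfdump line quoted in the comment. The binary path is copied from
# that sample and is not a real build artifact.
def _example_parse_dwarfdump_line():
  sample = ('UUID: D4DE5AA2-79EE-36FE-980C-755AED318308 (x86_64) '
            '/Applications/Calendar.app/Contents/MacOS/Calendar')
  match = re.match(r'^(?:UUID: )([^ ]+) \(([^)]+)', sample)
  # Yields ('D4DE5AA2-79EE-36FE-980C-755AED318308', 'x86_64').
  return (match.group(1), match.group(2)) if match else None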
def _CreateUUIDPlist(self, dsym_bundle_path, uuid, arch, source_maps):
"""Creates a UUID.plist in a dSYM bundle to redirect sources.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
uuid: string representing the UUID of the binary slice with paths to
remap in the dSYM bundle.
arch: the architecture of the binary slice.
source_maps: list of tuples representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with the
paths to Xcode-visible sources used for the purposes of
Tulsi debugging as strings ($1).
Returns:
Bool: True if no error was found, or False, representing a failure to
write when creating the plist.
"""
# Create a UUID plist at (dsym_bundle_path)/Contents/Resources/.
remap_plist = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'%s.plist' % uuid)
# Via an XML plist, add the mappings from _ExtractTargetSourceMap().
try:
with open(remap_plist, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n'
'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
'<plist version="1.0">\n'
'<dict>\n'
'<key>DBGSourcePathRemapping</key>\n'
'<dict>\n')
for source_map in source_maps:
# Add the mapping as a DBGSourcePathRemapping to the UUID plist here.
out.write('<key>%s</key>\n<string>%s</string>\n' % source_map)
# Make sure that we also set DBGVersion to 3.
out.write('</dict>\n'
'<key>DBGVersion</key>\n'
'<string>3</string>\n'
'</dict>\n'
'</plist>\n')
except OSError as e:
_PrintXcodeError('Failed to write %s, received error %s' %
(remap_plist, e))
return False
# Update the dSYM symbol cache with a reference to this dSYM bundle.
err_msg = self.update_symbol_cache.UpdateUUID(uuid,
dsym_bundle_path,
arch)
if err_msg:
_PrintXcodeWarning('Attempted to save (uuid, dsym_bundle_path, arch) '
'to DBGShellCommands\' dSYM cache, but got error '
'\"%s\".' % err_msg)
return True
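# Illustrative sketch of a generated UUID plist, assuming a hypothetical UUID of
# "D4DE5AA2-79EE-36FE-980C-755AED318308" and a single source map from
# "/private/var/tmp/_bazel/execroot/myapp/" to "/Users/dev/myapp/" (both paths
# hypothetical); the real values come from dwarfdump and _ExtractTargetSourceMap:
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
#    "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
#   <plist version="1.0">
#   <dict>
#     <key>DBGSourcePathRemapping</key>
#     <dict>
#       <key>/private/var/tmp/_bazel/execroot/myapp/</key>
#       <string>/Users/dev/myapp/</string>
#     </dict>
#     <key>DBGVersion</key>
#     <string>3</string>
#   </dict>
#   </plist>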
def _CleanExistingDSYMs(self):
"""Clean dSYM bundles that were left over from a previous build."""
output_dir = self.built_products_dir
output_dir_list = os.listdir(output_dir)
for item in output_dir_list:
if item.endswith('.dSYM'):
shutil.rmtree(os.path.join(output_dir, item))
def _PlistdSYMPaths(self, dsym_bundle_path):
"""Adds Plists to a given dSYM bundle to redirect DWARF data."""
# Retrieve the paths that we are expected to remap.
# Always include a direct path from the execroot to Xcode-visible sources.
source_maps = [self._ExtractTargetSourceMap()]
# Remap relative paths from the workspace root.
if self.normalized_prefix_map:
# Take the normalized path and map that to Xcode-visible sources.
source_maps.append(('./', self._NormalizePath(self.workspace_root)))
# Find the binaries within the dSYM bundle. UUIDs will match that of the
# binary it was based on.
dsym_binaries = self._DWARFdSYMBinaries(dsym_bundle_path)
if not dsym_binaries:
_PrintXcodeWarning('Could not find the binaries that the dSYM %s was '
'based on to determine DWARF binary slices to patch. '
'Debugging will probably fail.' % (dsym_bundle_path))
return 404
# Find the binary slice UUIDs with dwarfdump from each binary.
for source_binary_path in dsym_binaries:
returncode, uuid_info_found = self._UUIDInfoForBinary(source_binary_path)
if returncode:
return returncode
# Create a plist per UUID, each indicating a binary slice to remap paths.
for uuid, arch in uuid_info_found:
plist_created = self._CreateUUIDPlist(dsym_bundle_path,
uuid,
arch,
source_maps)
if not plist_created:
return 405
return 0
def _NormalizePath(self, path):
"""Returns paths with a common form, normalized with a trailing slash.
Args:
path: a file system path given in the form of a string.
Returns:
str: a normalized string with a trailing slash, based on |path|.
"""
return os.path.normpath(path) + os.sep
def _ExtractCachableTargetSourceMap(self, normalize=True):
""" Return a cacheable source Map
Expect all builds to write the same debug info to all object files
"""
# Map the local sources to the __BAZEL_WORKSPACE_DIR__
cache_dir = "./"
return (cache_dir, self.workspace_root)
def _ExtractTargetSourceMap(self, normalize=True):
"""Extracts the source path as a tuple associated with the WORKSPACE path.
Args:
normalize: Defines if all paths should be normalized. Preferred for APIs
like DBGSourcePathRemapping and target.source-map but won't
work for the purposes of -fdebug-prefix-map.
Returns:
None: if an error occurred.
(str, str): a single tuple representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with
the paths to Xcode-visible sources used for the purposes
of Tulsi debugging as strings ($1).
"""
if os.environ.get('HAMMER_USE_DEBUG_INFO_REMAPPING', 'NO') == 'YES':
return self._ExtractCachableTargetSourceMap(normalize=normalize)
# All paths route to the "workspace root" for sources visible from Xcode.
sm_destpath = self.workspace_root
if normalize:
sm_destpath = self._NormalizePath(sm_destpath)
# Add a redirection for the Bazel execution root, the path where sources
# are referenced by Bazel.
sm_execroot = self.bazel_exec_root
if normalize:
sm_execroot = self._NormalizePath(sm_execroot)
return (sm_execroot, sm_destpath)
def _LinkTulsiWorkspace(self):
"""Links the Bazel Workspace to the Tulsi Workspace (`tulsi-workspace`)."""
tulsi_workspace = os.path.join(self.project_file_path,
'.tulsi',
'tulsi-workspace')
if os.path.islink(tulsi_workspace):
os.unlink(tulsi_workspace)
os.symlink(self.bazel_exec_root, tulsi_workspace)
if not os.path.exists(tulsi_workspace):
_PrintXcodeError(
'Linking Tulsi Workspace to %s failed.' % tulsi_workspace)
return -1
@staticmethod
def _SplitPathComponents(path):
"""Splits the given path into an array of all of its components."""
components = path.split(os.sep)
# Patch up the first component if path started with an os.sep
if not components[0]:
components[0] = os.sep
return components
def _RunSubprocess(self, cmd):
"""Runs the given command as a subprocess, returning (exit_code, output)."""
self._PrintVerbose('%r' % cmd, 1)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = process.communicate()
return (process.returncode, output)
def _PrintVerbose(self, msg, level=0):
if self.verbose > level:
_PrintUnbuffered(msg)
def main(argv):
build_settings = bazel_build_settings.BUILD_SETTINGS
if build_settings is None:
_Fatal('Unable to resolve build settings. Please report a Tulsi bug.')
return 1
return BazelBuildBridge(build_settings).Run(argv)
if __name__ == '__main__':
# Register the interrupt handler immediately in case we receive SIGINT while
# trying to acquire the lock.
signal.signal(signal.SIGINT, _InterruptHandler)
_LockFileAcquire(_LockFileCreate())
_logger = tulsi_logging.Logger()
logger_warning = tulsi_logging.validity_check()
if logger_warning:
_PrintXcodeWarning(logger_warning)
_timer = Timer('Everything', 'complete_build').Start()
_exit_code = main(sys.argv)
_timer.End()
sys.exit(_exit_code)
|
run_background.py
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import django
django.setup()
import urllib
import math
import threading
import time
from datetime import datetime, timedelta
from django.utils import timezone
from main.models import Updater, LastActivity, AccessKey
from main.appconfig import type_data
from django.conf import settings
REPORT_PUSH = settings.REPORT_PUSH
DEBUG = settings.RUNNER_DEBUG
DEFAULT_UPDATE_INTERVAL = 15
def thread_runner(instance):
if DEBUG:
print "running instance", instance.id
runtime = time.time()
delay = DEFAULT_UPDATE_INTERVAL
try:
updater_props = type_data.get(instance.type, None)
delay = updater_props.get('delay', DEFAULT_UPDATE_INTERVAL)
lu = LastActivity.objects.filter(user=instance.user)
key = AccessKey.objects.filter(user=instance.user)
recent = timezone.now() - timedelta(seconds=120)
if not lu.count() or lu[0].timestamp < recent:
if DEBUG: print "Not active"
delay = delay * 20
else:
if DEBUG: print "Active"
runner = updater_props['runner']
instance = getattr(instance, updater_props['prop'])
changed = runner(instance)
if changed > 0 and REPORT_PUSH:
if len(key):
urllib.urlopen("http://localhost:8765/api?key=%s" % key[0].key).read()
instance.last_update = timezone.now()
instance.next_update = timezone.now() + timedelta(seconds=delay)
# We leave messages + timestamps so we can see old failures even if the system recovered.
instance.failure_count = 0
instance.running = False
instance.save()
except Exception, E:
msg = "Attempting to update %s failed for %s: \n %s: %s" % (instance.type, instance.id, type(E), E)
print msg
# we don't update last_update on failure.
instance.last_failure = timezone.now()
instance.last_failure_message = msg
instance.failure_count = instance.failure_count + 1
# exponential backoff
update_time = delay * math.pow(3, instance.failure_count)
instance.next_update = timezone.now() + timedelta(seconds=update_time)
instance.running = False
instance.save()
if DEBUG:
print "finished", instance.id, " in ", time.time()-runtime
def run():
# Reset any running tasks at runner start; if anything got stuck
# because of a restart, we want to clear it when we start.
for i in Updater.objects.filter(running=True):
i.running = False
i.save()
while True:
try:
time_threshold = timezone.now()
for i in Updater.objects.filter(next_update__lt=time_threshold, failure_count__lt=5, running=False).order_by('next_update'):
updater_props = type_data.get(i.type, None)
if not updater_props: continue
i = getattr(i, updater_props['prop'])
# Set next update here; then reset it again when the function actually finishes.
i.running = True
i.save()
t = threading.Thread(target=thread_runner, args=[i])
t.start()
except Exception, E:
print "Something very basic went wrong with something: %s" % E
time.sleep(1)
if __name__ == "__main__":
run()
|
MenuBar.py
|
from retrieval.Document import Document
from gui.window.AboutWindow import AboutWindow
from gui.window.WarningPopup import WarningPopup
import os
import threading
import pickle
import webbrowser
import tkinter as tk
from tkinter import filedialog
class MenuBar:
def __init__(self, gui):
"""
The constructor for MenuBar class.
Parameters
----------
gui : gui
The main gui.
Returns
-------
None.
"""
self.__gui = gui
# Initialize the menu bar in window.
menu = tk.Menu(master=self.__gui.window)
self.__gui.window.config(menu=menu)
# Initialize the file menu in menu bar.
file_menu = tk.Menu(menu, tearoff=False)
menu.add_cascade(label='File', menu=file_menu)
file_menu.add_command(label='New file', command=self.__new_window)
file_menu.add_command(label='Save index', command=self.__save_index)
file_menu.add_command(label='Load index', command=self.__load_index)
file_menu.add_separator()
file_menu.add_command(label='Update index', command=self.__update_index)
file_menu.add_separator()
file_menu.add_command(label='Exit', command=self.__gui.window.destroy)
# Initialize the option menu in menu bar.
for preprocessor_item in self.__gui.preprocessor_option:
preprocessor_item.set(True)
option_menu = tk.Menu(menu, tearoff=False)
menu.add_cascade(label='Option', menu=option_menu)
option_menu.add_checkbutton(label='Stop words removal', onvalue=1, offvalue=0, variable=self.__gui.preprocessor_option[0])
option_menu.add_checkbutton(label='Stemming', onvalue=1, offvalue=0, variable=self.__gui.preprocessor_option[1])
option_menu.add_checkbutton(label='Case folding', onvalue=1, offvalue=0, variable=self.__gui.preprocessor_option[2])
option_menu.add_checkbutton(label='Normalization', onvalue=1, offvalue=0, variable=self.__gui.preprocessor_option[3])
option_menu.add_separator()
self.__gui.autorenaming_option.set(True)
option_menu.add_checkbutton(label='Auto-renamed clusters', onvalue=1, offvalue=0, variable=self.__gui.autorenaming_option)
# Initialize the help menu in menu bar.
help_menu = tk.Menu(menu, tearoff=False)
menu.add_cascade(label='Help', menu=help_menu)
help_menu.add_command(label='Documentation', command=self.__documentation)
help_menu.add_separator()
help_menu.add_command(label='About', command=self.__about)
def __new_window(self):
"""
The method to reset the window.
Returns
-------
None.
"""
self.__gui._restart()
def __save_index(self):
"""
The method to save an inverted index as a pickle file.
Returns
-------
None.
"""
# Check whether an index is built or not.
if self.__gui.inverted_index is not None:
index_path = filedialog.asksaveasfilename(defaultextension='.pickle', filetypes=(('pickle file', '*.pickle'),))
# Store metadata.
metadata = {}
metadata['folder_path'] = self.__gui.folder_path
metadata['corpus'] = self.__gui.corpus
# Store preprocessor options.
preprocessor_option = []
preprocessor_option.append(self.__gui.preprocessor_option[0].get())
preprocessor_option.append(self.__gui.preprocessor_option[1].get())
preprocessor_option.append(self.__gui.preprocessor_option[2].get())
preprocessor_option.append(self.__gui.preprocessor_option[3].get())
metadata['preprocessor_option'] = preprocessor_option
# Store index and metadata.
data = {}
data['index'] = self.__gui.inverted_index
data['metadata'] = metadata
with open(index_path, 'wb') as handle:
pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
popup = WarningPopup(self.__gui, 'Saving an index',
'There is no index to be saved!')
popup._start()
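# Rough sketch of the pickle layout written above and expected by
# __load_index (values are placeholders, not real data):
#
#   {'index': {...inverted index...},
#    'metadata': {'folder_path': 'C:/corpus',
#                 'corpus': [...Document objects...],
#                 'preprocessor_option': [True, True, True, True]}}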
def __load_index(self):
"""
The method to load a saved inverted index.
Returns
-------
None.
"""
index_path = filedialog.askopenfilename(filetypes=(('pickle file', '*.pickle'),))
try:
with open(index_path, 'rb') as handle:
data = pickle.load(handle)
# Check whether the file loaded is the correct index file.
if isinstance(data, dict) and 'index' in data and 'metadata' in data and 'folder_path' in data['metadata'] and 'corpus' in data['metadata'] and 'preprocessor_option' in data['metadata']:
# Load the inverted index.
self.__gui.inverted_index = data['index']
# Load the metadata.
metadata = data['metadata']
self.__gui.folder_path = metadata['folder_path']
self.__gui.corpus = metadata['corpus']
self.__gui.preprocessor_option = metadata['preprocessor_option']
# Set the cluster status to True, indicating clustering process is ready to do.
self.__gui.cluster_status = True
popup = WarningPopup(self.__gui, 'Loading an index',
'An index is successfully loaded!')
popup._start()
else:
popup = WarningPopup(self.__gui, 'Loading an index',
'File loaded does not match!')
popup._start()
except EnvironmentError:
popup = WarningPopup(self.__gui, 'Loading an index',
'There is no index to be loaded!')
popup._start()
def __update_index(self):
"""
The method to update an inverted index.
Returns
-------
None
"""
# Check whether an index is built or not.
if self.__gui.inverted_index is not None:
folder_path = self.__gui.folder_path
try:
# Retrieve all .txt files in folder.
curr_doc_titles = []
for root, directories, files in os.walk(folder_path):
for file in files:
if '.txt' in file:
curr_doc_titles.append(file.replace('.txt', ''))
# Compare between current document list and saved document list.
difference = list(set(curr_doc_titles) - set([doc.title for doc in self.__gui.corpus]))
# Check if there is a difference between two document lists.
if len(difference) == 0:
popup = WarningPopup(self.__gui, 'Updating an index',
'The index is currently up to date!')
popup._start()
else:
extended_corpus = []
for i in range(len(self.__gui.corpus), len(self.__gui.corpus) + len(difference)):
doc_id = 'doc_' + str(i)
doc_title = difference[i - len(self.__gui.corpus)]
doc_content = open(folder_path + '\\' + difference[i - len(self.__gui.corpus)] + '.txt', 'r', encoding='utf-8').read().replace('\n', '')
doc_i = Document(doc_id, doc_title, doc_content)
extended_corpus.append(doc_i)
# Update the corpus and inverted index.
self.__gui.corpus = self.__gui.corpus + extended_corpus
updating_thread = threading.Thread(target=self.__gui._update_inverted_index, args=(extended_corpus,), name='updating_thread')
updating_thread.start()
except EnvironmentError:
popup = WarningPopup(self.__gui, 'Updating an index',
'The file path of the saved index does not exist!')
popup._start()
else:
popup = WarningPopup(self.__gui, 'Updating an index',
'There is no index to be updated!')
popup._start()
def __documentation(self):
"""
The method to show documentation on GitHub.
This feature is currently not available.
Returns
-------
None.
"""
webbrowser.open('https://github.com/williamo1099/Skripsi-2020')
def __about(self):
"""
The method to show an overview of the application.
Returns
-------
None.
"""
self.__about_window = AboutWindow(self.__gui)
self.__about_window._start()
|
app.py
|
#! /usr/bin/env python3
"""
Title: Hoomaluo - Cool
Project: Alternative Cooling Controller
Author: Hoomaluo Labs
Version: 1.0
Date: 08-27-2018
Overview:
* Communicate with a server over MQTT:
- report temperature and energy
* store locally if disconnected
* report local data once back online
- receive control inputs
* Communicate with STM32 uController over RS232:
- send status, temp setpoint, and IR command
- receive power (ac and dc) and temperature as json object
* Read GPIO buttons for temperature setpoints
* Device modes:
1: send ac and dc energy
2: maybe something here for later
Packages:
* paho-mqtt ; for mqtt communication: https://pypi.python.org/pypi/paho-mqtt/1.1
* neo4j ; for later
* pandas
* numpy
* threading
"""
#import csv
#import pandas as pd
from time import sleep,time
import datetime
import paho.mqtt.client as mqtt
import json
import threading
import os
import sys
#from neo4j.v1 import GraphDatabase
from apscheduler.schedulers.background import BackgroundScheduler
from gpiozero import Button
import serial
""" serial handles
ser = serial.Serial('/dev/ttyUSB0') # open serial port
ser.write(b'hello') # write a string
x = ser.read() # read one byte
s = ser.read(10) # read up to ten bytes (timeout)
line = ser.readline() # read a '\n' terminated line
ser.is_open
"""
#def parse_IRcodes(filename):
# """ Parse IR codes from filename and return a dataframe """
# return pd.read_csv(filename)
def c2f(c):
return (9/5)*c+32
def f2c(f):
return (5/9)*(f-32)
class Container:
def __init__(self, serialConnection, IRcodesFile, setpoint=55, status=1):
""" initialize variables """
self.temperature = []
self.ts = int(time())
self.ace_accum = 0
self.dce_accum = 0
self.irms = []
self.vrms = []
self.watts = []
self.ser = serialConnection
with open(IRcodesFile) as json_data:
self.IRcommands = json.load(json_data)
def sendIRcode(self, mode, setpoint, endswith="transmit"):
""" send the IR code """
outString = ""
for val in self.IRcommands[mode][setpoint]:
outString += str(val)+ '?'
outString += endswith
self.sendBytesToSTM(outString.encode("utf-8"))
def sendControls(self, status, tempset):
""" send the temp setpoint and status """
outString = str(status) + '?' + str(tempset) + '?control'
self.sendBytesToSTM(outString.encode("utf-8"))
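# Illustrative examples of the strings written to the STM32 (values are
# assumptions, not captured from hardware):
#   sendControls(1, 72)        -> b"1?72?control"
#   sendIRcode("cool3", "62")  -> b"<code0>?<code1>?...?transmit", where the
#                                 numeric codes come from IRJSON.json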
def sendBytesToSTM(self, byteArray):
if self.ser.is_open:
if debug: print("Serial is open. Sending: ", byteArray)
self.ser.write(byteArray)
else:
try:
self.ser.open()
if debug: print("Serial is open. Sending: ", byteArray)
self.ser.write(byteArray)
except:
if debug: print("Cannot open port.")
""" TODO: need some routine to try again if failed """
def readSTM(self, ser):
"read temp and energy from the STM ... comes in as a json object I think"
while True:
if ser.is_open:
self.processReading(ser.read_until(), int(time())) # adjust character based on code
else:
try:
ser.open()
self.processReading(ser.read_until('\n'), int(time())) # adjust character based on code
except:
if debug: print("Cannot read from port .")
""" TODO: need some routine to try again if failed """
def processReading(self, reading, ts, serialDebug=False):
""" update energy accumulators based on power readings and time interval
Sample:
readings = u'{"temp":70.00,"temp2":70.00,"awatt":-0.01,"ava":-0.01,"apf":1.00,"avrms":73735.22,"airms":18318.55,"awatt2":-0.01,"ava2":-0.01,"apf2":1.00,"avrms2":18318.55,"bwatt":-0.01,"bva":-0.01,"bpf":1.00,"bvrms":73735.22,"birms":18318.55,"bwatt2":-0.01,"bva2":-0.01,"bpf2":1.00,"birms2":18318.55,"cwatt":-0.01,"cva":-0.01,"cpf":1.00,"cvrms":73735.22,"cirms":18318.55,"cwatt2":-0.01,"cva2":-0.01,"cpf2":1.00,"cirms2":18318.55,"dcp":0.00,"dcv":0.01,"dci":0.00,"dcp2":0.00,"dcv2":0.06,"dci2":0.01}'
"""
# convert string to json
if serialDebug:
print(reading)
print(type(reading))
if isinstance(reading, str): a = json.loads(reading)
else: a = json.loads(reading.decode("utf-8")) # turn json string into an object
if serialDebug: print(a)
# update temperature
self.temperature.append(a['temp'])
# get time interval
timedelta = ts - self.ts
self.ts = ts
# calculate energies
self.ace_accum += timedelta * (a['awatt'] + a['bwatt']) / 3600.0 # watt-hours accumulated since last reading
self.dce_accum += timedelta * (a['dcp']) / 3600.0 # watt-hours
self.irms.append(a['airms']+a['birms'])
self.vrms.append(a['avrms']+a['bvrms'])
self.watts.append(a['awatt'] + a['bwatt'])
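# Worked example of the accumulation above (assumed numbers): a reading taken
# 15 seconds after the previous one with awatt + bwatt = 100 W adds
# 15 * 100 / 3600.0 = ~0.417 Wh to ace_accum.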
def resetEnergyAccumulators(self):
self.ace_accum = 0
self.dce_accum = 0
self.irms = []
self.vrms = []
self.watts = []
def resetTempAccumulators(self):
self.temperature = []
class Radio:
def __init__(self, devId, custId, Controller):
self.devId = devId
self.custId = custId
self.controller = Controller
# subscriptions
self.subControls = "maluo_1/set/"+custId+"/"+devId+"/ircontrol"
self.subSettings = "maluo_1/set/"+custId+"/"+devId+"/info"
# publishing
self.pubEnergy = "maluo_1/metering/energy/"+custId+"/"+devId
self.pubTemp = "maluo_1/metering/temperature/"+custId+"/"+devId
self.pubControls = "maluo_1/set/"+custId+"/"+devId+"/ircontrol"
self.storeLocalTemp = False
self.midTemp = 0
self.lastTempPayload = ""
self.storeLocalEnergy = False
self.midEnergy = 0
self.lastEnergyPayload = ""
self.storeLocalControls = False
self.midControls = 0
self.lastControlsPayload = ""
# MQTT client
self.client = mqtt.Client(devId)
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
# Need to fix this to attempt reconnect
try:
self.client.connect("post.redlab-iot.net", 55100, 60)
if debug: print("connection established")
except:
if debug: print("connection failed")
self.client.loop_start()
def on_connect(self, client, userdata, flags, rc):
""" Callback function when client connects """
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
sleep(5) # quick delay
self.client.subscribe(self.subControls)
self.client.subscribe(self.subSettings)
def on_publish(self, client, userdata, mid):
""" Callback function that's called on successful delivery (need qos 1 or 2 for this to make sense) """
if debug: print("on_publish ... \nuserdata: ", userdata, "\nmid: ", mid)
if mid == self.midTemp:
self.storeLocalTemp = False
elif mid == self.midEnergy:
self.storeLocalEnergy = False
elif mid == self.midControls:
self.storeLocalControls = False
def on_message(self, client, userdata, msg):
""" Callback function when client receives message """
data = json.loads(msg.payload.decode("utf-8"))
if debug: print("topic: ", msg.topic, " payload:", data)
#print "Received: ", data
if msg.topic == self.subControls:
self.controller.setpoint = int(data['temp'])
status_old = self.controller.status
if data['mode'] == "auto" or data['mode'] == "cool1" or data['mode'] == "cool2" or data['mode'] == "cool3":
self.controller.status = 1
elif data['mode'] == "off":
self.controller.status = 0
if status_old and self.controller.status: onoff = False
elif status_old and not self.controller.status: onoff = True
elif not status_old and self.controller.status: onoff = True
else: onoff = False
self.controller.updateControls(onoff = onoff, radio=False)
elif msg.topic == self.subSettings :
self.controller.temp_interval = int(data['temp-res'])
self.controller.energy_interval = int(data['energy-res'])
self.controller.updateIntervals()
else:
pass
# update
def sendTemperature(self):
""" send measurement to self.pubTemp"""
if len(self.controller.myContainer.temperature) != 0:
temp = sum(self.controller.myContainer.temperature) / len(self.controller.myContainer.temperature)
else:
temp = 0
payload = ('{"ts": '+ str(int(time())) + ', "temp":' + str(temp) +
'"data": { "status": ' + str(self.controller.status) + ', "setpoint": '+ str(self.controller.setpoint) + ' }}' )
res, self.midTemp = self.client.publish(self.pubTemp, payload, qos=1, retain=False)
if debug: print("Sent: ", payload , "on", self.pubTemp, "mid: ", self.midTemp)
self.controller.myContainer.resetTempAccumulators()
filename = self.pubTemp.replace("/", "-") + ".txt"
if self.storeLocalTemp:
f = open(filename, 'a+')
f.write(self.lastTempPayload+"\n")
f.close()
self.storeLocalTemp = True
self.lastTempPayload = payload
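# A minimal sketch of building the same temperature payload with json.dumps,
# which avoids hand-quoting (field names mirror the string above; this is an
# alternative, not what the running code does):
def _example_temperature_payload(ts, temp, status, setpoint):
    return json.dumps({"ts": ts, "temp": temp,
                       "data": {"status": status, "setpoint": setpoint}})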
def sendEnergy(self):
""" send availability to self.pubEnergy """
if len(self.controller.myContainer.vrms) != 0:
vrms = sum(self.controller.myContainer.vrms) / len(self.controller.myContainer.vrms)
irms = sum(self.controller.myContainer.irms) / len(self.controller.myContainer.irms)
watts = sum(self.controller.myContainer.watts) / len(self.controller.myContainer.watts)
else:
vrms = irms = watts = 0
payload = ('{"ts": '+ str(int(time())) + ', "ace": ' + str(self.controller.myContainer.ace_accum)
+ ', "dce": ' + str(self.controller.myContainer.dce_accum)+
', "data": { "watt": ' + str(watts) + ', "vrms": '+ str(vrms) + ', "irms": '+ str(irms) + ' }}' )
res, self.midEnergy = self.client.publish(self.pubEnergy, payload, qos=1, retain=False)
if debug: print("Sent: ", payload , "on", self.pubEnergy, "mid: ", self.midEnergy)
self.controller.myContainer.resetEnergyAccumulators()
filename = self.pubEnergy.replace("/", "-") + ".txt"
if self.storeLocalEnergy:
f = open(filename, 'a+')
f.write(self.lastEnergyPayload+"\n")
f.close()
self.storeLocalEnergy = True
self.lastEnergyPayload = payload
def sendControls(self):
""" send the manual control updates to the server """
if self.controller.status:
mode = '"cool3"'
temp = self.controller.setpoint
else:
mode = '"off"'
temp = self.controller.setpoint
payload = '{"mode": ' + mode + ', "temp": ' + str(temp) + '}'
res, self.midControls = self.client.publish(self.pubControls, payload, qos=1, retain=False)
if debug: print("Sent", payload, "on", self.pubControls, "mid: ", self.midControls)
filename = self.pubControls.replace("/", "-") + ".txt"
if self.storeLocalControls:
f = open(filename, 'a+')
f.write(self.lastControlsPayload+"\n")
f.close()
self.storeLocalControls = True
self.lastControlsPayload = payload
class Controller:
def __init__(self):
#devId = os.environ["devId"]
self.devId = "101" # temporary
self.custId = "101" # temporary
self.serPort = "/dev/ttyACM0" # python -m serial.tools.list_ports
self.ser = serial.Serial(self.serPort) # open serial port
self.status = 1 # will be updated on restart
self.setpoint = 55 # will be updated on restart
self.temp_interval = 1 # minutes between temperature reports
self.energy_interval = 1 # minutes between energy reports
self.myContainer = Container(self.ser, "IRJSON.json", self.setpoint, self.status)
self.myRadio = Radio(self.devId, self.custId, self)
self.scheduler = BackgroundScheduler({'apscheduler.timezone': 'UTC',})
self.addJobs()
self.scheduler.start()
def addJobs(self):
if debug: print("added jobs")
self.tempReporter = self.scheduler.add_job(self.myRadio.sendTemperature,
'interval',
minutes=self.temp_interval)
self.energyReporter = self.scheduler.add_job(self.myRadio.sendEnergy,
'interval',
minutes=self.energy_interval)
def updateControls(self, onoff=False, radio=True):
""" update the control settings """
self.myContainer.sendControls(self.status, self.setpoint)
if onoff and self.status: self.myContainer.sendIRcode("cool3", "62")
elif onoff and not self.status: self.myContainer.sendIRcode("off", "0")
if radio:
self.myRadio.sendControls()
def updateIntervals(self):
""" update the intervals for sending temperature and energy """
for job in self.scheduler.get_jobs():
job.remove()
self.addJobs()
def buttonUpPushed(self):
if debug: print("Up button pushed!")
self.setpoint += 1
self.updateControls()
def buttonDownPushed(self):
if debug: print("Down button pushed!")
self.setpoint -= 1
self.updateControls()
def buttonOnPushed(self):
if debug: print("On button pushed")
self.status = abs(self.status - 1)
self.updateControls(True)
def main():
global debug
debug = True
myController = Controller()
onButton = Button(5)
upButton = Button(11)
downButton = Button(9)
onButton.when_pressed = myController.buttonOnPushed
upButton.when_pressed = myController.buttonUpPushed
downButton.when_pressed = myController.buttonDownPushed
ser_thread = threading.Thread(target=myController.myContainer.readSTM, args = [myController.ser])
print("start serial read thread")
ser_thread.start()
try:
while True:
sleep(10)
except (KeyboardInterrupt, SystemExit):
# Not strictly necessary if daemonic mode is enabled but should be done if possible
myController.scheduler.shutdown()
if __name__ == "__main__":
main()
|
misc.py
|
"""
Misc module contains stateless functions that could be used during pytest execution,
or outside during setup/teardown of the integration tests environment.
"""
import contextlib
import errno
import multiprocessing
import os
import shutil
import stat
import subprocess
import sys
import tempfile
import time
from distutils.version import LooseVersion
import pkg_resources
import requests
from OpenSSL import crypto
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, NoEncryption
from six.moves import socketserver, SimpleHTTPServer
from acme import crypto_util
RSA_KEY_TYPE = 'rsa'
ECDSA_KEY_TYPE = 'ecdsa'
def check_until_timeout(url):
"""
Wait and block until given url responds with status 200, or raise an exception
after 150 attempts.
:param str url: the URL to test
:raise ValueError: exception raised after 150 unsuccessful attempts to reach the URL
"""
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
for _ in range(0, 150):
time.sleep(1)
try:
if requests.get(url, verify=False).status_code == 200:
return
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after 150 attempts: {0}'.format(url))
class GracefulTCPServer(socketserver.TCPServer):
"""
This subclass of TCPServer allows graceful reuse of an address that has
just been released by another instance of TCPServer.
"""
allow_reuse_address = True
@contextlib.contextmanager
def create_http_server(port):
"""
Setup and start an HTTP server for the given TCP port.
This server stays active for the lifetime of the context, and is automatically
stopped with context exit, while its temporary webroot is deleted.
:param int port: the TCP port to use
:return str: the temporary webroot attached to this server
"""
current_cwd = os.getcwd()
webroot = tempfile.mkdtemp()
def run():
GracefulTCPServer(('', port), SimpleHTTPServer.SimpleHTTPRequestHandler).serve_forever()
process = multiprocessing.Process(target=run)
try:
# SimpleHTTPServer is designed to serve files from the current working directory at the
# time it starts. So we temporarily change the cwd to our crafted webroot before launch.
try:
os.chdir(webroot)
process.start()
finally:
os.chdir(current_cwd)
check_until_timeout('http://localhost:{0}/'.format(port))
yield webroot
finally:
try:
if process.is_alive():
process.terminate()
process.join() # Block until process is effectively terminated
finally:
shutil.rmtree(webroot)
def list_renewal_hooks_dirs(config_dir):
"""
Find and return paths of all hook directories for the given certbot config directory
:param str config_dir: path to the certbot config directory
:return str[]: list of path to the standard hooks directory for this certbot instance
"""
renewal_hooks_root = os.path.join(config_dir, 'renewal-hooks')
return [os.path.join(renewal_hooks_root, item) for item in ['pre', 'deploy', 'post']]
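# For example (hypothetical config path), list_renewal_hooks_dirs('/etc/letsencrypt')
# returns:
#   ['/etc/letsencrypt/renewal-hooks/pre',
#    '/etc/letsencrypt/renewal-hooks/deploy',
#    '/etc/letsencrypt/renewal-hooks/post']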
def generate_test_file_hooks(config_dir, hook_probe):
"""
Create a suite of certbot hook scripts and put them in the relevant hook directory
for the given certbot configuration directory. These scripts, when executed, will write
specific verbs in the given hook_probe file to allow asserting they have effectively
been executed. The deploy hook also checks that the renewal environment variables are set.
:param str config_dir: current certbot config directory
:param hook_probe: path to the hook probe to test hook scripts execution
"""
if sys.platform == 'win32':
extension = 'bat'
else:
extension = 'sh'
renewal_hooks_dirs = list_renewal_hooks_dirs(config_dir)
renewal_deploy_hook_path = os.path.join(renewal_hooks_dirs[1], 'hook.sh')
for hook_dir in renewal_hooks_dirs:
# We want an equivalent of bash `mkdir -p $HOOK_DIR` that does not fail if part of
# the hierarchy already exists. os.makedirs does not behave that way by default:
# Python 3 has an optional `exist_ok` parameter to not fail on an existing dir, but
# Python 2.7 does not, so we use try/except. To be removed when py27 support is dropped.
try:
os.makedirs(hook_dir)
except OSError as error:
if error.errno != errno.EEXIST:
raise
hook_path = os.path.join(hook_dir, 'hook.{0}'.format(extension))
if extension == 'sh':
data = '''\
#!/bin/bash -xe
if [ "$0" = "{0}" ]; then
if [ -z "$RENEWED_DOMAINS" -o -z "$RENEWED_LINEAGE" ]; then
echo "Environment variables not properly set!" >&2
exit 1
fi
fi
echo $(basename $(dirname "$0")) >> "{1}"\
'''.format(renewal_deploy_hook_path, hook_probe)
else:
# TODO: Write the equivalent bat file for Windows
data = '''\
'''
with open(hook_path, 'w') as file:
file.write(data)
os.chmod(hook_path, os.stat(hook_path).st_mode | stat.S_IEXEC)
@contextlib.contextmanager
def manual_http_hooks(http_server_root, http_port):
"""
Generate suitable http-01 hooks command for test purpose in the given HTTP
server webroot directory. These hooks command use temporary python scripts
that are deleted upon context exit.
:param str http_server_root: path to the HTTP server configured to serve http-01 challenges
:param int http_port: HTTP port that the HTTP server listen on
:return (str, str): a tuple containing the authentication hook and cleanup hook commands
"""
tempdir = tempfile.mkdtemp()
try:
auth_script_path = os.path.join(tempdir, 'auth.py')
with open(auth_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import requests
import time
import sys
challenge_dir = os.path.join('{0}', '.well-known', 'acme-challenge')
os.makedirs(challenge_dir)
challenge_file = os.path.join(challenge_dir, os.environ.get('CERTBOT_TOKEN'))
with open(challenge_file, 'w') as file_h:
file_h.write(os.environ.get('CERTBOT_VALIDATION'))
url = 'http://localhost:{1}/.well-known/acme-challenge/' + os.environ.get('CERTBOT_TOKEN')
for _ in range(0, 10):
time.sleep(1)
try:
if requests.get(url).status_code == 200:
sys.exit(0)
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after 10 attempts: {{0}}'.format(url))
'''.format(http_server_root, http_port))
os.chmod(auth_script_path, 0o755)
cleanup_script_path = os.path.join(tempdir, 'cleanup.py')
with open(cleanup_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import shutil
well_known = os.path.join('{0}', '.well-known')
shutil.rmtree(well_known)
'''.format(http_server_root))
os.chmod(cleanup_script_path, 0o755)
yield ('{0} {1}'.format(sys.executable, auth_script_path),
'{0} {1}'.format(sys.executable, cleanup_script_path))
finally:
shutil.rmtree(tempdir)
def get_certbot_version():
"""
Find the version of the certbot available in PATH.
:return str: the certbot version
"""
output = subprocess.check_output(['certbot', '--version'],
universal_newlines=True, stderr=subprocess.STDOUT)
# Typical response is: output = 'certbot 0.31.0.dev0'
version_str = output.split(' ')[1].strip()
return LooseVersion(version_str)
def generate_csr(domains, key_path, csr_path, key_type=RSA_KEY_TYPE):
"""
Generate a private key, and a CSR for the given domains using this key.
:param domains: the domain names to include in the CSR
:type domains: `list` of `str`
:param str key_path: path to the private key that will be generated
:param str csr_path: path to the CSR that will be generated
:param str key_type: type of the key (misc.RSA_KEY_TYPE or misc.ECDSA_KEY_TYPE)
"""
if key_type == RSA_KEY_TYPE:
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
elif key_type == ECDSA_KEY_TYPE:
key = ec.generate_private_key(ec.SECP384R1(), default_backend())
key = key.private_bytes(encoding=Encoding.PEM, format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption())
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
else:
raise ValueError('Invalid key type: {0}'.format(key_type))
key_bytes = crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
with open(key_path, 'wb') as file:
file.write(key_bytes)
csr_bytes = crypto_util.make_csr(key_bytes, domains)
with open(csr_path, 'wb') as file:
file.write(csr_bytes)
def read_certificate(cert_path):
"""
Load the certificate from the provided path, and return a human readable version of it (TEXT mode).
:param str cert_path: the path to the certificate
:returns: the TEXT version of the certificate, as it would be displayed by openssl binary
"""
with open(cert_path, 'rb') as file:
data = file.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, data)
return crypto.dump_certificate(crypto.FILETYPE_TEXT, cert).decode('utf-8')
def load_sample_data_path(workspace):
"""
Load the certbot configuration example designed to make OCSP tests, and return its path
:param str workspace: current test workspace directory path
:returns: the path to the loaded sample data directory
:rtype: str
"""
original = pkg_resources.resource_filename('certbot_integration_tests', 'assets/sample-config')
copied = os.path.join(workspace, 'sample-config')
shutil.copytree(original, copied, symlinks=True)
return copied
|
test_datautils_align.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module test align in aby3 module.
"""
import unittest
import multiprocessing as mp
import paddle_fl.mpc.data_utils.alignment as alignment
class TestDataUtilsAlign(unittest.TestCase):
@staticmethod
def run_align(input_set, party_id, endpoints, is_receiver, ret_list):
"""
Call align function in data_utils.
:param input_set:
:param party_id:
:param endpoints:
:param is_receiver:
:return:
"""
result = alignment.align(input_set=input_set,
party_id=party_id,
endpoints=endpoints,
is_receiver=is_receiver)
ret_list.append(result)
def test_align(self):
"""
Test normal case for align function.
:return:
"""
endpoints = '0:127.0.0.1:11111,1:127.0.0.1:22222,2:127.0.0.1:33333'
set_0 = {'0', '10', '20', '30'}
set_1 = {'0', '10', '11', '111'}
set_2 = {'0', '30', '33', '333'}
mp.set_start_method('spawn')
manager = mp.Manager()
ret_list = manager.list()
party_0 = mp.Process(target=self.run_align, args=(set_0, 0, endpoints, True, ret_list))
party_1 = mp.Process(target=self.run_align, args=(set_1, 1, endpoints, False, ret_list))
party_2 = mp.Process(target=self.run_align, args=(set_2, 2, endpoints, False, ret_list))
party_1.start()
party_2.start()
party_0.start()
party_0.join()
party_1.join()
party_2.join()
self.assertEqual(3, len(ret_list))
self.assertEqual(ret_list[0], ret_list[1])
self.assertEqual(ret_list[0], ret_list[2])
self.assertEqual({'0'}, ret_list[0])
if __name__ == '__main__':
unittest.main()
|
api_test.py
|
import datetime
import json
import io
import os
import re
import shutil
import socket
import tempfile
import threading
import time
import unittest
import docker
from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
from . import fake_api
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
res.raw = raw
return res
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_resp(method, url, *args, **kwargs):
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
def fake_read_from_socket(self, response, stream, tty=False):
return six.binary_type()
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.api.client.APIClient',
get=fake_get,
post=fake_post,
put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
self.patcher.stop()
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
APIClient(version=1.12)
assert str(
excinfo.value
) == 'Version parameter must be a string or None. Found float'
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
assert url == '{0}{1}'.format(
url_prefix, 'hello/somename/world/someothername'
)
url = self.client._url('/hello/{0}/world', 'some?name')
assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
url = self.client._url("/images/{0}/push", "localhost:5000/image")
assert url == '{0}{1}'.format(
url_prefix, 'images/localhost:5000/image/push'
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
assert url == '{0}{1}'.format(url_prefix, 'simple')
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = APIClient(version="auto")
assert isinstance(client._version, six.string_types)
assert not (client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
assert isinstance(version, six.string_types)
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_login(self):
self.client.login('sakuya', 'izayoi')
args = fake_request.call_args
assert args[0][0] == 'POST'
assert args[0][1] == url_prefix + 'auth'
assert json.loads(args[1]['data']) == {
'username': 'sakuya', 'password': 'izayoi'
}
assert args[1]['headers'] == {'Content-Type': 'application/json'}
assert self.client._auth_configs['auths'] == {
'docker.io': {
'email': None,
'password': 'izayoi',
'username': 'sakuya',
'serveraddress': None,
}
}
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True,
timeout=None
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True,
timeout=None
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True,
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
assert 'SecurityOpt' in result
assert result['SecurityOpt'] == security_opt
with pytest.raises(TypeError):
self.client.create_host_config(security_opt='wrong')
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
if six.PY3:
content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
assert result == content_str
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
assert result == content
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
assert result == content_str.decode('utf-8')
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
assert result == content
class StreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with APIClient(base_url="http+unix://" + self.socket_file) as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
assert list(stream) == [
str(i).encode() for i in range(50)]
class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
self.mock_send = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_default_user_agent(self):
client = APIClient()
client.version()
assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
expected = 'docker-sdk-python/%s' % docker.__version__
assert headers['User-Agent'] == expected
def test_custom_user_agent(self):
client = APIClient(user_agent='foo/bar')
client.version()
assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
assert headers['User-Agent'] == 'foo/bar'
class DisableSocketTest(unittest.TestCase):
class DummySocket(object):
def __init__(self, timeout=60):
self.timeout = timeout
def settimeout(self, timeout):
self.timeout = timeout
def gettimeout(self):
return self.timeout
def setUp(self):
self.client = APIClient()
def test_disable_socket_timeout(self):
"""Test that the timeout is disabled on a generic socket object."""
socket = self.DummySocket()
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
def test_disable_socket_timeout2(self):
"""Test that the timeouts are disabled on a generic socket object
and it's _sock object if present."""
socket = self.DummySocket()
socket._sock = self.DummySocket()
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
assert socket._sock.timeout is None
def test_disable_socket_timout_non_blocking(self):
"""Test that a non-blocking socket does not get set to blocking."""
socket = self.DummySocket()
socket._sock = self.DummySocket(0.0)
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
assert socket._sock.timeout == 0.0
|
opencv_multiprocessing.py
|
import multiprocessing
import cv2
queue_from_cam = multiprocessing.Queue()
def cam_loop(queue_from_cam):
print('initializing cam')
cap = cv2.VideoCapture(0)
while True:
print('querying frame')
hello, img = cap.read()
print('queueing image')
queue_from_cam.put(img)
print('cam_loop done')
cam_process = multiprocessing.Process(target=cam_loop,args=(queue_from_cam,))
cam_process.start()
while queue_from_cam.empty():
pass
print('getting image')
from_queue = queue_from_cam.get()
# print('saving image')
# cv2.imwrite('temp.png', from_queue)
# print('image saved')
while True:
print("showing image")
from_queue = queue_from_cam.get()
cv2.imshow('img', from_queue)
if cv2.waitKey(30) >= 0:
break
cv2.destroyAllWindows()
cam_process.terminate()
# =====================================================================================
# import multiprocessing
# import cv2
#
# queue_from_cam = multiprocessing.Queue()
#
# def cam_loop(queue_from_cam):
# print('initializing cam')
# cap = cv2.VideoCapture(0)
# print('querying frame')
# hello, img = cap.read()
# print('queueing image')
# queue_from_cam.put(img)
# print('cam_loop done')
#
# cam_process = multiprocessing.Process(target=cam_loop,args=(queue_from_cam,))
# cam_process.start()
#
# while queue_from_cam.empty():
# pass
#
# print('getting image')
# from_queue = queue_from_cam.get()
# print('saving image')
# cv2.imwrite('temp.png', from_queue)
# print('image saved')
|
Golestan_Crawler.py
|
from pydoc import text
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
from unidecode import unidecode
from itertools import cycle
import os
import json
import Data_Handler
import telegram
from threading import Thread
import emoji
import winsound
duration = 1000 # milliseconds
freq = 440
DEFAULT_PORT = 0
DEFAULT_SERVICE_LOG_PATH = None
DEFAULT_KEEP_ALIVE = None
options = webdriver.ChromeOptions()
options.add_experimental_option("detach", True)
paths = {
'Data' : os.path.join(os.getcwd(), 'Data'),
}
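# zip_equal pairs two iterables of unequal length by cycling the shorter one.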
def zip_equal(iter1,iter2):
return zip(iter1,cycle(iter2)) if len(iter1) > len(iter2) else zip(cycle(iter1),iter2)
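# Mymap prefixes each report line with a check-mark emoji and batches the lines into
# Telegram-sized messages of roughly ten lines each.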
def Mymap(iter):
check_mark = emoji.emojize(":check_mark_button:")
iter2 = []
string = ''
len_iter = len(iter)
for x in range(len_iter):
string += check_mark + iter[x]+"\n"
if x % 10 == 0:
iter2.append(string)
string = ''
iter2.append(string)
iter2[1] = iter2[0] + iter2[1]
iter2.pop(0)
return iter2
class Golestan_Crawler(webdriver.Chrome):
def __init__(self, executable_path, port=DEFAULT_PORT,
options = None, service_args=None,
desired_capabilities=None, service_log_path=DEFAULT_SERVICE_LOG_PATH,
chrome_options=None, service = None, keep_alive=DEFAULT_KEEP_ALIVE) -> None:
super().__init__(executable_path, port=port,
options=options, service_args=service_args,
desired_capabilities=desired_capabilities, service_log_path=service_log_path,
chrome_options=chrome_options, service=service, keep_alive=keep_alive)
self.user = os.environ.get("id")
self.password = os.environ.get("pass")
self.xpaths = {
'user' : '/html/body/div/table[2]/tbody/tr[2]/td[2]/input',
'password' : '/html/body/div/table[2]/tbody/tr[3]/td[2]/input',
'Captcha' : '/html/body/div/table[2]/tbody/tr[4]/td[2]/input',
'iframe' : '/html/body/div[4]/div/iframe',
'infrmset-frame' : '/html/frameset/frameset',
'Master-Frame' : '/html/frameset/frameset/frame[2]',
'Form_Body-frame' : '/html/frameset/frame[2]',
'Form_Body-frame2' : '/html/frameset/frame[3]',
'report-finder' : '/html/body/table[1]/tbody/tr[8]/td[3]/table/tbody/tr/td[8]/input',
'collage-code' : '/html/body/div[1]/div[2]/table/tbody/tr[4]/td[12]/input',
'page-number-textbox' : '/html/body/table/tbody/tr/td[4]/input',
'forward-buttom' :'/html/body/table/tbody/tr/td[5]/input',
'backward-bottom' : '/html/body/table/tbody/tr/td[3]/input',
'Close-button' : '/html/body/div[3]/div[4]/img',
}
self.Lessons = {
'Lesson-Code' : [],
'Lesson-Name' : [],
'Lesson-Weight' : [],
'Lesson-A-Weight' : [],
'Capacity' : [],
'Registered' : [],
'Queue' : [],
'Sex' : [],
'Teacher' : [],
'schedule' : [],
'Exam-schedule' : [],
'Abandon' : [],
'Specification' : [],
'Anti-Lesson' : [],
'Presentation-Mode' : [],
'Offline-Online' : [],
'Description' : [],
}
def Logging(self):
self.golestan = self.get("https://golestan.kntu.ac.ir/forms/authenticateuser/main.htm")
self.captcha = input('enter captcha = ')
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,self.xpaths['iframe'])))
#WebDriverWait(self, 20).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,self.xpaths['infrmset-frame'])))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,self.xpaths['Master-Frame'])))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,self.xpaths['Form_Body-frame'])))
#file = open("source.txt", "w",encoding="utf-8")
#file.write(self.page_source)
WebDriverWait(self, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, "txt")))
self.find_elements(By.CLASS_NAME, "txt")[2].send_keys(self.user, Keys.TAB, self.password, Keys.TAB, self.captcha ,Keys.ENTER)
self.switch_to.default_content()
def Get_to_Lessons(self,log_in : bool = True,i=3):
if log_in == True:
self.Logging()
else:
pass
sleep(0.5)
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME, "Faci2")))
#WebDriverWait(self, 20).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,self.xpaths['infrmset-frame'])))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME,"Master")))
WebDriverWait(self, 20).until(EC.frame_to_be_available_and_switch_to_it((By.NAME, "Form_Body")))
sleep(0.5)
WebDriverWait(self, 10).until(EC.element_to_be_clickable((By.XPATH, self.xpaths['report-finder'])))
sleep(2)
if log_in ==True:
self.find_element(By.XPATH, self.xpaths['report-finder']).send_keys("102", Keys.ENTER ,Keys.ENTER)
else:
self.find_element(By.XPATH, self.xpaths['report-finder']).clear()
self.find_element(By.XPATH, self.xpaths['report-finder']).send_keys("102", Keys.ENTER ,Keys.ENTER)
self.switch_to.default_content()
sleep(0.5)
## FACI frame numbers increase with each opened report
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME, f"Faci{i}")))
#WebDriverWait(self, 20).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,self.xpaths['infrmset-frame'])))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME,"Master")))
sleep(2)
WebDriverWait(self, 20).until(EC.frame_to_be_available_and_switch_to_it((By.NAME, "Form_Body")))
WebDriverWait(self, 10).until(EC.element_to_be_clickable((By.XPATH, self.xpaths['collage-code'])))
self.find_element(By.XPATH, self.xpaths['collage-code']).send_keys("20","11","192","02", Keys.ENTER)
self.switch_to.default_content()
def Get_Lessons(self,i=3):
#sleep(0.5)
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME, f"Faci{i}")))
#WebDriverWait(self, 20).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,self.xpaths['infrmset-frame'])))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME,"Master")))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME, "Header")))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.ID, "Form_Body")))
WebDriverWait(self, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "CTDData")))
sleep(0.2)
self.Table_Data = self.find_elements(By.CLASS_NAME, 'CTDData')
self.Page_Data = self.Parse_Lessons()
self.switch_to.default_content()
self.Lessons = {
'Lesson-Code' : [],
'Lesson-Name' : [],
'Lesson-Weight' : [],
'Lesson-A-Weight' : [],
'Capacity' : [],
'Registered' : [],
'Queue' : [],
'Sex' : [],
'Teacher' : [],
'schedule' : [],
'Exam-schedule' : [],
'Abandon' : [],
'Specification' : [],
'Anti-Lesson' : [],
'Presentation-Mode' : [],
'Offline-Online' : [],
'Description' : [],
}
return self.Page_Data
## Under Construction (look in your phone safari)##
def Parse_Lessons(self):
for element, dict_key in zip_equal(self.Table_Data, self.Lessons):
if dict_key == 'Lesson-Weight' or dict_key =='Lesson-A-Weight' or dict_key == 'Capacity' or dict_key =='Registered' or dict_key == 'Queue':
if element.text != '':
self.Lessons[dict_key].append(int(unidecode(element.text)))
else:
self.Lessons[dict_key].append(None)
elif dict_key == 'Lesson-Code':
if element.text != '':
self.Lessons[dict_key].append(int(unidecode(element.text[:-3]+element.text[-2:])))
else:
self.Lessons[dict_key].append(None)
else:
self.Lessons[dict_key].append(element.text)
return self.Lessons
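# Page_Controller repeatedly scrapes the currently displayed results page, dumps it to
# Data/Page-Data-<n>.json, then uses the forward/backward navigation buttons to move to
# the next page, reversing direction when the page number stops changing.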
def Page_Controller(self,i=3):
forward = True
backward = False
page_number_new = 1
while True:
while True:
self.switch_to.default_content()
try:
self.Page_as_dict = self.Get_Lessons(i=i)
with open(os.path.join(paths['Data'], f'Page-Data-{page_number_new}.json'),'w',encoding='utf-8') as outfile:
json.dump(self.Page_as_dict, outfile)
outfile.close()
break
except Exception as e:
print(e)
try:
self.switch_to.default_content()
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME, f"Faci{i}")))
WebDriverWait(self, 10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME,"Commander")))
WebDriverWait(self, 10).until(EC.element_to_be_clickable((By.XPATH, self.xpaths['forward-buttom'])))
WebDriverWait(self, 10).until(EC.element_to_be_clickable((By.XPATH, self.xpaths['backward-bottom'])))
WebDriverWait(self, 10).until(EC.presence_of_element_located((By.XPATH, self.xpaths['page-number-textbox'])))
self.f_button = self.find_element(By.XPATH, self.xpaths['forward-buttom'])
self.b_button = self.find_element(By.XPATH, self.xpaths['backward-bottom'])
self.page_number_text_box = self.find_element(By.XPATH, self.xpaths['page-number-textbox'])
page_number_old = self.page_number_text_box.get_attribute('value')
if forward == True and backward == False:
self.f_button.click()
elif forward == False and backward == True:
self.b_button.click()
sleep(0.1)
page_number_new = self.page_number_text_box.get_attribute('value')
if page_number_new == page_number_old == "1":
forward = True
backward = False
elif page_number_new == page_number_old != "1":
backward = True
forward = False
break
except Exception as e:
print(e)
self.switch_to.default_content()
self.find_element(By.XPATH,self.xpaths['Close-button']).click()
def Threading_Crawler(self):
i= 3
self.Get_to_Lessons(i=i)
self.Page_Controller(i=i)
while True:
i += 1
sleep(0.5)
self.Old_DataFrame = Data_Handler.DataFrame_Build()
while True:
try:
self.Get_to_Lessons(log_in=False,i=i)
self.Page_Controller(i=i)
break
except Exception as e:
print(e)
self.switch_to.default_content()
self.New_DataFrame = Data_Handler.DataFrame_Build()
if i >= 4:
Thread(target=self.message_Sender).start()
def message_Sender(self):
TOKEN = os.environ.get("TOKEN")
O_DataFrame = self.Old_DataFrame
N_DataFrame = self.New_DataFrame
Bot = telegram.Bot(token=TOKEN)
self.report = Data_Handler.reporter(N_DataFrame,O_DataFrame)
if self.report == []:
winsound.Beep(freq, duration)
for i in list(N_DataFrame['Lesson-Code']):
self.report.append(Data_Handler.Capacity_Report(N_DataFrame,i))
self.report = Mymap(self.report)
for item in self.report:
try:
print(Bot.send_message(chat_id="@Golestan_Updates",text=item,parse_mode=telegram.ParseMode.HTML))
sleep(3)
except Exception as e:
cool_down = str(e).split(" ")[-2]
try:
sleep(float(cool_down))
print(Bot.send_message(chat_id="@Golestan_Updates",text=item,parse_mode=telegram.ParseMode.HTML))
except:
pass
class RegistrationClass(Golestan_Crawler):
def __init__(self, executable_path, port=DEFAULT_PORT, options=None, service_args=None, desired_capabilities=None, service_log_path=DEFAULT_SERVICE_LOG_PATH, chrome_options=None, service=None, keep_alive=DEFAULT_KEEP_ALIVE) -> None:
super().__init__(executable_path, port, options, service_args, desired_capabilities, service_log_path, chrome_options, service, keep_alive)
self.objects = {
'New-Row-Button' : '/html/body/table/tbody/tr/td[2]/input',
'Change-Group-Button' : '/html/body/table/tbody/tr/td[4]/input', # remember: in tr[{i}], i starts from 2
'Requesd-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[1]',
'Status-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[2]',
'Group-text-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[3]',
'Lesson-Code-3-text-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[3]',
'Lesson-Code-2-text-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[4]',
'Lesson-Code-1-text-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[5]',
'Lesson-Name-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[6]',
'Lesson-Weight-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[7]',
'Lesson-Weight-A-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[8]',
'Lesson-Kind-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[9]',
'Step-Of-Registration-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[10]',
'Description-Cell' : '/html/body/div[3]/table/tbody/tr[{i}]/td[11]',
}
if __name__ == "__main__":
#crawler = Golestan_Crawler("chromedriver.exe")
#crawler.Threading_Crawler()
"""df = Data_Handler.DataFrame_Build()
it = []
for i in list(df['Lesson-Code']):
it.append(Data_Handler.Capacity_Report(df,i))
it = Mymap(it)
for x in it:
print(x)"""
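# --- Hedged usage sketch (editorial, not part of the original script) ---
# A minimal way this crawler might be driven, assuming chromedriver.exe is reachable,
# the environment variables "id", "pass" and "TOKEN" are set, and the Data/ folder exists:
#
#     crawler = Golestan_Crawler("chromedriver.exe", options=options)
#     crawler.Threading_Crawler()
#
# Threading_Crawler() logs in, walks every result page, then keeps diffing new snapshots
# against the previous DataFrame and pushes any changes to the Telegram channel.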
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
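# reline() below rewrites `source` into `dest` with a single known line ending,
# e.g. (illustrative) reline('firstif', 'firstif.crlf', force=True, ending=b'\r\n').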
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
_reline(destpath)
destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
_reline(destpath)
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
# Delete the file if it already exists
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# an undetected infinite loop prevents this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
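# normalize_ret() below reduces each state return to the four fields that the
# expected_result dicts in these tests compare against, e.g. (illustrative):
# {'__run_num__': 0, 'comment': 'Command "echo D first" run', 'result': True, 'changes': True}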
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
'changes': descr['changes'] != {}  # whether there were any changes
}
return result
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
# And why is it prevented?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
Call sls file containing require_any.
Ensure that a state runs when at least one of its require_any requisites succeeds and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
Call sls file containing a require_any with failing requisites.
Ensure the dependent state reports the failed requisite.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
Call sls file containing several watch_any requisites.
Ensure that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
Call sls file containing a watch_any with failing requisites.
Ensure the watching state reports the failed requisite.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
Call sls file containing several onchanges_any requisites.
Ensure that states run only when at least one of their onchanges requisites reports changes.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
Call sls file containing several onfail_any requisites.
Ensure that states run only when at least one of their onfail requisites fails.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require without a state module in the requisite reference.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
# if chain-use is added after #8774 resolution these tests might become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
        test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
def test_retry_option_eventual_success(self):
'''
        test a state with the retry option that should return True after at least 4 retry attempts
        but never reach 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
        Previously, the order option, when used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
def _add_runtime_pillar(self, pillar):
'''
        helper function to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
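        # refresh_pillar is asynchronous; the short sleep below gives the minion time
        # to finish re-rendering pillar data before the test continues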
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_state_test_post_run(self):
'''
test state.sls_id when test is set to
true post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true_post_run(self):
'''
test state.sls_id when test is set to true as an
arg post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
def test_state_sls_unicode_characters_cmd_output(self):
'''
        test the output from running an echo command with non-ascii
characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
test__monkey_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
from gevent import monkey
monkey.patch_all()
from gevent import queue as Queue
import threading
import time
import unittest
QUEUE_SIZE = 5
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
#self.startedEvent = threading.Event()
from gevent.event import Event
self.startedEvent = Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.01)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin(object):
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not self.t.startedEvent.isSet():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self, block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
with self.assertRaises(expected_exception_class):
block_func(*block_args)
finally:
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.isSet():
self.fail("trigger thread ended but event never set")
class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
type2test = Queue.Queue
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if not q.empty():
raise RuntimeError("Call this function with an empty queue")
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
q.put(444)
target_first_items = dict(
Queue=111,
LifoQueue=444,
PriorityQueue=111)
actual_first_item = (q.peek(), q.get())
self.assertEqual(actual_first_item,
(target_first_items[q.__class__.__name__],
target_first_items[q.__class__.__name__]),
"q.peek() and q.get() are not equal!")
target_order = dict(Queue=[333, 222, 444],
LifoQueue=[222, 333, 111],
PriorityQueue=[222, 333, 444])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertFalse(q.empty(), "Queue should not be empty")
self.assertFalse(q.full(), "Queue should not be full")
q.put(999)
self.assertTrue(q.full(), "Queue should be full")
try:
q.put(888, block=0)
self.fail("Didn't appear to block with a full queue")
except Queue.Full:
pass
try:
q.put(888, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except Queue.Full:
pass
self.assertEqual(q.qsize(), QUEUE_SIZE)
# Test a blocking put
self.do_blocking_test(q.put, (888,), q.get, ())
self.do_blocking_test(q.put, (888, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except Queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except Queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x is None:
q.task_done()
return
#with self.cumlock:
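            # the lock stays commented out: with gevent's monkey-patched (cooperative) threads,
            # greenlets only switch on blocking calls, so the unlocked increment below is not interleaved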
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0, 1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0, 1):
q.put(None) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = Queue.JoinableQueue() # self.type2test()
# XXX the same test in subclasses
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = Queue.JoinableQueue() # self.type2test()
# XXX the same test in subclass
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_task_done_with_items(self):
# Passing items to the constructor allows for as
        # many task_done calls. Joining before all the task_done
        # calls have been made returns False.
# XXX the same test in subclass
l = [1, 2, 3]
q = Queue.JoinableQueue(items=l)
for i in l:
self.assertFalse(q.join(timeout=0.001))
self.assertEqual(i, q.get())
q.task_done()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
self.assertTrue(q.join(timeout=0.001))
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
class LifoQueueTest(BaseQueueTest):
type2test = Queue.LifoQueue
class PriorityQueueTest(BaseQueueTest):
type2test = Queue.PriorityQueue
def test__init(self):
item1 = (2, 'b')
item2 = (1, 'a')
q = self.type2test(items=[item1, item2])
self.assertTupleEqual(item2, q.get_nowait())
self.assertTupleEqual(item1, q.get_nowait())
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
pass
class FailingQueue(Queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
Queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException("You Lose")
return Queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException("You Lose")
return Queue.Queue._get(self)
class FailingQueueTest(unittest.TestCase, BlockingTestMixin):
def failing_queue_test(self, q):
if not q.empty():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
with self.assertRaises(FailingQueueException):
q.put("oops", block=0)
q.fail_next_put = True
with self.assertRaises(FailingQueueException):
q.put("oops", timeout=0.1)
q.put(999)
self.assertTrue(q.full(), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
with self.assertRaises(FailingQueueException):
self.do_blocking_test(q.put, (888,), q.get, ())
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put(999)
# Test a failing timeout put
q.fail_next_put = True
self.do_exceptional_blocking_test(q.put, (888, True, 10), q.get, (),
FailingQueueException)
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put(999)
self.assertTrue(q.full(), "Queue should be full")
q.get()
self.assertFalse(q.full(), "Queue should not be full")
q.put(999)
self.assertTrue(q.full(), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, (888,), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
with self.assertRaises(FailingQueueException):
q.get()
self.assertFalse(q.empty(), "Queue should not be empty")
q.fail_next_get = True
with self.assertRaises(FailingQueueException):
q.get(timeout=0.1)
self.assertFalse(q.empty(), "Queue should not be empty")
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
q.fail_next_get = True
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
# put succeeded, but get failed.
self.assertFalse(q.empty(), "Queue should not be empty")
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
if __name__ == "__main__":
unittest.main()
|
ProjE_sigmoid.py
|
import argparse
import math
import os.path
import timeit
from multiprocessing import JoinableQueue, Queue, Process
import numpy as np
import tensorflow as tf
class ProjE:
@property
def n_entity(self):
return self.__n_entity
@property
def n_train(self):
return self.__train_triple.shape[0]
@property
def trainable_variables(self):
return self.__trainable
@property
def hr_t(self):
return self.__hr_t
@property
def train_hr_t(self):
return self.__train_hr_t
@property
def train_tr_h(self):
return self.__train_tr_h
@property
def tr_h(self):
return self.__tr_h
@property
def ent_embedding(self):
return self.__ent_embedding
@property
def rel_embedding(self):
return self.__rel_embedding
def training_data(self, batch_size=100):
n_triple = len(self.__train_triple)
rand_idx = np.random.permutation(n_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
hr_tlist, hr_tweight, tr_hlist, tr_hweight = self.corrupted_training(
self.__train_triple[rand_idx[start:end]])
yield hr_tlist, hr_tweight, tr_hlist, tr_hweight
start = end
def raw_training_data(self, batch_size=100):
n_triple = len(self.__train_triple)
rand_idx = np.random.permutation(n_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__train_triple[rand_idx[start:end]]
start = end
def testing_data(self, batch_size=100):
n_triple = len(self.__test_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__test_triple[start:end, :]
start = end
def validation_data(self, batch_size=100):
n_triple = len(self.__valid_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__valid_triple[start:end, :]
start = end
def corrupted_training(self, htr):
# [head(tail), relation, #of_total_positive_candidates, positive_instances..., negative_instances...]
hr_tlist = list()
hr_tweight = list()
tr_hlist = list()
tr_hweight = list()
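        # build a dense 0/1 label vector over all entities for each (t, r) or (h, r) query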
for idx in range(htr.shape[0]):
if np.random.uniform(-1, 1) > 0: # t r predict h
tr_hweight.append(
[1. if x in self.__tr_h[htr[idx, 1]][htr[idx, 2]] else 0. for x in range(self.__n_entity)])
tr_hlist.append([htr[idx, 1], htr[idx, 2]])
else: # h r predict t
hr_tweight.append(
[1. if x in self.__hr_t[htr[idx, 0]][htr[idx, 2]] else 0. for x in range(self.__n_entity)])
hr_tlist.append([htr[idx, 0], htr[idx, 2]])
return np.asarray(hr_tlist, dtype=np.int32), np.asarray(hr_tweight, dtype=np.float32), \
np.asarray(tr_hlist, dtype=np.int32), np.asarray(tr_hweight, dtype=np.float32)
def __init__(self, data_dir, embed_dim=100, combination_method='simple', dropout=0.5, neg_weight=0.5):
if combination_method.lower() not in ['simple', 'matrix']:
raise NotImplementedError("ProjE does not support using %s as combination method." % combination_method)
self.__combination_method = combination_method
self.__embed_dim = embed_dim
self.__initialized = False
self.__trainable = list()
self.__dropout = dropout
with open(os.path.join(data_dir, 'entity2id.txt'), 'r', encoding='utf-8') as f:
self.__n_entity = len(f.readlines())
with open(os.path.join(data_dir, 'entity2id.txt'), 'r', encoding='utf-8') as f:
self.__entity_id_map = {x.strip().split('\t')[0]: int(x.strip().split('\t')[1]) for x in f.readlines()}
self.__id_entity_map = {v: k for k, v in self.__entity_id_map.items()}
print("N_ENTITY: %d" % self.__n_entity)
with open(os.path.join(data_dir, 'relation2id.txt'), 'r', encoding='utf-8') as f:
self.__n_relation = len(f.readlines())
with open(os.path.join(data_dir, 'relation2id.txt'), 'r', encoding='utf-8') as f:
self.__relation_id_map = {x.strip().split('\t')[0]: int(x.strip().split('\t')[1]) for x in f.readlines()}
        self.__id_relation_map = {v: k for k, v in self.__relation_id_map.items()}
print("N_RELATION: %d" % self.__n_relation)
def load_triple(file_path):
with open(file_path, 'r', encoding='utf-8') as f_triple:
return np.asarray([[self.__entity_id_map[x.strip().split('\t')[0]],
self.__entity_id_map[x.strip().split('\t')[1]],
self.__relation_id_map[x.strip().split('\t')[2]]] for x in f_triple.readlines()],
dtype=np.int32)
def gen_hr_t(triple_data):
hr_t = dict()
for h, t, r in triple_data:
if h not in hr_t:
hr_t[h] = dict()
if r not in hr_t[h]:
hr_t[h][r] = set()
hr_t[h][r].add(t)
return hr_t
def gen_tr_h(triple_data):
tr_h = dict()
for h, t, r in triple_data:
if t not in tr_h:
tr_h[t] = dict()
if r not in tr_h[t]:
tr_h[t][r] = set()
tr_h[t][r].add(h)
return tr_h
self.__train_triple = load_triple(os.path.join(data_dir, 'train.txt'))
print("N_TRAIN_TRIPLES: %d" % self.__train_triple.shape[0])
self.__test_triple = load_triple(os.path.join(data_dir, 'test.txt'))
print("N_TEST_TRIPLES: %d" % self.__test_triple.shape[0])
self.__valid_triple = load_triple(os.path.join(data_dir, 'valid.txt'))
print("N_VALID_TRIPLES: %d" % self.__valid_triple.shape[0])
self.__train_hr_t = gen_hr_t(self.__train_triple)
self.__train_tr_h = gen_tr_h(self.__train_triple)
self.__test_hr_t = gen_hr_t(self.__test_triple)
self.__test_tr_h = gen_tr_h(self.__test_triple)
self.__hr_t = gen_hr_t(np.concatenate([self.__train_triple, self.__test_triple, self.__valid_triple], axis=0))
self.__tr_h = gen_tr_h(np.concatenate([self.__train_triple, self.__test_triple, self.__valid_triple], axis=0))
bound = 6 / math.sqrt(embed_dim)
with tf.device('/cpu'):
self.__ent_embedding = tf.get_variable("ent_embedding", [self.__n_entity, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=345))
self.__trainable.append(self.__ent_embedding)
self.__rel_embedding = tf.get_variable("rel_embedding", [self.__n_relation, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=346))
self.__trainable.append(self.__rel_embedding)
if combination_method.lower() == 'simple':
self.__hr_weighted_vector = tf.get_variable("simple_hr_combination_weights", [embed_dim * 2],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=445))
self.__tr_weighted_vector = tf.get_variable("simple_tr_combination_weights", [embed_dim * 2],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=445))
self.__trainable.append(self.__hr_weighted_vector)
self.__trainable.append(self.__tr_weighted_vector)
self.__hr_combination_bias = tf.get_variable("combination_bias_hr",
initializer=tf.zeros([embed_dim]))
self.__tr_combination_bias = tf.get_variable("combination_bias_tr",
initializer=tf.zeros([embed_dim]))
self.__trainable.append(self.__hr_combination_bias)
self.__trainable.append(self.__tr_combination_bias)
else:
self.__hr_combination_matrix = tf.get_variable("matrix_hr_combination_layer",
[embed_dim * 2, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=555))
self.__tr_combination_matrix = tf.get_variable("matrix_tr_combination_layer",
[embed_dim * 2, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=555))
self.__trainable.append(self.__hr_combination_matrix)
self.__trainable.append(self.__tr_combination_matrix)
self.__hr_combination_bias = tf.get_variable("combination_bias_hr",
initializer=tf.zeros([embed_dim]))
self.__tr_combination_bias = tf.get_variable("combination_bias_tr",
initializer=tf.zeros([embed_dim]))
self.__trainable.append(self.__hr_combination_bias)
self.__trainable.append(self.__tr_combination_bias)
@staticmethod
def __l1_normalize(x, dim, epsilon=1e-12, name=None):
square_sum = tf.reduce_sum(tf.abs(x), [dim], keep_dims=True)
x_inv_norm = tf.rsqrt(tf.maximum(square_sum, epsilon))
return tf.mul(x, x_inv_norm, name=name)
@staticmethod
def sampled_softmax(tensor, weights):
max_val = tf.reduce_max(tensor * tf.abs(weights), 1, keep_dims=True)
tensor_rescaled = tensor - max_val
tensor_exp = tf.exp(tensor_rescaled)
tensor_sum = tf.reduce_sum(tensor_exp * tf.abs(weights), 1, keep_dims=True)
return (tensor_exp / tensor_sum) * tf.abs(weights) # all ignored elements will have a prob of 0.
def train(self, inputs, regularizer_weight=1., scope=None):
with tf.variable_scope(scope or type(self).__name__) as scp:
if self.__initialized:
scp.reuse_variables()
rel_embedding = self.__rel_embedding
normalized_ent_embedding = self.__ent_embedding
hr_tlist, hr_tlist_weight, tr_hlist, tr_hlist_weight = inputs
# (?, dim)
hr_tlist_h = tf.nn.embedding_lookup(normalized_ent_embedding, hr_tlist[:, 0])
hr_tlist_r = tf.nn.embedding_lookup(rel_embedding, hr_tlist[:, 1])
# (?, dim)
tr_hlist_t = tf.nn.embedding_lookup(normalized_ent_embedding, tr_hlist[:, 0])
tr_hlist_r = tf.nn.embedding_lookup(rel_embedding, tr_hlist[:, 1])
if self.__combination_method.lower() == 'simple':
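                # 'simple' combination: elementwise weighted sum of the entity and relation
                # embeddings, then tanh + dropout, scored against every entity embedding via a matmul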
# shape (?, dim)
hr_tlist_hr = hr_tlist_h * self.__hr_weighted_vector[
:self.__embed_dim] + hr_tlist_r * self.__hr_weighted_vector[
self.__embed_dim:]
hrt_res = tf.matmul(tf.nn.dropout(tf.tanh(hr_tlist_hr + self.__hr_combination_bias), self.__dropout),
self.__ent_embedding,
transpose_b=True)
tr_hlist_tr = tr_hlist_t * self.__tr_weighted_vector[
:self.__embed_dim] + tr_hlist_r * self.__tr_weighted_vector[
self.__embed_dim:]
trh_res = tf.matmul(tf.nn.dropout(tf.tanh(tr_hlist_tr + self.__tr_combination_bias), self.__dropout),
self.__ent_embedding,
transpose_b=True)
self.regularizer_loss = regularizer_loss = tf.reduce_sum(
tf.abs(self.__hr_weighted_vector)) + tf.reduce_sum(tf.abs(
self.__tr_weighted_vector)) + tf.reduce_sum(tf.abs(self.__ent_embedding)) + tf.reduce_sum(
tf.abs(self.__rel_embedding))
else:
hr_tlist_hr = tf.nn.dropout(tf.tanh(tf.matmul(tf.concat(1, [hr_tlist_h, hr_tlist_r]),
self.__hr_combination_matrix) + self.__hr_combination_bias),
self.__dropout)
hrt_res = tf.matmul(hr_tlist_hr, self.__ent_embedding, transpose_b=True)
tr_hlist_tr = tf.nn.dropout(tf.tanh(tf.matmul(tf.concat(1, [tr_hlist_t, tr_hlist_r]),
self.__tr_combination_matrix) + self.__tr_combination_bias),
self.__dropout)
trh_res = tf.matmul(tr_hlist_tr, self.__ent_embedding, transpose_b=True)
self.regularizer_loss = regularizer_loss = tf.reduce_sum(
tf.abs(self.__hr_combination_matrix)) + tf.reduce_sum(tf.abs(
self.__tr_combination_matrix)) + tf.reduce_sum(tf.abs(self.__ent_embedding)) + tf.reduce_sum(
tf.abs(self.__rel_embedding))
hrt_res_sigmoid = tf.sigmoid(hrt_res)
hrt_loss = -tf.reduce_sum(
tf.log(tf.clip_by_value(hrt_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., hr_tlist_weight)
+ tf.log(tf.clip_by_value(1 - hrt_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., tf.neg(hr_tlist_weight)))
trh_res_sigmoid = tf.sigmoid(trh_res)
trh_loss = -tf.reduce_sum(
tf.log(tf.clip_by_value(trh_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., tr_hlist_weight)
+ tf.log(tf.clip_by_value(1 - trh_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., tf.neg(tr_hlist_weight)))
return hrt_loss + trh_loss + regularizer_loss * regularizer_weight
def test(self, inputs, scope=None):
with tf.variable_scope(scope or type(self).__name__) as scp:
scp.reuse_variables()
h = tf.nn.embedding_lookup(self.__ent_embedding, inputs[:, 0])
t = tf.nn.embedding_lookup(self.__ent_embedding, inputs[:, 1])
r = tf.nn.embedding_lookup(self.__rel_embedding, inputs[:, 2])
ent_mat = tf.transpose(self.__ent_embedding)
if self.__combination_method.lower() == 'simple':
# predict tails
hr = h * self.__hr_weighted_vector[:self.__embed_dim] + r * self.__hr_weighted_vector[
self.__embed_dim:]
hrt_res = tf.sigmoid(tf.matmul(tf.tanh(hr + self.__hr_combination_bias), ent_mat))
_, tail_ids = tf.nn.top_k(hrt_res, k=self.__n_entity)
# predict heads
tr = t * self.__tr_weighted_vector[:self.__embed_dim] + r * self.__tr_weighted_vector[self.__embed_dim:]
trh_res = tf.sigmoid(tf.matmul(tf.tanh(tr + self.__tr_combination_bias), ent_mat))
_, head_ids = tf.nn.top_k(trh_res, k=self.__n_entity)
else:
hr = tf.matmul(tf.concat(1, [h, r]), self.__hr_combination_matrix)
hrt_res = tf.sigmoid(tf.matmul(tf.tanh(hr + self.__hr_combination_bias), ent_mat))
_, tail_ids = tf.nn.top_k(hrt_res, k=self.__n_entity)
tr = tf.matmul(tf.concat(1, [t, r]), self.__tr_combination_matrix)
trh_res = tf.sigmoid(tf.matmul(tf.tanh(tr + self.__tr_combination_bias), ent_mat))
_, head_ids = tf.nn.top_k(trh_res, k=self.__n_entity)
return head_ids, tail_ids
def train_ops(model: ProjE, learning_rate=0.1, optimizer_str='gradient', regularizer_weight=1.0):
with tf.device('/cpu'):
train_hrt_input = tf.placeholder(tf.int32, [None, 2])
train_hrt_weight = tf.placeholder(tf.float32, [None, model.n_entity])
train_trh_input = tf.placeholder(tf.int32, [None, 2])
train_trh_weight = tf.placeholder(tf.float32, [None, model.n_entity])
loss = model.train([train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight],
regularizer_weight=regularizer_weight)
if optimizer_str == 'gradient':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif optimizer_str == 'rms':
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
elif optimizer_str == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
else:
raise NotImplementedError("Does not support %s optimizer" % optimizer_str)
grads = optimizer.compute_gradients(loss, model.trainable_variables)
op_train = optimizer.apply_gradients(grads)
return train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, loss, op_train
def test_ops(model: ProjE):
with tf.device('/cpu'):
test_input = tf.placeholder(tf.int32, [None, 3])
head_ids, tail_ids = model.test(test_input)
return test_input, head_ids, tail_ids
def worker_func(in_queue: JoinableQueue, out_queue: Queue, hr_t, tr_h):
while True:
dat = in_queue.get()
if dat is None:
in_queue.task_done()
continue
testing_data, head_pred, tail_pred = dat
out_queue.put(test_evaluation(testing_data, head_pred, tail_pred, hr_t, tr_h))
in_queue.task_done()
def data_generator_func(in_queue: JoinableQueue, out_queue: Queue, tr_h, hr_t, n_entity, neg_weight):
while True:
dat = in_queue.get()
if dat is None:
break
# [head(tail), relation, #of_total_positive_candidates, positive_instances..., negative_instances...]
hr_tlist = list()
hr_tweight = list()
tr_hlist = list()
tr_hweight = list()
htr = dat
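        # candidate weights per entity: 1.0 marks a known-positive target, -1.0 a sampled negative
        # (drawn with probability neg_weight), and 0.0 an entity that is ignored by the loss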
for idx in range(htr.shape[0]):
if np.random.uniform(-1, 1) > 0: # t r predict h
tr_hweight.append(
[1. if x in tr_h[htr[idx, 1]][htr[idx, 2]] else y for
x, y in enumerate(np.random.choice([0., -1.], size=n_entity, p=[1 - neg_weight, neg_weight]))])
tr_hlist.append([htr[idx, 1], htr[idx, 2]])
else: # h r predict t
hr_tweight.append(
[1. if x in hr_t[htr[idx, 0]][htr[idx, 2]] else y for
x, y in enumerate(np.random.choice([0., -1.], size=n_entity, p=[1 - neg_weight, neg_weight]))])
hr_tlist.append([htr[idx, 0], htr[idx, 2]])
out_queue.put((np.asarray(hr_tlist, dtype=np.int32), np.asarray(hr_tweight, dtype=np.float32),
np.asarray(tr_hlist, dtype=np.int32), np.asarray(tr_hweight, dtype=np.float32)))
def test_evaluation(testing_data, head_pred, tail_pred, hr_t, tr_h):
assert len(testing_data) == len(head_pred)
assert len(testing_data) == len(tail_pred)
mean_rank_h = list()
mean_rank_t = list()
filtered_mean_rank_h = list()
filtered_mean_rank_t = list()
for i in range(len(testing_data)):
h = testing_data[i, 0]
t = testing_data[i, 1]
r = testing_data[i, 2]
# mean rank
mr = 0
for val in head_pred[i]:
if val == h:
mean_rank_h.append(mr)
break
mr += 1
mr = 0
        for val in tail_pred[i]:
            if val == t:
                mean_rank_t.append(mr)
                break
            mr += 1
# filtered mean rank
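        # like mean rank, but candidates that are correct answers for some other known triple are skipped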
fmr = 0
for val in head_pred[i]:
if val == h:
filtered_mean_rank_h.append(fmr)
break
if t in tr_h and r in tr_h[t] and val in tr_h[t][r]:
continue
else:
fmr += 1
fmr = 0
for val in tail_pred[i]:
if val == t:
filtered_mean_rank_t.append(fmr)
break
if h in hr_t and r in hr_t[h] and val in hr_t[h][r]:
continue
else:
fmr += 1
return (mean_rank_h, filtered_mean_rank_h), (mean_rank_t, filtered_mean_rank_t)
def main(_):
# create a log file
# clear everything (so that it only saves data for the most recent run)
    sigmoid_log = open("sigmoid_log.txt", "w")
sigmoid_log.write("")
sigmoid_log.close()
# open in append mode
sigmoid_log = open("sigmoid_log.txt", "a")
parser = argparse.ArgumentParser(description='ProjE.')
parser.add_argument('--data', dest='data_dir', type=str, help="Data folder", default='./data/FB15k/')
parser.add_argument('--lr', dest='lr', type=float, help="Learning rate", default=0.01)
parser.add_argument("--dim", dest='dim', type=int, help="Embedding dimension", default=200)
parser.add_argument("--batch", dest='batch', type=int, help="Batch size", default=200)
parser.add_argument("--comb", dest="combination_method", type=str, help="Combination method", default='simple')
parser.add_argument("--worker", dest='n_worker', type=int, help="Evaluation worker", default=3)
parser.add_argument("--generator", dest='n_generator', type=int, help="Data generator", default=10)
parser.add_argument("--eval_batch", dest="eval_batch", type=int, help="Evaluation batch size", default=500)
parser.add_argument("--save_dir", dest='save_dir', type=str, help="Model path", default='./')
parser.add_argument("--load_model", dest='load_model', type=str, help="Model file", default="")
parser.add_argument("--save_per", dest='save_per', type=int, help="Save per x iteration", default=10)
parser.add_argument("--eval_per", dest='eval_per', type=int, help="Evaluate every x iteration", default=1)
parser.add_argument("--max_iter", dest='max_iter', type=int, help="Max iteration", default=100)
parser.add_argument("--summary_dir", dest='summary_dir', type=str, help="summary directory",
default='./ProjE_summary/')
parser.add_argument("--keep", dest='drop_out', type=float, help="Keep prob (1.0 keep all, 0. drop all)",
default=0.5)
parser.add_argument("--optimizer", dest='optimizer', type=str, help="Optimizer", default='adam')
parser.add_argument("--prefix", dest='prefix', type=str, help="model_prefix", default='DEFAULT')
parser.add_argument("--loss_weight", dest='loss_weight', type=float, help="Weight on parameter loss", default=1e-5)
parser.add_argument("--neg_weight", dest='neg_weight', type=float, help="Sampling weight on negative examples",
default=0.5)
args = parser.parse_args()
    sigmoid_log.write(str(args))
sigmoid_log.write("\n")
model = ProjE(args.data_dir, embed_dim=args.dim, combination_method=args.combination_method,
dropout=args.drop_out, neg_weight=args.neg_weight)
train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, \
train_loss, train_op = train_ops(model, learning_rate=args.lr,
optimizer_str=args.optimizer,
regularizer_weight=args.loss_weight)
test_input, test_head, test_tail = test_ops(model)
with tf.Session() as session:
tf.initialize_all_variables().run()
saver = tf.train.Saver()
iter_offset = 0
if args.load_model is not None and os.path.exists(args.load_model):
saver.restore(session, args.load_model)
iter_offset = int(args.load_model.split('.')[-2].split('_')[-1]) + 1
sigmoid_log.write("Load model from %s, iteration %d restored." % (args.load_model, iter_offset))
sigmoid_log.write("\n")
total_inst = model.n_train
# training data generator
raw_training_data_queue = Queue()
training_data_queue = Queue()
data_generators = list()
for i in range(args.n_generator):
data_generators.append(Process(target=data_generator_func, args=(
raw_training_data_queue, training_data_queue, model.train_tr_h, model.train_hr_t, model.n_entity, args.neg_weight)))
data_generators[-1].start()
evaluation_queue = JoinableQueue()
result_queue = Queue()
for i in range(args.n_worker):
worker = Process(target=worker_func, args=(evaluation_queue, result_queue, model.hr_t, model.tr_h))
worker.start()
for data_func, test_type in zip([model.validation_data, model.testing_data], ['VALID', 'TEST']):
accu_mean_rank_h = list()
accu_mean_rank_t = list()
accu_filtered_mean_rank_h = list()
accu_filtered_mean_rank_t = list()
evaluation_count = 0
for testing_data in data_func(batch_size=args.eval_batch):
head_pred, tail_pred = session.run([test_head, test_tail],
{test_input: testing_data})
evaluation_queue.put((testing_data, head_pred, tail_pred))
evaluation_count += 1
for i in range(args.n_worker):
evaluation_queue.put(None)
sigmoid_log.write("waiting for worker finishes their work")
sigmoid_log.write("\n")
evaluation_queue.join()
sigmoid_log.write("all worker stopped.")
sigmoid_log.write("\n")
while evaluation_count > 0:
evaluation_count -= 1
(mrh, fmrh), (mrt, fmrt) = result_queue.get()
accu_mean_rank_h += mrh
accu_mean_rank_t += mrt
accu_filtered_mean_rank_h += fmrh
accu_filtered_mean_rank_t += fmrt
sigmoid_log.write(
"[%s] INITIALIZATION [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, np.mean(accu_mean_rank_h), np.mean(accu_filtered_mean_rank_h),
np.mean(np.asarray(accu_mean_rank_h, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_h, dtype=np.int32) < 10)))
sigmoid_log.write("\n")
sigmoid_log.write(
"[%s] INITIALIZATION [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, np.mean(accu_mean_rank_t), np.mean(accu_filtered_mean_rank_t),
np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_t, dtype=np.int32) < 10)))
sigmoid_log.write("\n")
for n_iter in range(iter_offset, args.max_iter):
start_time = timeit.default_timer()
accu_loss = 0.
accu_re_loss = 0.
ninst = 0
sigmoid_log.write("initializing raw training data...")
sigmoid_log.write("\n")
nbatches_count = 0
for dat in model.raw_training_data(batch_size=args.batch):
raw_training_data_queue.put(dat)
nbatches_count += 1
sigmoid_log.write("raw training data initialized.")
sigmoid_log.write("\n")
while nbatches_count > 0:
nbatches_count -= 1
hr_tlist, hr_tweight, tr_hlist, tr_hweight = training_data_queue.get()
l, rl, _ = session.run(
[train_loss, model.regularizer_loss, train_op], {train_hrt_input: hr_tlist,
train_hrt_weight: hr_tweight,
train_trh_input: tr_hlist,
train_trh_weight: tr_hweight})
accu_loss += l
accu_re_loss += rl
ninst += len(hr_tlist) + len(tr_hlist)
                # the guard `if ninst % (5000) is not None:` was always true, so progress is simply
                # logged for every batch; file.write() takes no `end` keyword, so the carriage
                # return is appended to the format string instead
                sigmoid_log.write(
                    '[%d sec](%d/%d) : %.2f -- loss : %.5f rloss: %.5f \r' % (
                        timeit.default_timer() - start_time, ninst, total_inst, float(ninst) / total_inst,
                        l / (len(hr_tlist) + len(tr_hlist)),
                        args.loss_weight * (rl / (len(hr_tlist) + len(tr_hlist)))))
sigmoid_log.write("\n")
sigmoid_log.write("")
sigmoid_log.write("\n")
sigmoid_log.write("iter %d avg loss %.5f, time %.3f" % (n_iter, accu_loss / ninst, timeit.default_timer() - start_time))
sigmoid_log.write("\n")
if n_iter % args.save_per == 0 or n_iter == args.max_iter - 1:
save_path = saver.save(session,
os.path.join(args.save_dir,
"ProjE_" + str(args.prefix) + "_" + str(n_iter) + ".ckpt"))
sigmoid_log.write("Model saved at %s" % save_path)
sigmoid_log.write("\n")
if n_iter % args.eval_per == 0 or n_iter == args.max_iter - 1:
for data_func, test_type in zip([model.validation_data, model.testing_data], ['VALID', 'TEST']):
accu_mean_rank_h = list()
accu_mean_rank_t = list()
accu_filtered_mean_rank_h = list()
accu_filtered_mean_rank_t = list()
evaluation_count = 0
for testing_data in data_func(batch_size=args.eval_batch):
head_pred, tail_pred = session.run([test_head, test_tail],
{test_input: testing_data})
evaluation_queue.put((testing_data, head_pred, tail_pred))
evaluation_count += 1
for i in range(args.n_worker):
evaluation_queue.put(None)
sigmoid_log.write("waiting for worker finishes their work")
sigmoid_log.write("\n")
evaluation_queue.join()
sigmoid_log.write("all worker stopped.")
sigmoid_log.write("\n")
while evaluation_count > 0:
evaluation_count -= 1
(mrh, fmrh), (mrt, fmrt) = result_queue.get()
accu_mean_rank_h += mrh
accu_mean_rank_t += mrt
accu_filtered_mean_rank_h += fmrh
accu_filtered_mean_rank_t += fmrt
sigmoid_log.write(
"[%s] ITER %d [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, n_iter, np.mean(accu_mean_rank_h), np.mean(accu_filtered_mean_rank_h),
np.mean(np.asarray(accu_mean_rank_h, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_h, dtype=np.int32) < 10)))
sigmoid_log.write("\n")
sigmoid_log.write(
"[%s] ITER %d [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, n_iter, np.mean(accu_mean_rank_t), np.mean(accu_filtered_mean_rank_t),
np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_t, dtype=np.int32) < 10)))
sigmoid_log.write("\n")
sigmoid_log.close()
if __name__ == '__main__':
tf.app.run()
|
lichessbot.py
|
import argparse
import chess
from chess import engine
from chess import variant
import chess.polyglot
import model
import json
import lichess
import logging
import multiprocessing
from multiprocessing import Process
import signal
import backoff
from config import load_config
from conversation import Conversation, ChatLine
from requests.exceptions import HTTPError, ReadTimeout
import os
logger = logging.getLogger(__name__)
try:
from http.client import RemoteDisconnected
# New in version 3.5: Previously, BadStatusLine('') was raised.
except ImportError:
from http.client import BadStatusLine as RemoteDisconnected
terminated = False
def signal_handler(signal, frame):
global terminated
logger.debug("Recieved SIGINT. Terminating client.")
terminated = True
signal.signal(signal.SIGINT, signal_handler)
def is_final(exception):
return isinstance(exception, HTTPError) and exception.response.status_code < 500
def upgrade_account(li):
if li.upgrade_to_bot_account() is None:
return False
logger.info("Succesfully upgraded to Bot Account!")
return True
def watch_control_stream(control_queue, li):
while not terminated:
try:
response = li.get_event_stream()
lines = response.iter_lines()
for line in lines:
if line:
event = json.loads(line.decode('utf-8'))
control_queue.put_nowait(event)
logger.info(event)
except:
logger.info("except")
pass
def start(li, user_profile, config):
challenge_config = config["challenge"]
logger.info("You're now connected to {} and awaiting challenges.".format(config["url"]))
control_queue=multiprocessing.Manager().Queue()
control_stream = Process(target=watch_control_stream, args=[control_queue,li])
control_stream.start()
while not terminated:
event=control_queue.get()
if event["type"] == "terminated":
break
elif event["type"] == "challenge":
logger.info("chlng detected")
chlng = model.Challenge(event["challenge"])
if chlng.is_supported(challenge_config):
logger.info("chlng supported")
try:
logger.info(" Accept {}".format(chlng))
response = li.accept_challenge(chlng.id)
except (HTTPError, ReadTimeout) as exception:
if isinstance(exception, HTTPError) and exception.response.status_code == 404: # ignore missing challenge
logger.info(" Skip missing {}".format(chlng))
else:
try:
li.decline_challenge(chlng.id)
logger.info(" Decline {}".format(chlng))
except:
pass
elif event["type"] == "gameStart":
logger.info("game detected")
game_id = event["game"]["id"]
play_game(li, game_id, user_profile, config)
logger.info("Terminated")
control_stream.terminate()
control_stream.join()
ponder_results = {}
@backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final)
def play_game(li, game_id, user_profile, config):
li.chat(game_id,"player","This bot is made by @master_bot and the code is his. To get help on making a bot, ask @master_bot")
li.chat(game_id,"spectator","This bot is made by @master_bot and the code is his. To get help on making a bot, ask @master_bot")
response = li.get_game_stream(game_id)
lines = response.iter_lines()
bullet=False
#Initial response of stream will be the full game info. Store it
initial_state = json.loads(next(lines).decode('utf-8'))
game = model.Game(initial_state, user_profile["username"], li.baseUrl, config.get("abort_time", 20))
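    # rough per-move think time: about 1/85 of the initial clock, clamped to [0.3, 6] seconds (0.3s in bullet)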
timelim=game.state["btime"]/1000
timelim=timelim/60
if timelim>=0.5 and timelim<=2:
bullet=True
time=round(timelim/85*60,1)
if time>6:
time=6
elif time<0.3:
time=0.3
if bullet:
time=0.3
board = setup_board(game)
cfg = config["engine"]
if type(board).uci_variant=="chess":
engine_path = os.path.join(cfg["dir"], cfg["name"])
bookname="book.bin"
elif type(board).uci_variant=="atomic":
engine_path = os.path.join(cfg["dir"], cfg["lcname"])
bookname="bookchen.bin"
else:
engine_path = os.path.join(cfg["dir"], cfg["fairyname"])
bookname="bookchen.bin"
engineeng = engine.SimpleEngine.popen_uci(engine_path)
engineeng.configure({'Threads':5})
engineeng.configure({'Hash':120})
logger.info("+++ {}".format(game))
if is_engine_move(game, board.move_stack) and not is_game_over(game):
with chess.polyglot.open_reader(bookname) as reader:
movesob=[]
weight=[]
for entry in reader.find_all(board):
movesob.append(entry.move)
weight.append(entry.weight)
if len(weight)==0 or max(weight)<9:
move=engineeng.play(board,engine.Limit(time=time))
board.push(move.move)
li.make_move(game.id, move.move)
else:
move=movesob[weight.index(max(weight))]
board.push(move)
li.make_move(game.id, move)
with chess.polyglot.open_reader(bookname) as reader:
while not terminated:
try:
binary_chunk = next(lines)
except(StopIteration):
break
upd = json.loads(binary_chunk.decode('utf-8')) if binary_chunk else None
u_type = upd["type"] if upd else "ping"
if not board.is_game_over():
if u_type == "gameState":
game.state=upd
moves = upd["moves"].split()
board = update_board(board, moves[-1])
if not is_game_over(game) and is_engine_move(game, moves):
moves=[]
weight=[]
for entry in reader.find_all(board):
moves.append(entry.move)
weight.append(entry.weight)
if len(weight)==0 or max(weight)<9:
move=engineeng.play(board,engine.Limit(time=time))
board.push(move.move)
li.make_move(game.id, move.move)
else:
move=moves[weight.index(max(weight))]
board.push(move)
li.make_move(game.id, move)
if board.turn == chess.WHITE:
game.ping(config.get("abort_time", 20), (upd["wtime"] + upd["winc"]) / 1000 + 60)
else:
game.ping(config.get("abort_time", 20), (upd["btime"] + upd["binc"]) / 1000 + 60)
elif u_type == "ping":
if game.should_abort_now():
logger.info(" Aborting {} by lack of activity".format(game.url()))
li.abort(game.id)
break
elif game.should_terminate_now():
logger.info(" Terminating {} by lack of activity".format(game.url()))
if game.is_abortable():
li.abort(game.id)
break
else:
logger.info("game over")
engineeng.quit()
break
def is_white_to_move(game, moves):
return len(moves) % 2 == (0 if game.white_starts else 1)
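# For example, if white starts and two moves have been played ("e2e4", "e7e5"),
# len(moves) % 2 == 0, so it is white's turn again.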
def setup_board(game):
if game.variant_name.lower() == "chess960":
board = chess.Board(game.initial_fen, chess960=True)
elif game.variant_name == "From Position":
board = chess.Board(game.initial_fen)
else:
VariantBoard = variant.find_variant(game.variant_name)
board = VariantBoard()
moves = game.state["moves"].split()
for move in moves:
board = update_board(board, move)
return board
def is_engine_move(game, moves):
return game.is_white == is_white_to_move(game, moves)
def is_game_over(game):
return game.state["status"] != "started"
def update_board(board, move):
uci_move = chess.Move.from_uci(move)
if board.is_legal(uci_move):
board.push(uci_move)
else:
logger.debug('Ignoring illegal move {} on board {}'.format(move, board.fen()))
return board
def intro():
return r"""
. _/|
. // o\
. || ._) lichess-bot
. //__\
. )___( Play on Lichess with a bot
"""
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Play on Lichess with a bot')
parser.add_argument('-u', action='store_true', help='Add this flag to upgrade your account to a bot account.')
parser.add_argument('-v', action='store_true', help='Verbose output. Changes log level from INFO to DEBUG.')
parser.add_argument('--config', help='Specify a configuration file (defaults to ./config.yml)')
parser.add_argument('-l', '--logfile', help="Log file to append logs to.", default=None)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO, filename=args.logfile,
format="%(asctime)-15s: %(message)s")
logger.info(intro())
CONFIG = load_config(args.config or "./config.yml")
li = lichess.Lichess(CONFIG["token"], CONFIG["url"], "1.1.5")
user_profile = li.get_profile()
username = user_profile["username"]
is_bot = user_profile.get("title") == "BOT"
logger.info("Welcome {}!".format(username))
if is_bot is False:
is_bot = upgrade_account(li)
if is_bot:
start(li, user_profile, CONFIG)
else:
logger.error("{} is not a bot account. Please upgrade it to a bot account!".format(user_profile["username"]))
|
start_pipelined.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import subprocess
import threading
from collections import namedtuple
from concurrent.futures import Future
from enum import Enum
from magma.pipelined.app.base import MagmaController
from magma.pipelined.internal_ip_allocator import InternalIPAllocator
from magma.pipelined.qos.common import QosManager
from magma.pipelined.tests.app.exceptions import ServiceRunningError
from ryu.base.app_manager import AppManager
from ryu.lib import hub
class TestSetup(object):
"""
The TestSetup class variables
apps: [Controller]: ryu apps to instantiate
references: [Controller]: futures to get references of
instantiated apps
config: dict: config for ryu app
mconfig: dict: mconfig for ryu app
service_manager: ServiceManager: service manager for ryu app
integ_test: bool: set true when running tests in
integ setting
"""
def __init__(
self, apps, references, config, mconfig, loop,
service_manager, integ_test=False, rpc_stubs=None,
):
self.apps = apps
self.references = references
self.config = config
self.mconfig = mconfig
self.service_manager = service_manager
self.loop = loop
self.integ_test = integ_test
if rpc_stubs is None:
rpc_stubs = {}
self.rpc_stubs = rpc_stubs
Controller = namedtuple('Controller', ['name', 'app_future'])
class PipelinedController(Enum):
InOut = Controller(
'magma.pipelined.app.inout', 'inout',
)
Arp = Controller(
'magma.pipelined.app.arp', 'arpd',
)
IPV6RouterSolicitation = Controller(
'magma.pipelined.app.ipv6_solicitation',
'ipv6_solicitation',
)
GY = Controller(
'magma.pipelined.app.gy', 'gy',
)
Enforcement = Controller(
'magma.pipelined.app.enforcement', 'enforcement',
)
Enforcement_stats = Controller(
'magma.pipelined.app.enforcement_stats', 'enforcement_stats',
)
Testing = Controller(
'magma.pipelined.app.testing', 'testing',
)
AccessControl = Controller(
'magma.pipelined.app.access_control', 'access_control',
)
UEMac = Controller(
'magma.pipelined.app.ue_mac', 'ue_mac',
)
TunnelLearnController = Controller(
'magma.pipelined.app.tunnel_learn', 'tunnel_learn',
)
VlanLearn = Controller(
'magma.pipelined.app.vlan_learn', 'vlan_learn',
)
CheckQuotaController = Controller(
'magma.pipelined.app.check_quota', 'check_quota',
)
IPFIX = Controller(
'magma.pipelined.app.ipfix', 'ipfix',
)
LIMirror = Controller(
'magma.pipelined.app.li_mirror', 'li_mirror',
)
PacketTracer = Controller(
'magma.pipelined.app.packet_tracer', 'packet_tracer',
)
StartupFlows = Controller(
'magma.pipelined.app.startup_flows', 'startup_flows',
)
DPI = Controller(
'magma.pipelined.app.dpi', 'dpi',
)
UplinkBridge = Controller(
'magma.pipelined.app.uplink_bridge', 'uplink_bridge',
)
Conntrack = Controller(
'magma.pipelined.app.conntrack', 'conntrack',
)
Classifier = Controller(
'magma.pipelined.app.classifier', 'classifier',
)
HeaderEnrichment = Controller(
'magma.pipelined.app.he', 'proxy',
)
NGServiceController = Controller(
'magma.pipelined.app.ng_services', 'ng_services',
)
def assert_pipelined_not_running():
"""
    As Ryu applications shouldn't be started while the magma@pipelined service
    is running, we need to verify that pipelined is inactive. If the service is
    running, a ServiceRunningError exception is raised.
This can be done using the command:
systemctl is-active magma@pipelined
    If the service is inactive, this returns exit code 3 and the message "inactive"
"""
try:
output = subprocess.check_output(
["systemctl", "is-active", "magma@pipelined"],
)
except subprocess.CalledProcessError as e:
if "inactive" not in str(e.output, 'utf-8'):
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"caused an error code %d, exception - %s"
% (e.returncode, str(e.output, 'utf-8').strip()),
)
else:
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"output - %s" % str(output, 'utf-8').strip(),
)
class StartThread(object):
"""
Starts ryu applications
    Uses ryu hub and ryu app_manager to launch ryu applications. Futures are used
    to obtain references to the instantiated apps, which allows unit tests to
    call methods on the pipelined apps.
"""
_Event = namedtuple('_Event', ['func', 'future'])
def __init__(self, test_setup, launch_successful_future):
""" If verification fails throw an exception, don't start ryu apps """
if test_setup.integ_test is False:
hub.patch(thread=True)
assert_pipelined_not_running()
self._test_setup = test_setup
self.keep_running = True
self.done = False
self.event_queue = hub.Queue()
thread = threading.Thread(
target=self.start_ryu_apps, args=(launch_successful_future,),
)
thread.daemon = True
thread.start()
def start_ryu_apps(self, launch_successful_future):
"""
        Starts up the ryu applications; all of the configuration is parsed from
        the test_setup config provided in the unit test.
        If an app throws an exception on launch, the error is passed to
        launch_successful_future, which prevents waiting on it indefinitely.
"""
self.reset_static_vars()
hub.spawn(self._process_queue)
app_lists = [a.value.name for a in self._test_setup.apps]
app_futures = {
controller.value.app_future: future
for (controller, future) in self._test_setup.references.items()
}
manager = AppManager.get_instance()
manager.load_apps(app_lists)
contexts = manager.create_contexts()
contexts['sids_by_ip'] = {} # shared by both metering apps
contexts['rule_id_mapper'] = \
self._test_setup.service_manager.rule_id_mapper
contexts['internal_ip_allocator'] = \
InternalIPAllocator(self._test_setup.config)
contexts['session_rule_version_mapper'] = \
self._test_setup.service_manager.session_rule_version_mapper
contexts['interface_to_prefix_mapper'] = \
self._test_setup.service_manager.interface_to_prefix_mapper
contexts['restart_info_store'] = \
self._test_setup.service_manager.restart_info_store
contexts['app_futures'] = app_futures
contexts['config'] = self._test_setup.config
contexts['mconfig'] = self._test_setup.mconfig
contexts['loop'] = self._test_setup.loop
contexts['rpc_stubs'] = self._test_setup.rpc_stubs
contexts['service_manager'] = self._test_setup.service_manager
contexts['ebpf_manager'] = None
contexts['qos_manager'] = QosManager(self._test_setup.loop, self._test_setup.config)
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s %(levelname)s %(name)s] %(message)s',
)
services = []
try:
services.extend(manager.instantiate_apps(**contexts))
except Exception as e:
launch_successful_future.set_result(
"Ryu apps launch exception: {}".format(e),
)
raise
launch_successful_future.set_result("Setup successful")
self.run(manager)
def _process_queue(self):
"""
Run a queue to process external events that need to be run in the Ryu
greenthread
"""
while self.keep_running:
try:
event = self.event_queue.get(block=False)
val = event.func()
event.future.set_result(val)
except hub.QueueEmpty:
pass
finally:
hub.sleep(0.1)
def run_in_greenthread(self, func):
"""
When not monkey patching (i.e. when running a gRPC server), you cannot
call directly into a Ryu app. To do this, there needs to be a boundary
between futures and hub.Queues. When this function is called, a lambda
is passed which is sent into a queue to be run by the Ryu greenthread.
"""
ev = self._Event(func=func, future=Future())
self.event_queue.put(ev)
return ev.future.result()
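    # Usage sketch (an assumption, not part of this module): a test holding this
    # StartThread as `thread` and a controller reference resolved from its app future
    # can safely query the controller from the test thread:
    #
    #   result = thread.run_in_greenthread(lambda: controller.some_query())
    #
    # where `controller.some_query()` stands in for any method of a pipelined app.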
def run(self, manager):
""" Keep running until signalled from test file """
while self.keep_running:
hub.sleep(1)
manager.close()
self.done = True
def reset_static_vars(self):
""" Reset static vars for running nosetests """
AppManager._instance = AppManager()
MagmaController.TABLES = {}
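# Usage sketch (assumed, not part of this module): a unit test typically builds a
# TestSetup with futures for the controllers it needs references to, starts the apps
# with StartThread, and waits on launch_successful_future before running assertions.
# `config`, `mconfig`, `loop` and `service_manager` below are placeholders supplied by
# the test harness.
#
#   from concurrent.futures import Future
#
#   enforcement_ref = Future()
#   launched = Future()
#   setup = TestSetup(
#       apps=[PipelinedController.Enforcement],
#       references={PipelinedController.Enforcement: enforcement_ref},
#       config=config, mconfig=mconfig, loop=loop,
#       service_manager=service_manager, integ_test=False,
#   )
#   StartThread(setup, launched)
#   assert launched.result(timeout=30) == "Setup successful"
#   enforcement_controller = enforcement_ref.result(timeout=30)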
|
test_logging.py
|
# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
import smtpd
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = threading_helper.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
threading_helper.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
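#
# Once registered, a custom level behaves like a builtin one. For example (mirroring
# what CustomLevelsAndFiltersTest.setUp below does for every entry above):
#
#   logging.addLevelName(TERSE, my_logging_levels[TERSE])
#   logging.getLogger().log(TERSE, "a terse message")
#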
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args, encoding="utf-8")
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
    # This helps ensure that when fork exists (the important concept), the
    # register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt', encoding='utf-8'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self._quit = True
threading_helper.join_thread(self._thread)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
threading_helper.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
threading_helper.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
kwargs={{"encoding": "utf-8"}}
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, encoding="utf-8", **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
            # Replace single backslashes with double backslashes on Windows
            # to avoid a unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
encoding="utf-8",
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
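    # DatagramHandler inherits makePickle() from SocketHandler, so each UDP
    # packet still begins with the (redundant, for datagrams) 4-byte length
    # prefix; handle_datagram() below strips it before unpickling the record.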
def handle_datagram(self, request):
        slen = struct.pack('>L', 0)  # dummy value - only its length (4 bytes) matters
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
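    # The expected payloads below decode as follows: '<11>' is the syslog
    # priority, facility LOG_USER (1) * 8 + severity LOG_ERR (3) = 11; the
    # message itself is UTF-8 encoded; a trailing NUL is appended unless
    # append_nul is set to False; and 'ident', when set, is prepended verbatim.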
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
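    # HTTPHandler sends the record's attribute dict URL-encoded: in the query
    # string for GET requests and in the request body for POST requests, which
    # is why test_output() parses either self.log_data.query or self.post_data
    # with parse_qs().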
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # Non-ASCII data written through a FileHandler with an explicit
        # encoding should round-trip unchanged.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
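# Module-level helpers for the dictConfig tests below: formatFunc and
# handlerFunc are factories referenced through the '()' key, myCustomFormatter
# is a formatter-like class that does *not* derive from logging.Formatter
# (used by the validate3 config), and CustomHandler is a StreamHandler
# subclass referenced through the 'class' key.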
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
    # config2 has a subtle configuration error that should be reported:
    # the handler's stream is 'ext://sys.stdbout' (a typo for stdout).
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
    # config3 has a less subtle configuration error: the handler references
    # a formatter name that is never defined.
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
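    # config9 sets both the handler and the compiler.parser logger to WARNING;
    # config9a and config9b are incremental configurations ('incremental': True)
    # that only adjust the levels of the already-installed handler and logger,
    # as exercised by test_config_9_ok.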
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
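    # cfg:// values are resolved against this same configuration dict:
    # 'cfg://true_formatters' resolves to the top-level 'true_formatters'
    # mapping and 'cfg://handler_configs[hand1]' to a single entry of
    # 'handler_configs'; both dotted access and [key] indexing are accepted
    # (see test_baseconfig below).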
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
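    # Keys under the special '.' mapping are set as attributes on the handler
    # after it is constructed, so 'hand1' ends up with h.foo == 'bar' and
    # h.terminator == '!\n' (checked in test_config14_ok).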
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
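    # out_of_order pairs a %-style format string with 'style': '$', so a plain
    # dictConfig() call raises ValueError (test_out_of_order); once the format
    # is rewritten in $-style the config applies cleanly and the MemoryHandler's
    # string 'target' is resolved to the actual fileGlobal handler instance
    # (test_out_of_order_with_dollar_style).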
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
            # Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn,
"encoding": "utf-8",
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
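    # logging.config.listen() expects each configuration to arrive as a 4-byte
    # big-endian length prefix followed by the payload bytes, roughly
    #
    #     payload = json.dumps(config_dict).encode('utf-8')
    #     sock.sendall(struct.pack('>L', len(payload)) + payload)
    #
    # setup_via_listener() below builds and sends exactly that frame.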
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
threading_helper.join_thread(t)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
        # Use a deep copy so the shared class-level config dict is not mutated.
        config = copy.deepcopy(self.custom_formatter_class_validate)
        config['formatters']['form1']['style'] = "$"
        # Exception should not be raised as we have configured 'validate' to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
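    # QueueHandler.emit() runs the record through prepare(), which formats the
    # record into a single string (using the handler's formatter, if any) and
    # clears args/exc_info so the record can be pickled safely; that is why
    # test_queue_handler sees data.args as None and test_formatting sees
    # log_record.msg already formatted.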
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback is only appended once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class AssertErrorMessage:
def assert_error_message(self, exception, message, *args, **kwargs):
try:
            # assertRaises(()) with an empty exception tuple never swallows
            # anything, so the exception raised by the callable propagates to
            # the except clause below, where its message is compared.
            self.assertRaises((), *args, **kwargs)
except exception as e:
self.assertEqual(message, str(e))
class FormatterTest(unittest.TestCase, AssertErrorMessage):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
'custom': {
'custom': 1234
}
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
        # Test that ValueError is raised for incorrect format strings
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid format: invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
        self.assert_error_message(
            ValueError,
            "invalid format: invalid conversion: 'Z'",
            logging.Formatter, '{asctime!Z:15}', style='{'
        )
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"invalid format: bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
        # Testing failure for mismatched braces
self.assert_error_message(
ValueError,
"invalid format: expected '}' before end of string",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: Single '}' encountered in format string",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
        # Testing failure for a bare '$'
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
        # Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_defaults_parameter(self):
fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
styles = ['%', '{', '$']
for fmt, style in zip(fmts, styles):
f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
r = self.get_record()
self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
# Without default
f = logging.Formatter(fmt, style=style)
r = self.get_record()
self.assertRaises(ValueError, f.format, r)
# Non-existing default is ignored
f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
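    # test_time builds a fixed UTC datetime, converts it to a local-epoch
    # timestamp with time.mktime(), and then formats it back with
    # converter = time.gmtime, so the expected strings are independent of the
    # local timezone the tests run in.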
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# bpo-20037: Doing text I/O late at interpreter shutdown must not crash
code = textwrap.dedent("""
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()
""")
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_logging_at_shutdown_open(self):
# bpo-26789: FileHandler keeps a reference to the builtin open()
# function to be able to open or reopen the file during Python
# finalization.
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
code = textwrap.dedent(f"""
import builtins
import logging
class A:
def __del__(self):
logging.error("log in __del__")
# basicConfig() opens the file, but logging.shutdown() closes
# it at Python exit. When A.__del__() is called,
# FileHandler._open() must be called again to re-open the file.
logging.basicConfig(filename={filename!r}, encoding="utf-8")
a = A()
# Simulate the Python finalization which removes the builtin
# open() function.
del builtins.open
""")
assert_python_ok("-c", code)
with open(filename, encoding="utf-8") as fp:
self.assertEqual(fp.read().rstrip(), "ERROR:root:log in __del__")
def test_recursion_error(self):
# Issue 36272
code = textwrap.dedent("""
import logging
def rec():
logging.error("foo")
rec()
rec()
""")
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
def test_get_level_names_mapping(self):
mapping = logging.getLevelNamesMapping()
self.assertEqual(logging._nameToLevel, mapping) # value is equivalent
self.assertIsNot(logging._nameToLevel, mapping) # but not the internal data
new_mapping = logging.getLevelNamesMapping() # another call -> another copy
self.assertIsNot(mapping, new_mapping) # verify not the same object as before
self.assertEqual(mapping, new_mapping) # but equivalent in value
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
@staticmethod # pickled as target of child process in the following test
def _extract_logrecord_process_name(key, logMultiprocessing, conn=None):
prev_logMultiprocessing = logging.logMultiprocessing
logging.logMultiprocessing = logMultiprocessing
try:
import multiprocessing as mp
name = mp.current_process().name
r1 = logging.makeLogRecord({'msg': f'msg1_{key}'})
del sys.modules['multiprocessing']
r2 = logging.makeLogRecord({'msg': f'msg2_{key}'})
results = {'processName' : name,
'r1.processName': r1.processName,
'r2.processName': r2.processName,
}
finally:
logging.logMultiprocessing = prev_logMultiprocessing
if conn:
conn.send(results)
else:
return results
def test_multiprocessing(self):
multiprocessing_imported = 'multiprocessing' in sys.modules
try:
# logMultiprocessing is True by default
self.assertEqual(logging.logMultiprocessing, True)
LOG_MULTI_PROCESSING = True
# When logMultiprocessing == True:
# In the main process processName = 'MainProcess'
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
results = self._extract_logrecord_process_name(1, LOG_MULTI_PROCESSING)
self.assertEqual('MainProcess', results['processName'])
self.assertEqual('MainProcess', results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
            # In other processes, processName is correct when multiprocessing is imported,
# but it is (incorrectly) defaulted to 'MainProcess' otherwise (bpo-38762).
import multiprocessing
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(
target=self._extract_logrecord_process_name,
args=(2, LOG_MULTI_PROCESSING, child_conn,)
)
p.start()
results = parent_conn.recv()
self.assertNotEqual('MainProcess', results['processName'])
self.assertEqual(results['processName'], results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
p.join()
finally:
if multiprocessing_imported:
import multiprocessing
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', encoding='utf-8')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a', encoding='utf-8')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest, AssertErrorMessage):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: None',
self.logger.setLevel, None)
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: (0, 0)',
self.logger.setLevel, (0, 0))
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
def test_emit_after_closing_in_write_mode(self):
# Issue #42378
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', mode='w')
fh.setFormatter(logging.Formatter('%(message)s'))
fh.emit(self.next_rec()) # '1'
fh.close()
fh.emit(self.next_rec()) # '2'
with open(self.fn) as fp:
self.assertEqual(fp.read().strip(), '1')
class RotatingFileHandlerTest(BaseFileTest):
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8", maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.replace(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(
self.fn, 'S', encoding="utf-8", backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', encoding="utf-8", delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='MIDNIGHT', interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='W%d' % day, interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
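# secs() converts the timedelta built from its keyword arguments into a whole
# number of seconds; the loop below uses it to express each expected rollover
# interval and generates one test_compute_rollover_<when> method per interval,
# which is attached to TimedRotatingFileHandlerTest via setattr() further down.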
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
not_exported = {
'logThreads', 'logMultiprocessing', 'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger', 'root',
'threading'}
support.check__all__(self, logging, not_exported=not_exported)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
LocalAuthService.py
|
# Copyright (c) 2020.
# ThingiBrowser plugin is released under the terms of the LGPLv3 or higher.
import threading
from http.server import HTTPServer
from typing import Optional
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QDesktopServices
from UM.Signal import Signal # type: ignore
class LocalAuthService:
"""
Service that organizes multiple parallel authentication flows with web services.
"""
_server = None # type: Optional[HTTPServer]
_thread = None # type: Optional[threading.Thread]
# Signal emitted with as first argument the received token.
# We use a signal instead of a callback function in order to pass the token back to the Qt thread safely.
onTokenReceived = Signal()
@classmethod
def start(cls, url: str) -> None:
"""
Start the server in a separate thread and open the authentication page.
"""
if not cls._server:
# FIXME: when importing globally this causes issues with UM.Signal.Signal in PyTest
from ..api.ImplicitAuthRequestHandler import ImplicitAuthRequestHandler
ImplicitAuthRequestHandler.onTokenReceived.connect(cls.onTokenReceived)
cls._server = HTTPServer(("0.0.0.0", 55444), ImplicitAuthRequestHandler)
if not cls._thread:
cls._thread = threading.Thread(name="LocalAuthService", target=cls._server.serve_forever, daemon=True)
if not cls._thread.is_alive():
cls._thread.start()
QDesktopServices.openUrl(QUrl(url))
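# --- Hedged usage sketch (illustrative only, not part of the original plugin) ---
# Assuming the surrounding Uranium/Qt environment is available, a caller could
# connect a handler to the signal and then start the flow; the URL below is a
# placeholder, not a real endpoint:
#
#     def _on_token(token):
#         print("Received auth token:", token)
#
#     LocalAuthService.onTokenReceived.connect(_on_token)
#     LocalAuthService.start("https://example.com/authorize")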
|
main.py
|
#! /usr/bin/env python3
import argparse
import threading
import logging
from scapy.all import sniff, Ether, IP
from .database import create_tables, Entity, create_session, drop_tables
from queue import Queue
packet_queue = Queue()
def on_packet(p):
if Ether not in p or IP not in p:
return
packet_queue.put(p)
def process_data():
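    # Consume packets from the queue and store each previously unseen
    # (mac, ip) pair as an Entity row; pairs already in the database are skipped.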
packet_count = 0
while packet := packet_queue.get():
packet_count += 1
if packet_count % 100 == 0:
logging.info(f'Queue size: {packet_queue.qsize()}')
mac = packet[Ether].src
ip = packet[IP].src
session = create_session()
query = session.query(Entity).filter_by(mac=mac, ip=ip)
        if query.count() > 0:
            logging.debug(f'skipping packet {ip} {mac}')
            session.close()
            continue
entity = Entity(mac=mac, ip=ip)
session.add(entity)
session.commit()
logging.info(f'Added entity {entity}')
session.close()
def main():
parser = argparse.ArgumentParser(description='Minidetector is an example tool for detecting network identities and insert them into a postgres database')
parser.add_argument("--clean", const=True, default=False, nargs='?', help="prune the existing data before starting")
parser.add_argument("--debug", const=True, default=False, nargs='?', help="enable debug logging")
args = parser.parse_args()
logging.root.setLevel(logging.DEBUG if args.debug else logging.INFO)
if args.clean:
logging.debug('Dropping all tables')
drop_tables()
logging.debug('Creating all tables')
create_tables()
logging.debug('Starting sniffing thread')
sniffing_thread = threading.Thread(target=lambda: sniff(prn=on_packet), daemon=True)
sniffing_thread.start()
logging.debug('Starting to process packets')
process_data()
if __name__ == '__main__':
main()
|
MeteorReval.py
|
import sublime
import functools
import sublime_plugin
import urllib.request
import re
PROJECT_NAME = 'meteor_reval'
settings = None
RUN_COMMAND = PROJECT_NAME
PROJECT_SETTINGS_KEY = PROJECT_NAME
def plugin_loaded():
global settings
settings = sublime.load_settings("MeteorReval.sublime-settings")
""" Pattern taken from
<https://github.com/jonlabelle/SublimeJsPrettier/blob/8d731207666f003ca31ec3646c9ce3373c214105/JsPrettier.py>
"""
def get_project_setting(key):
project_settings = sublime.active_window().active_view().settings()
if not project_settings:
return None
project_setting = project_settings.get(PROJECT_SETTINGS_KEY)
if project_setting:
if key in project_setting:
return project_setting[key]
return None
def get_setting(key, default_value=None):
value = settings.get(key, default_value)
project_value = get_project_setting(key)
if project_value is None:
return value
return project_value
class meteorReval(sublime_plugin.EventListener):
pending = 0
def handle_timeout(self, view):
self.pending = self.pending - 1
if self.pending == 0:
view.run_command(RUN_COMMAND)
def on_modified_async(self, view):
if get_setting('reload_on_modified') is True:
required_path = get_setting('required_path')
required_regex = get_setting('required_regex')
file_path = view.file_name()
if (file_path and file_path.find(required_path) >= 0 and re.search(required_regex, file_path)):
self.pending = self.pending + 1
sublime.set_timeout(functools.partial(self.handle_timeout, view), get_setting('reload_debounce'))
class meteorRevalCommand(sublime_plugin.TextCommand):
def run(self, view):
if (self.view.file_name()):
path = get_setting('path')
hostname = get_setting('hostname')
port = get_setting('port')
endpoint = get_setting('endpoint')
url = 'http://' + hostname + ':' + str(port) + endpoint + '?filePath=' + self.view.file_name().replace(path, '')
print (url)
data = self.view.substr(sublime.Region(0, self.view.size()))
request = HttpAsyncRequest(url)
request.set_content(str.encode(data))
request.set_header('Content-type', 'text/plain')
request.send()
# Library for doing async HTTP (threading yeah!)
_user_agent = "qualia"
import threading as _threading
_is_old = 3 / 2 == 1 # Yeah, I'm sure there's a better way. Deal with it.
if _is_old:
import urllib as _urllib
import urllib2 as _urllib2
import urlparse as _urlparse
else:
import urllib as _urllib
import urllib.parse as _urllib_parse
import urllib.request as _urllib_request
def _parse_url(url):
return _urlparse.urlparse(url)
def set_user_agent(value):
global _user_agent
_user_agent = value
def decode_url_value(value):
if _is_old:
return _urllib.unquote(value).decode('utf8')
else:
return _urllib_parse.unquote(value)
def encode_url_value(value):
if _is_old:
return _urllib2.quote(value.encode('utf8'))
else:
return _urllib_parse.quote(value)
def _send_impl(req_obj, method, url, headers, content):
if _is_old:
opener = _urllib2.build_opener(_urllib2.HTTPHandler)
if content == None:
request = _urllib2.Request(url)
else:
request = _urllib2.Request(url, data=content)
else:
opener = _urllib_request.build_opener(_urllib_request.HTTPHandler)
if content == None:
request = _urllib_request.Request(url)
else:
request = _urllib_request.Request(url, data=content)
for header in headers:
request.add_header(header[0], header[1])
request.get_method = lambda:method
output = opener.open(request)
content = output.read()
headers = {}
for header_key in output.headers.keys():
headers[header_key] = output.headers[header_key]
response_message = output.msg
response_code = output.code
req_obj._set_result(response_code, response_message, content, headers)
class HttpAsyncRequest:
def __init__(self, url):
bad_format = False
try:
if _is_old:
url_parts = _parse_url(url)
else:
url_parts = _urllib_parse.urlparse(url)
if url_parts.scheme == '' or url_parts.netloc == '':
bad_format = True
except:
bad_format = True
if bad_format:
raise Exception("Bad URL! Bad!")
self.mutex = _threading.Lock()
self.method = 'GET'
self.scheme = url_parts.scheme
self.host = url_parts.hostname
self.port = url_parts.port
self.path = url_parts.path
self.fragment = url_parts.fragment
self.params = url_parts.params
self.original_query = url_parts.query # use this if query params are not modified
self.query = None # if modified, clear original_query and populate this with a dictionary lookup
self.header_formatting = {} # preserves the formatting of the header key
self.header_values = {} # canonical key of header with list of values of that header
self.content = None
self.set_header('User-Agent', _user_agent)
self.done = False
self.response_code = -1
self.response_message = None
self.response_content = None
self.response_headers_values = None
self.response_headers_formatting = None
def send(self):
url = self.scheme + '://' + self.host
if self.port != None:
url += ':' + str(self.port)
if self.path != None and self.path != '':
if self.path[0] != '/':
self.path = '/' + self.path
url += self.path
if self.params != None and self.params != '':
url += ';' + self.params
if self.query == None:
if self.original_query != '':
url += '?' + self.original_query
else:
queries = []
            keys = sorted(self.query.keys())  # deterministic requests
for key in keys:
e_key = encode_url_value(key)
for value in self.query[key]:
e_value = encode_url_value(value)
queries.append(e_key + '=' + e_value)
url += '?' + '&'.join(queries)
if self.fragment != '':
url += '#' + self.fragment
headers = []
keys = list(self.header_formatting.keys())
keys.sort()
for key in keys:
f_key = self.header_formatting[key]
for value in self.header_values[key]:
headers.append((f_key, value))
thread = _threading.Thread(target = _send_impl, args = (self, self.method, url, headers, self.content))
thread.daemon = True
thread.start()
def _set_result(self, code, message, content, headers):
self.mutex.acquire()
try:
self.response_code = code
self.response_message = message
self.response_content = content
self.response_headers_values = {}
self.response_headers_formatting = {}
for key in headers.keys():
ckey = key.lower()
self.response_headers_values[ckey] = headers[key]
self.response_headers_formatting[ckey] = key
finally:
self.mutex.release()
def is_complete(self):
self.mutex.acquire()
try:
return self.response_code != -1
finally:
self.mutex.release()
def _ensure_request_complete(self):
if not self.is_complete():
raise Exception("Cannot access response until request is complete.")
def get_response_code(self):
self._ensure_request_complete()
return self.response_code
def get_response_message(self):
self._ensure_request_complete()
return self.response_message
def get_response_header_names(self):
self._ensure_request_complete()
output = list(self.response_headers_formatting.values())
output.sort()
return output
def get_response_header(self, name):
self._ensure_request_complete()
return self.response_headers_values.get(name.lower(), None)
def get_response_content(self, mode='t'):
self._ensure_request_complete()
output = self.response_content
if mode == 't':
return output.decode('utf-8')
else:
return output
def set_header(self, key, value):
self.header_formatting[key.lower()] = key
self.header_values[key.lower()] = [value]
def add_header(self, key, value):
canonical_key = key.lower()
existing_headers = self.header_values.get(canonical_key, None)
if existing_headers == None:
self.set_header(key, value)
else:
existing_headers.append(value)
def clear_header(self, key):
canonical_key = key.lower()
if self.header_values.get(canonical_key, None) != None:
self.header_values.pop(canonical_key)
self.header_formatting.pop(canonical_key)
def set_method(self, method):
self.method = method
def set_content(self, content):
self.content = content
def _init_query(self):
if self.query == None:
            query = self.original_query.split('&') if self.original_query != '' else []
lookup_values = {}
for item in query:
parts = item.split('=')
if len(parts) >= 2:
item_key = decode_url_value(parts[0])
item_value = decode_url_value('='.join(parts[1:]))
existing_values = lookup_values.get(item_key, None)
if existing_values == None:
existing_values = []
lookup_values[item_key] = existing_values
existing_values.append(item_value)
self.query = lookup_values
def set_query(self, key, value):
self._init_query()
self.query[key] = [value]
def add_query(self, key, value):
self._init_query()
values = self.query.get(key, None)
if values != None:
values.append(value)
else:
self.query[key] = [value]
def clear_query(self, key):
self._init_query()
if self.query.get(key, None) != None:
self.query.pop(key)
def set_port(self, port):
self.port = port
def set_fragment(self, fragment):
self.fragment = fragment
def clear_fragment(self):
self.fragment = None
    def set_scheme(self, scheme):
self.scheme = scheme
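# Illustrative usage sketch (the enclosing request class is defined above this
# excerpt; 'req' stands for an already-constructed instance):
#
#     req.set_method('GET')
#     req.add_query('page', '2')
#     req.set_header('Accept', 'application/json')
#     req.send()                       # _send_impl runs on a daemon thread
#     while not req.is_complete():     # poll until _set_result() has stored a response
#         time.sleep(0.05)
#     print(req.get_response_code(), req.get_response_content('t'))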
|
zmq02.py
|
"""
Sample of the ZeroMQ (PyZMQ) PUB-SUB pattern.
REFERENCES::
http://zguide.zeromq.org/page:all#Getting-the-Message-Out
"""
import datetime as dt
import multiprocessing as mp
import time
import zmq
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
"""ZeroMQ の PUB-SUB パターンのサンプルです。"""
_SERVERURL = 'tcp://*:5555'
_CLIENTURL = 'tcp://localhost:5555'
def exec(self):
"""サンプル処理を実行します。"""
        # ----------------------------------------------------
        # Start the server-side and client-side processes.
        #
        # This is the PUB-SUB pattern, so there is one PUBLISHing
        # process and several SUBSCRIBER processes.
        #
        # The PUB side publishes the current second every second; each SUB
        # side receives and prints only the data that matches its filter.
        # An empty-string filter means "receive everything".
        #
        # References:
        #   http://zguide.zeromq.org/py:wuserver
        #   http://zguide.zeromq.org/py:wuclient
        #   https://stackoverflow.com/questions/13904626/zeromq-and-multiple-subscribe-filters-in-python
        #   https://stackoverflow.com/questions/6024003/why-doesnt-zeromq-work-on-localhost
        #
        # Note: as the ZeroMQ guide points out, subscribers always miss the
        # first messages that are published (the "slow joiner" problem).
        #   http://zguide.zeromq.org/page:all#Getting-the-Message-Out
        #
        # Quote::
        #   the subscriber will always miss the first messages that the publisher sends.
        # ----------------------------------------------------
server_proc = mp.Process(target=Sample.server, args=())
client_procs = [
mp.Process(target=Sample.client, args=(str(i),))
for i in range(10, 61, 10)
]
client_all_proc = mp.Process(target=Sample.client_all, args=())
# ----------------------------------------------------
        # Start the processes
# ----------------------------------------------------
        for p in client_procs:
            p.start()
        client_all_proc.start()
        server_proc.start()
# ----------------------------------------------------
        # Wait for the processes to finish
# ----------------------------------------------------
        for p in client_procs:
            p.join()
        client_all_proc.join()
        server_proc.join()
@staticmethod
def server():
"""PUB側の処理を行います。"""
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind(Sample._SERVERURL)
# ----------------------------------------------------
        # Publish a value every second
# ----------------------------------------------------
while True:
message = dt.datetime.now().strftime('%S')
socket.send_string(message)
print(f'[server] pub --> {message}', flush=True)
time.sleep(1)
@staticmethod
def client(sub_filter):
"""SUB側の処理を行います。(フィルター付き)"""
start = int(sub_filter)
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect(Sample._CLIENTURL)
# ----------------------------------------------------
        # Configure the subscription filters
# ----------------------------------------------------
for f in (str(i) for i in range(start, start + 10)):
socket.setsockopt_string(zmq.SUBSCRIBE, f)
# ----------------------------------------------------
        # Receive loop
# ----------------------------------------------------
while True:
message = socket.recv_string()
print(f'\t[client-{sub_filter}]: {message}', flush=True)
@staticmethod
def client_all():
"""SUB側の処理を行います。(フィルター無し(全部取得))"""
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect(Sample._CLIENTURL)
# ----------------------------------------------------
        # Configure the subscription filter.
        # An empty-string filter means "receive everything".
# ----------------------------------------------------
socket.setsockopt_string(zmq.SUBSCRIBE, '')
# ----------------------------------------------------
        # Receive loop
# ----------------------------------------------------
while True:
message = socket.recv_string()
print(f'\t[client-all]: {message}', flush=True)
def go():
obj = Sample()
obj.exec()
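# A minimal sketch (using only the zmq and time modules already imported above)
# showing that SUBSCRIBE filters are plain prefix matches. This is why
# Sample.client subscribes to ten explicit second-values instead of a single
# numeric range: a filter of '1' would also match '10'..'19'.
def _filter_demo():
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    sub = ctx.socket(zmq.SUB)
    pub.bind('inproc://filter-demo')
    sub.connect('inproc://filter-demo')
    sub.setsockopt_string(zmq.SUBSCRIBE, '1')  # prefix match: '1', '10', '15', ...
    time.sleep(0.1)                            # give the slow joiner time to connect
    for msg in ('1', '10', '25', '15'):
        pub.send_string(msg)
    received = []
    while sub.poll(timeout=200):
        received.append(sub.recv_string())
    pub.close()
    sub.close()
    return received  # expected: ['1', '10', '15'] ('25' is filtered out)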
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import copy
import errno
import signal
import socket
import hashlib
import logging
import weakref
import threading
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.log.setup
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.zeromq
import salt.utils.versions
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError, SaltException
from salt._compat import ipaddress
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
def _get_master_uri(master_ip,
master_port,
source_ip=None,
source_port=None):
'''
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
'''
from salt.utils.zeromq import ip_bracket
master_uri = 'tcp://{master_ip}:{master_port}'.format(
master_ip=ip_bracket(master_ip), master_port=master_port)
if source_ip or source_port:
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip), source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_ip and not source_port:
master_uri = 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_port and not source_ip:
ip_any = '0.0.0.0' if ipaddress.ip_address(master_ip).version == 4 else ip_bracket('::')
master_uri = 'tcp://{ip_any}:{source_port};{master_ip}:{master_port}'.format(
ip_any=ip_any, source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
else:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
            log.warning('Specific source IP / port for connecting to master returner port: configuration ignored')
return master_uri
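# Illustrative note (not from the Salt sources): with libzmq >= 4.1.6 and
# pyzmq >= 16.0.1, the URIs produced above look like
#   _get_master_uri('192.168.1.1', 4506)
#       -> 'tcp://192.168.1.1:4506'
#   _get_master_uri('192.168.1.1', 4506, source_ip='192.168.1.17', source_port=5555)
#       -> 'tcp://192.168.1.17:5555;192.168.1.1:4506'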
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncZeroMQReqChannel for %s', key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
log.trace('Inserted key into loop_instance_map id %s for key %s and process %s',
id(loop_instance_map), key, os.getpid())
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug('Re-using AsyncZeroMQReqChannel for %s', key)
return obj
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ('_io_loop', '_refcount', '_refcount_lock'):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == 'message_client':
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(result, key,
AsyncReqMessageClientPool(result.opts,
args=(result.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop}))
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != 'clear':
# we don't need to worry about auth as a kwarg, since its a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug('Connecting the Minion to the Master URI (for the return server): %s', self.master_uri)
self.message_client = AsyncReqMessageClientPool(self.opts,
args=(self.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop})
self._closing = False
def close(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
'This is not the last %s instance. Not closing yet.',
self.__class__.__name__
)
return
log.debug('Closing %s instance', self.__class__.__name__)
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self._io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self._io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self._io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
@property
def master_uri(self):
if 'master_uri' in self.opts:
return self.opts['master_uri']
# if by chance master_uri is not there..
if 'master_ip' in self.opts:
return _get_master_uri(self.opts['master_ip'],
self.opts['master_port'],
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_ret_port'))
# if we've reached here something is very abnormal
raise SaltException('ReqChannel: missing master_uri/master_ip in self.opts')
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if 'key' not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
if HAS_M2:
aes = key.private_decrypt(ret['key'],
RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
:param int tries: The number of times to make before failure
:param int timeout: The number of seconds on a response before failing
'''
@tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
# we may not have always data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # First attempt; on an auth error, re-authenticate and retry below
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
:param int tries: The number of times to make before failure
:param int timeout: The number of seconds on a response before failing
'''
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
raise tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')
if self.opts.get('__role') == 'syndic':
self._socket.setsockopt(zmq.SUBSCRIBE, b'syndic')
else:
self._socket.setsockopt(
zmq.SUBSCRIBE,
salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b'')
self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id']))
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'])
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts['recon_default'] + self.opts['recon_max']
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def close(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
else:
self._stream.close(0)
elif hasattr(self, '_socket'):
self._socket.close(0)
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
self.publish_port = self.auth.creds['publish_port']
log.debug('Connecting the Minion to the Master publish port, using the URI: %s', self.master_pub)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return _get_master_uri(self.opts['master_ip'],
self.publish_port,
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_publish_port'))
@tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
# if it was one message, then its old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (self.opts.get('__role') != 'syndic' and message_target not in ('broadcast', self.hexid)) or \
(self.opts.get('__role') == 'syndic' and message_target not in ('broadcast', 'syndic')):
log.debug('Publish received for not this minion: %s', message_target)
raise tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin,
salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
self.__setup_signals()
salt.utils.process.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except (KeyboardInterrupt, SystemExit):
break
def close(self):
'''
Cleanly shutdown the router socket
'''
if self._closing:
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
# pylint: disable=E0203
if getattr(self, '_monitor', None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, '_w_monitor', None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if hasattr(self, '_socket') and self._socket.closed is False:
self._socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
'''
Starts ZMQ monitor for debugging purposes.
:return:
'''
        # The socket monitor shall be used only for debugging purposes,
        # so using threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
log.debug('Starting ZMQ monitor')
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
            log.debug('ZMQ monitor has been started')
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
:param func payload_handler: A function to called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket %s', self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
Handle incoming messages from underlying TCP streams
        :param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc:
exc_type = type(exc).__name__
if exc_type == 'AuthenticationError':
log.debug(
'Minion failed to auth to master. Since the payload is '
'encrypted, it is not known which minion failed to '
'authenticate. It is likely that this is a transient '
'failure due to the master rotating its public key.'
)
else:
log.error('Bad load from minion: %s: %s', exc_type, exc)
stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: %s and load was %s', payload, payload.get('load'))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
stream.send('Server-side exception handling payload')
raise tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
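# Illustrative note (summarizing the code above, not part of Salt's docs):
# zmq_device() runs a zmq.QUEUE proxy in its own process, binding a ROUTER
# socket to 'tcp://{interface}:{ret_port}' for minions and a DEALER socket to
# the workers.ipc (or TCP) endpoint; each worker that calls post_fork() then
# connects a REP socket to that endpoint, so minion requests are distributed
# across the worker processes.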
def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
it's host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
)
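# Illustrative sketch (hypothetical values, not Salt defaults):
#   _set_tcp_keepalive(sock, {'tcp_keepalive': 1, 'tcp_keepalive_idle': 300,
#                             'tcp_keepalive_cnt': -1, 'tcp_keepalive_intvl': -1})
# would, on platforms that expose these socket options, enable keepalives,
# start probing after 300 seconds of idle time, and leave the probe count and
# interval at the operating-system defaults (-1).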
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return tornado.gen.sleep(5)
def _publish_daemon(self, log_queue=None):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
if log_queue:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
        # if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on %s', pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.debug('Publish daemon getting data from puller %s', pull_uri)
package = pull_sock.recv()
log.debug('Publish daemon received payload. size=%d', len(package))
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
log.trace('Accepted unpacked package from puller')
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
log.trace('Sending filtered data over publisher %s', pub_uri)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(salt.utils.stringutils.to_bytes(topic)).hexdigest())
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent')
# Syndic broadcast
if self.opts.get('order_masters'):
log.trace('Sending filtered data to syndic')
pub_sock.send(b'syndic', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent to syndic')
# otherwise its a broadcast
else:
# TODO: constants file for "broadcast"
log.trace('Sending broadcasted data over publisher %s', pub_uri)
pub_sock.send(b'broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Broadcasted data has been sent')
else:
log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri)
pub_sock.send(payload)
log.trace('Unfiltered data has been sent')
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except KeyboardInterrupt:
            log.trace('Publish daemon caught KeyboardInterrupt, tearing down')
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
@property
def pub_sock(self):
'''
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
'''
try:
return self._sock_data.sock
except AttributeError:
pass
def pub_connect(self):
'''
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
'''
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
'''
Disconnect an existing publisher socket and remove it from the local
thread's cache.
'''
if hasattr(self._sock_data, 'sock'):
self._sock_data.sock.close()
delattr(self._sock_data, 'sock')
def publish(self, load):
'''
Publish "load" to minions. This send the load to the publisher daemon
process with does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
            # Send the list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
'Sending payload to publish daemon. jid=%s size=%d',
load.get('jid', None), len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug('Sent payload to publish daemon.')
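# Illustrative note (shape only, values abbreviated): publish() hands the pull
# socket a serialized dict of the form
#   {'payload': serial.dumps({'enc': 'aes', 'load': <crypticle blob>,
#                             'sig': <optional signature>}),
#    'topic_lst': ['minion1', 'minion2']}   # only for list / matched targets
# which _publish_daemon() unpackages and fans out over the PUB socket.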
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
'''
    Wrapper class of AsyncReqMessageClient to avoid blocking while waiting to write data to the socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
self._closing = False
def close(self):
if self._closing:
return
self._closing = True
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
:param str addr: The interface IP address to bind to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
self._closing = False
# TODO: timeout all in-flight sessions, or error
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'stream') and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug('Trying to connect to: %s', self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
@tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
                # Timed out
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=W0702
log.debug('Re-init ZMQ socket: %s', err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
            # Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. (%s/%s)', future.attempts, future.tries)
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
'''
Return a future which will be completed when the message has a response
'''
if future is None:
future = tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
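# Illustrative note (summarizing the class above): send() serializes the
# message (when no future was passed in), registers a Future and an optional
# timeout, and appends the message to send_queue; _internal_send_recv() drains
# that queue one request at a time over the single REQ socket, resolving each
# Future from the matching reply (or re-initializing the socket on error),
# while timeout_message() retries up to `tries` times before failing the
# Future with SaltReqTimeoutError.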
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: %s", evt)
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
test_wrapper.py
|
from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from six.moves import range
import numpy as np
import numpy.linalg as la
import pytest
import pyopencl as cl
import pyopencl.array as cl_array
import pyopencl.cltypes as cltypes
import pyopencl.clrandom
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
from pyopencl.characterize import get_pocl_version
# Are CL implementations crashy? You be the judge. :)
try:
import faulthandler # noqa
except ImportError:
pass
else:
faulthandler.enable()
def _skip_if_pocl(plat, up_to_version, msg='unsupported by pocl'):
if plat.vendor == "The pocl project":
if up_to_version is None or get_pocl_version(plat) <= up_to_version:
import pytest
pytest.skip(msg)
def test_get_info(ctx_factory):
ctx = ctx_factory()
device, = ctx.devices
platform = device.platform
failure_count = [0]
pocl_quirks = [
(cl.Buffer, cl.mem_info.OFFSET),
(cl.Program, cl.program_info.BINARIES),
(cl.Program, cl.program_info.BINARY_SIZES),
]
if ctx._get_cl_version() >= (1, 2) and cl.get_cl_header_version() >= (1, 2):
pocl_quirks.extend([
(cl.Program, cl.program_info.KERNEL_NAMES),
(cl.Program, cl.program_info.NUM_KERNELS),
])
CRASH_QUIRKS = [ # noqa
(("NVIDIA Corporation", "NVIDIA CUDA",
"OpenCL 1.0 CUDA 3.0.1"),
[
(cl.Event, cl.event_info.COMMAND_QUEUE),
]),
(("NVIDIA Corporation", "NVIDIA CUDA",
"OpenCL 1.2 CUDA 7.5"),
[
(cl.Buffer, getattr(cl.mem_info, "USES_SVM_POINTER", None)),
]),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.8-pre"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.8"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.9-pre"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.9"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.10-pre"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.10"),
pocl_quirks),
(("Apple", "Apple",
"OpenCL 1.2"),
[
(cl.Program, cl.program_info.SOURCE),
]),
]
QUIRKS = [] # noqa
def find_quirk(quirk_list, cl_obj, info):
for (vendor, name, version), quirks in quirk_list:
if (
vendor == platform.vendor
and name == platform.name
and platform.version.startswith(version)):
for quirk_cls, quirk_info in quirks:
if (isinstance(cl_obj, quirk_cls)
and quirk_info == info):
return True
return False
def do_test(cl_obj, info_cls, func=None, try_attr_form=True):
if func is None:
def func(info):
cl_obj.get_info(info)
for info_name in dir(info_cls):
if not info_name.startswith("_") and info_name != "to_string":
print(info_cls, info_name)
info = getattr(info_cls, info_name)
if find_quirk(CRASH_QUIRKS, cl_obj, info):
print("not executing get_info", type(cl_obj), info_name)
print("(known crash quirk for %s)" % platform.name)
continue
try:
func(info)
except Exception:
msg = "failed get_info", type(cl_obj), info_name
if find_quirk(QUIRKS, cl_obj, info):
msg += ("(known quirk for %s)" % platform.name)
else:
failure_count[0] += 1
if try_attr_form:
try:
getattr(cl_obj, info_name.lower())
except Exception:
print("failed attr-based get_info", type(cl_obj), info_name)
if find_quirk(QUIRKS, cl_obj, info):
print("(known quirk for %s)" % platform.name)
else:
failure_count[0] += 1
do_test(platform, cl.platform_info)
do_test(device, cl.device_info)
do_test(ctx, cl.context_info)
    profiling = False
    props = 0
    if (device.queue_properties
            & cl.command_queue_properties.PROFILING_ENABLE):
        profiling = True
        props = cl.command_queue_properties.PROFILING_ENABLE
queue = cl.CommandQueue(ctx,
properties=props)
do_test(queue, cl.command_queue_info)
prg = cl.Program(ctx, """
__kernel void sum(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
do_test(prg, cl.program_info)
do_test(prg, cl.program_build_info,
lambda info: prg.get_build_info(device, info),
try_attr_form=False)
n = 2000
a_buf = cl.Buffer(ctx, 0, n*4)
do_test(a_buf, cl.mem_info)
kernel = prg.sum
do_test(kernel, cl.kernel_info)
evt = kernel(queue, (n,), None, a_buf)
do_test(evt, cl.event_info)
if profiling:
evt.wait()
do_test(evt, cl.profiling_info,
lambda info: evt.get_profiling_info(info),
try_attr_form=False)
# crashes on intel...
# and pocl does not support CL_ADDRESS_CLAMP
if device.image_support and platform.vendor not in [
"Intel(R) Corporation",
"The pocl project",
]:
smp = cl.Sampler(ctx, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
do_test(smp, cl.sampler_info)
img_format = cl.get_supported_image_formats(
ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0]
img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256))
assert img.shape == (128, 256)
img.depth
img.image.depth
do_test(img, cl.image_info,
lambda info: img.get_image_info(info))
def test_int_ptr(ctx_factory):
def do_test(obj):
new_obj = type(obj).from_int_ptr(obj.int_ptr)
assert obj == new_obj
assert type(obj) is type(new_obj)
ctx = ctx_factory()
device, = ctx.devices
platform = device.platform
do_test(device)
do_test(platform)
do_test(ctx)
queue = cl.CommandQueue(ctx)
do_test(queue)
evt = cl.enqueue_marker(queue)
do_test(evt)
prg = cl.Program(ctx, """
__kernel void sum(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
do_test(prg)
do_test(prg.sum)
n = 2000
a_buf = cl.Buffer(ctx, 0, n*4)
do_test(a_buf)
# crashes on intel...
# and pocl does not support CL_ADDRESS_CLAMP
if device.image_support and platform.vendor not in [
"Intel(R) Corporation",
"The pocl project",
]:
smp = cl.Sampler(ctx, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
do_test(smp)
img_format = cl.get_supported_image_formats(
ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0]
img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256))
do_test(img)
def test_invalid_kernel_names_cause_failures(ctx_factory):
ctx = ctx_factory()
device = ctx.devices[0]
prg = cl.Program(ctx, """
__kernel void sum(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
try:
prg.sam
raise RuntimeError("invalid kernel name did not cause error")
except AttributeError:
pass
except RuntimeError:
if "Intel" in device.platform.vendor:
from pytest import xfail
xfail("weird exception from OpenCL implementation "
"on invalid kernel name--are you using "
"Intel's implementation? (if so, known bug in Intel CL)")
else:
raise
def test_image_format_constructor():
# doesn't need image support to succeed
iform = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
assert iform.channel_order == cl.channel_order.RGBA
assert iform.channel_data_type == cl.channel_type.FLOAT
assert not iform.__dict__
def test_device_topology_amd_constructor():
# doesn't need cl_amd_device_attribute_query support to succeed
topol = cl.DeviceTopologyAmd(3, 4, 5)
assert topol.bus == 3
assert topol.device == 4
assert topol.function == 5
assert not topol.__dict__
def test_nonempty_supported_image_formats(ctx_factory):
context = ctx_factory()
device = context.devices[0]
if device.image_support:
assert len(cl.get_supported_image_formats(
context, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)) > 0
else:
from pytest import skip
skip("images not supported on %s" % device.name)
def test_that_python_args_fail(ctx_factory):
context = ctx_factory()
prg = cl.Program(context, """
__kernel void mult(__global float *a, float b, int c)
{ a[get_global_id(0)] *= (b+c); }
""").build()
a = np.random.rand(50000)
queue = cl.CommandQueue(context)
mf = cl.mem_flags
a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)
knl = cl.Kernel(prg, "mult")
try:
knl(queue, a.shape, None, a_buf, 2, 3)
assert False, "PyOpenCL should not accept bare Python types as arguments"
except cl.LogicError:
pass
try:
prg.mult(queue, a.shape, None, a_buf, float(2), 3)
assert False, "PyOpenCL should not accept bare Python types as arguments"
except cl.LogicError:
pass
prg.mult(queue, a.shape, None, a_buf, np.float32(2), np.int32(3))
a_result = np.empty_like(a)
cl.enqueue_read_buffer(queue, a_buf, a_result).wait()
def test_image_2d(ctx_factory):
context = ctx_factory()
device, = context.devices
if not device.image_support:
from pytest import skip
skip("images not supported on %s" % device)
if "Intel" in device.vendor and "31360.31426" in device.version:
from pytest import skip
skip("images crashy on %s" % device)
_skip_if_pocl(device.platform, None, 'pocl does not support CL_ADDRESS_CLAMP')
prg = cl.Program(context, """
__kernel void copy_image(
__global float *dest,
__read_only image2d_t src,
sampler_t samp,
int stride0)
{
int d0 = get_global_id(0);
int d1 = get_global_id(1);
/*
const sampler_t samp =
CLK_NORMALIZED_COORDS_FALSE
| CLK_ADDRESS_CLAMP
| CLK_FILTER_NEAREST;
*/
dest[d0*stride0 + d1] = read_imagef(src, samp, (float2)(d1, d0)).x;
}
""").build()
num_channels = 1
a = np.random.rand(1024, 512, num_channels).astype(np.float32)
if num_channels == 1:
a = a[:, :, 0]
queue = cl.CommandQueue(context)
try:
a_img = cl.image_from_array(context, a, num_channels)
except cl.RuntimeError:
import sys
exc = sys.exc_info()[1]
if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED:
from pytest import skip
skip("required image format not supported on %s" % device.name)
else:
raise
a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes)
samp = cl.Sampler(context, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
prg.copy_image(queue, a.shape, None, a_dest, a_img, samp,
np.int32(a.strides[0]/a.dtype.itemsize))
a_result = np.empty_like(a)
cl.enqueue_copy(queue, a_result, a_dest)
good = la.norm(a_result - a) == 0
if not good:
if queue.device.type & cl.device_type.CPU:
assert good, ("The image implementation on your CPU CL platform '%s' "
"returned bad values. This is bad, but common."
% queue.device.platform)
else:
assert good
def test_image_3d(ctx_factory):
#test for image_from_array for 3d image of float2
context = ctx_factory()
device, = context.devices
if not device.image_support:
from pytest import skip
skip("images not supported on %s" % device)
if device.platform.vendor == "Intel(R) Corporation":
from pytest import skip
skip("images crashy on %s" % device)
_skip_if_pocl(device.platform, None, 'pocl does not support CL_ADDRESS_CLAMP')
prg = cl.Program(context, """
__kernel void copy_image_plane(
__global float2 *dest,
__read_only image3d_t src,
sampler_t samp,
int stride0,
int stride1)
{
int d0 = get_global_id(0);
int d1 = get_global_id(1);
int d2 = get_global_id(2);
/*
const sampler_t samp =
CLK_NORMALIZED_COORDS_FALSE
| CLK_ADDRESS_CLAMP
| CLK_FILTER_NEAREST;
*/
dest[d0*stride0 + d1*stride1 + d2] = read_imagef(
src, samp, (float4)(d2, d1, d0, 0)).xy;
}
""").build()
num_channels = 2
shape = (3, 4, 2)
a = np.random.random(shape + (num_channels,)).astype(np.float32)
queue = cl.CommandQueue(context)
try:
a_img = cl.image_from_array(context, a, num_channels)
except cl.RuntimeError:
import sys
exc = sys.exc_info()[1]
if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED:
from pytest import skip
skip("required image format not supported on %s" % device.name)
else:
raise
a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes)
samp = cl.Sampler(context, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
prg.copy_image_plane(queue, shape, None, a_dest, a_img, samp,
np.int32(a.strides[0]/a.itemsize/num_channels),
np.int32(a.strides[1]/a.itemsize/num_channels),
)
a_result = np.empty_like(a)
cl.enqueue_copy(queue, a_result, a_dest)
good = la.norm(a_result - a) == 0
if not good:
if queue.device.type & cl.device_type.CPU:
assert good, ("The image implementation on your CPU CL platform '%s' "
"returned bad values. This is bad, but common."
% queue.device.platform)
else:
assert good
def test_copy_buffer(ctx_factory):
context = ctx_factory()
queue = cl.CommandQueue(context)
mf = cl.mem_flags
a = np.random.rand(50000).astype(np.float32)
b = np.empty_like(a)
buf1 = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
buf2 = cl.Buffer(context, mf.WRITE_ONLY, b.nbytes)
cl.enqueue_copy_buffer(queue, buf1, buf2).wait()
cl.enqueue_read_buffer(queue, buf2, b).wait()
assert la.norm(a - b) == 0
def test_mempool(ctx_factory):
from pyopencl.tools import MemoryPool, ImmediateAllocator
context = ctx_factory()
queue = cl.CommandQueue(context)
pool = MemoryPool(ImmediateAllocator(queue))
alloc_queue = []
e0 = 12
for e in range(e0-6, e0-4):
for i in range(100):
alloc_queue.append(pool.allocate(1 << e))
if len(alloc_queue) > 10:
alloc_queue.pop(0)
del alloc_queue
pool.stop_holding()
def test_mempool_2():
from pyopencl.tools import MemoryPool
from random import randrange
for i in range(2000):
s = randrange(1 << 31) >> randrange(32)
bin_nr = MemoryPool.bin_number(s)
asize = MemoryPool.alloc_size(bin_nr)
assert asize >= s, s
assert MemoryPool.bin_number(asize) == bin_nr, s
assert asize < asize*(1+1/8)
def test_vector_args(ctx_factory):
context = ctx_factory()
queue = cl.CommandQueue(context)
prg = cl.Program(context, """
__kernel void set_vec(float4 x, __global float4 *dest)
{ dest[get_global_id(0)] = x; }
""").build()
x = cltypes.make_float4(1, 2, 3, 4)
dest = np.empty(50000, cltypes.float4)
mf = cl.mem_flags
dest_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=dest)
prg.set_vec(queue, dest.shape, None, x, dest_buf)
cl.enqueue_read_buffer(queue, dest_buf, dest).wait()
assert (dest == x).all()
def test_header_dep_handling(ctx_factory):
context = ctx_factory()
from os.path import exists
assert exists("empty-header.h") # if this fails, change dir to pyopencl/test
kernel_src = """
#include <empty-header.h>
kernel void zonk(global int *a)
{
*a = 5;
}
"""
import os
cl.Program(context, kernel_src).build(["-I", os.getcwd()])
cl.Program(context, kernel_src).build(["-I", os.getcwd()])
def test_context_dep_memoize(ctx_factory):
context = ctx_factory()
from pyopencl.tools import context_dependent_memoize
counter = [0]
@context_dependent_memoize
def do_something(ctx):
counter[0] += 1
do_something(context)
do_something(context)
assert counter[0] == 1
def test_can_build_binary(ctx_factory):
ctx = ctx_factory()
device, = ctx.devices
program = cl.Program(ctx, """
__kernel void simple(__global float *in, __global float *out)
{
out[get_global_id(0)] = in[get_global_id(0)];
}""")
program.build()
binary = program.get_info(cl.program_info.BINARIES)[0]
foo = cl.Program(ctx, [device], [binary])
foo.build()
def test_enqueue_barrier_marker(ctx_factory):
ctx = ctx_factory()
# Still relevant on pocl 0.14.
_skip_if_pocl(
ctx.devices[0].platform, (0, 14), 'pocl crashes on enqueue_barrier')
queue = cl.CommandQueue(ctx)
cl.enqueue_barrier(queue)
evt1 = cl.enqueue_marker(queue)
evt2 = cl.enqueue_marker(queue, wait_for=[evt1])
cl.enqueue_barrier(queue, wait_for=[evt1, evt2])
def test_wait_for_events(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
evt1 = cl.enqueue_marker(queue)
evt2 = cl.enqueue_marker(queue)
cl.wait_for_events([evt1, evt2])
def test_unload_compiler(platform):
if (platform._get_cl_version() < (1, 2) or
cl.get_cl_header_version() < (1, 2)):
from pytest import skip
skip("clUnloadPlatformCompiler is only available in OpenCL 1.2")
_skip_if_pocl(platform, (0, 13), 'pocl does not support unloading compiler')
if platform.vendor == "Intel(R) Corporation":
from pytest import skip
skip("Intel proprietary driver does not support unloading compiler")
cl.unload_platform_compiler(platform)
def test_enqueue_task(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
prg = cl.Program(ctx, """
__kernel void
reverse(__global const float *in, __global float *out, int n)
{
for (int i = 0;i < n;i++) {
out[i] = in[n - 1 - i];
}
}
""").build()
knl = prg.reverse
n = 100
a = np.random.rand(n).astype(np.float32)
b = np.empty_like(a)
buf1 = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
buf2 = cl.Buffer(ctx, mf.WRITE_ONLY, b.nbytes)
knl.set_args(buf1, buf2, np.int32(n))
cl.enqueue_task(queue, knl)
cl.enqueue_copy(queue, b, buf2).wait()
assert la.norm(a[::-1] - b) == 0
def test_platform_get_devices(ctx_factory):
ctx = ctx_factory()
platform = ctx.devices[0].platform
if platform.name == "Apple":
pytest.xfail("Apple doesn't understand all the values we pass "
"for dev_type")
dev_types = [cl.device_type.ACCELERATOR, cl.device_type.ALL,
cl.device_type.CPU, cl.device_type.DEFAULT, cl.device_type.GPU]
if (platform._get_cl_version() >= (1, 2) and
cl.get_cl_header_version() >= (1, 2)
and not platform.name.lower().startswith("nvidia")):
dev_types.append(cl.device_type.CUSTOM)
for dev_type in dev_types:
print(dev_type)
devs = platform.get_devices(dev_type)
if dev_type in (cl.device_type.DEFAULT,
cl.device_type.ALL,
getattr(cl.device_type, 'CUSTOM', None)):
continue
for dev in devs:
assert dev.type & dev_type == dev_type
def test_user_event(ctx_factory):
ctx = ctx_factory()
if (ctx._get_cl_version() < (1, 1) and
cl.get_cl_header_version() < (1, 1)):
from pytest import skip
skip("UserEvent is only available in OpenCL 1.1")
# https://github.com/pocl/pocl/issues/201
_skip_if_pocl(ctx.devices[0].platform, (0, 13),
"pocl's user events don't work right")
status = {}
def event_waiter1(e, key):
e.wait()
status[key] = True
def event_waiter2(e, key):
cl.wait_for_events([e])
status[key] = True
from threading import Thread
from time import sleep
evt = cl.UserEvent(ctx)
Thread(target=event_waiter1, args=(evt, 1)).start()
sleep(.05)
if status.get(1, False):
raise RuntimeError('UserEvent triggered before set_status')
evt.set_status(cl.command_execution_status.COMPLETE)
sleep(.05)
if not status.get(1, False):
raise RuntimeError('UserEvent.wait timeout')
assert evt.command_execution_status == cl.command_execution_status.COMPLETE
evt = cl.UserEvent(ctx)
Thread(target=event_waiter2, args=(evt, 2)).start()
sleep(.05)
if status.get(2, False):
raise RuntimeError('UserEvent triggered before set_status')
evt.set_status(cl.command_execution_status.COMPLETE)
sleep(.05)
if not status.get(2, False):
raise RuntimeError('cl.wait_for_events timeout on UserEvent')
assert evt.command_execution_status == cl.command_execution_status.COMPLETE
def test_buffer_get_host_array(ctx_factory):
ctx = ctx_factory()
mf = cl.mem_flags
host_buf = np.random.rand(25).astype(np.float32)
buf = cl.Buffer(ctx, mf.READ_WRITE | mf.USE_HOST_PTR, hostbuf=host_buf)
host_buf2 = buf.get_host_array(25, np.float32)
assert (host_buf == host_buf2).all()
assert (host_buf.__array_interface__['data'][0] ==
            host_buf2.__array_interface__['data'][0])
assert host_buf2.base is buf
buf = cl.Buffer(ctx, mf.READ_WRITE | mf.ALLOC_HOST_PTR, size=100)
try:
host_buf2 = buf.get_host_array(25, np.float32)
assert False, ("MemoryObject.get_host_array should not accept buffer "
"without USE_HOST_PTR")
except cl.LogicError:
pass
host_buf = np.random.rand(25).astype(np.float32)
buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=host_buf)
try:
host_buf2 = buf.get_host_array(25, np.float32)
assert False, ("MemoryObject.get_host_array should not accept buffer "
"without USE_HOST_PTR")
except cl.LogicError:
pass
def test_program_valued_get_info(ctx_factory):
ctx = ctx_factory()
prg = cl.Program(ctx, """
__kernel void
reverse(__global float *out)
{
out[get_global_id(0)] *= 2;
}
""").build()
knl = prg.reverse
assert knl.program == prg
knl.program.binaries[0]
def test_event_set_callback(ctx_factory):
import sys
if sys.platform.startswith("win"):
pytest.xfail("Event.set_callback not present on Windows")
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
if ctx._get_cl_version() < (1, 1):
pytest.skip("OpenCL 1.1 or newer required fro set_callback")
a_np = np.random.rand(50000).astype(np.float32)
b_np = np.random.rand(50000).astype(np.float32)
got_called = []
def cb(status):
got_called.append(status)
mf = cl.mem_flags
a_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_np)
b_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_np)
prg = cl.Program(ctx, """
__kernel void sum(__global const float *a_g, __global const float *b_g,
__global float *res_g) {
int gid = get_global_id(0);
res_g[gid] = a_g[gid] + b_g[gid];
}
""").build()
res_g = cl.Buffer(ctx, mf.WRITE_ONLY, a_np.nbytes)
uevt = cl.UserEvent(ctx)
evt = prg.sum(queue, a_np.shape, None, a_g, b_g, res_g, wait_for=[uevt])
evt.set_callback(cl.command_execution_status.COMPLETE, cb)
uevt.set_status(cl.command_execution_status.COMPLETE)
queue.finish()
# yuck
from time import sleep
sleep(0.1)
assert got_called
def test_global_offset(ctx_factory):
context = ctx_factory()
queue = cl.CommandQueue(context)
prg = cl.Program(context, """
__kernel void mult(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
n = 50
a = np.random.rand(n).astype(np.float32)
queue = cl.CommandQueue(context)
mf = cl.mem_flags
a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)
step = 10
for ofs in range(0, n, step):
prg.mult(queue, (step,), None, a_buf, global_offset=(ofs,))
a_2 = np.empty_like(a)
cl.enqueue_copy(queue, a_2, a_buf)
assert (a_2 == 2*a).all()
def test_sub_buffers(ctx_factory):
ctx = ctx_factory()
if (ctx._get_cl_version() < (1, 1) or
cl.get_cl_header_version() < (1, 1)):
from pytest import skip
skip("sub-buffers are only available in OpenCL 1.1")
alignment = ctx.devices[0].mem_base_addr_align
queue = cl.CommandQueue(ctx)
n = 30000
a = (np.random.rand(n) * 100).astype(np.uint8)
mf = cl.mem_flags
a_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)
start = (5000 // alignment) * alignment
stop = start + 20 * alignment
a_sub_ref = a[start:stop]
a_sub = np.empty_like(a_sub_ref)
cl.enqueue_copy(queue, a_sub, a_buf[start:stop])
assert np.array_equal(a_sub, a_sub_ref)
def test_spirv(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
if (ctx._get_cl_version() < (2, 1) or
cl.get_cl_header_version() < (2, 1)):
from pytest import skip
skip("SPIR-V program creation only available in OpenCL 2.1 and higher")
n = 50000
a_dev = cl.clrandom.rand(queue, n, np.float32)
b_dev = cl.clrandom.rand(queue, n, np.float32)
dest_dev = cl_array.empty_like(a_dev)
with open("add-vectors-%d.spv" % queue.device.address_bits, "rb") as spv_file:
spv = spv_file.read()
prg = cl.Program(ctx, spv)
prg.sum(queue, a_dev.shape, None, a_dev.data, b_dev.data, dest_dev.data)
assert la.norm((dest_dev - (a_dev+b_dev)).get()) < 1e-7
def test_coarse_grain_svm(ctx_factory):
import sys
is_pypy = '__pypy__' in sys.builtin_module_names
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
dev = ctx.devices[0]
has_svm = (ctx._get_cl_version() >= (2, 0) and
cl.get_cl_header_version() >= (2, 0))
if dev.platform.name == "Portable Computing Language":
has_svm = (
get_pocl_version(dev.platform) >= (1, 0)
and cl.get_cl_header_version() >= (2, 0))
if not has_svm:
from pytest import skip
skip("SVM only available in OpenCL 2.0 and higher")
if ("AMD" in dev.platform.name
and dev.type & cl.device_type.CPU):
pytest.xfail("AMD CPU doesn't do coarse-grain SVM")
n = 3000
svm_ary = cl.SVM(cl.csvm_empty(ctx, (n,), np.float32, alignment=64))
if not is_pypy:
# https://bitbucket.org/pypy/numpy/issues/52
assert isinstance(svm_ary.mem.base, cl.SVMAllocation)
cl.enqueue_svm_memfill(queue, svm_ary, np.zeros((), svm_ary.mem.dtype))
with svm_ary.map_rw(queue) as ary:
ary.fill(17)
orig_ary = ary.copy()
prg = cl.Program(ctx, """
__kernel void twice(__global float *a_g)
{
a_g[get_global_id(0)] *= 2;
}
""").build()
prg.twice(queue, svm_ary.mem.shape, None, svm_ary)
with svm_ary.map_ro(queue) as ary:
print(ary)
assert np.array_equal(orig_ary*2, ary)
new_ary = np.empty_like(orig_ary)
new_ary.fill(-1)
if ctx.devices[0].platform.name != "Portable Computing Language":
# "Blocking memcpy is unimplemented (clEnqueueSVMMemcpy.c:61)"
# in pocl up to and including 1.0rc1.
cl.enqueue_copy(queue, new_ary, svm_ary)
assert np.array_equal(orig_ary*2, new_ary)
def test_fine_grain_svm(ctx_factory):
import sys
is_pypy = '__pypy__' in sys.builtin_module_names
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
from pytest import skip
if (ctx._get_cl_version() < (2, 0) or
cl.get_cl_header_version() < (2, 0)):
skip("SVM only available in OpenCL 2.0 and higher")
if not (ctx.devices[0].svm_capabilities
& cl.device_svm_capabilities.FINE_GRAIN_BUFFER):
skip("device does not support fine-grain SVM")
n = 3000
ary = cl.fsvm_empty(ctx, n, np.float32, alignment=64)
if not is_pypy:
# https://bitbucket.org/pypy/numpy/issues/52
assert isinstance(ary.base, cl.SVMAllocation)
ary.fill(17)
orig_ary = ary.copy()
prg = cl.Program(ctx, """
__kernel void twice(__global float *a_g)
{
a_g[get_global_id(0)] *= 2;
}
""").build()
prg.twice(queue, ary.shape, None, cl.SVM(ary))
queue.finish()
print(ary)
assert np.array_equal(orig_ary*2, ary)
@pytest.mark.parametrize("dtype", [
np.uint,
cl.cltypes.uint2,
])
def test_map_dtype(ctx_factory, dtype):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
dt = np.dtype(dtype)
b = pyopencl.Buffer(ctx,
pyopencl.mem_flags.READ_ONLY,
dt.itemsize)
array, ev = pyopencl.enqueue_map_buffer(queue, b, pyopencl.map_flags.WRITE, 0,
(1,), dt)
with array.base:
print(array.dtype)
assert array.dtype == dt
if __name__ == "__main__":
# make sure that import failures get reported, instead of skipping the tests.
import pyopencl # noqa
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from py.test.cmdline import main
main([__file__])
|
queuemp.py
|
# queuemp.py
#
# An example of using queues with multiprocessing
def consumer(input_q):
while True:
# Get an item from the queue
item = input_q.get()
# Process item
print item
# Signal completion
input_q.task_done()
def producer(sequence,output_q):
for item in sequence:
# Put the item on the queue
output_q.put(item)
if __name__ == '__main__':
from multiprocessing import Process, JoinableQueue
q = JoinableQueue()
# Launch the consumer process
cons_p = Process(target=consumer,args=(q,))
cons_p.daemon = True
cons_p.start()
# Run the producer function on some data
sequence = range(100) # Replace with useful data
producer(sequence,q)
# Wait for the consumer to finish
q.join()
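# A variant worth knowing (sketch only, not part of the original example): instead of a
# daemon consumer plus JoinableQueue.join(), a sentinel value can be sent through a plain
# Queue to tell the consumer when to exit. The sentinel value below is an illustrative choice.
#
#   from multiprocessing import Process, Queue
#
#   def consumer_with_sentinel(input_q, sentinel):
#       while True:
#           item = input_q.get()
#           if item == sentinel:      # compare by value; identity is lost across processes
#               break
#           print item
#
#   if __name__ == '__main__':
#       q = Queue()
#       sentinel = 'STOP'
#       cons_p = Process(target=consumer_with_sentinel, args=(q, sentinel))
#       cons_p.start()
#       for item in range(100):
#           q.put(item)
#       q.put(sentinel)               # signal the consumer to finish
#       cons_p.join()                 # wait for the consumer process to exit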
|
test_lock.py
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import tempfile
import unittest
from builtins import open, str
from multiprocessing import Manager, Process
from threading import Thread
from pants.process.lock import OwnerPrintingInterProcessFileLock
def hold_lock_until_terminate(path, lock_held, terminate):
lock = OwnerPrintingInterProcessFileLock(path)
lock.acquire()
lock_held.set()
# NOTE: We shouldn't ever wait this long, this is just to ensure
# we don't somehow leak child processes.
terminate.wait(60)
lock.release()
lock_held.clear()
class TestOwnerPrintingInterProcessFileLock(unittest.TestCase):
def setUp(self):
self.lock_dir = tempfile.mkdtemp()
self.lock_path = os.path.join(self.lock_dir, 'lock')
self.lock = OwnerPrintingInterProcessFileLock(self.lock_path)
self.manager = Manager()
self.lock_held = self.manager.Event()
self.terminate = self.manager.Event()
self.lock_process = Process(
target=hold_lock_until_terminate,
args=(self.lock_path, self.lock_held, self.terminate),
)
def tearDown(self):
self.terminate.set()
try:
shutil.rmtree(self.lock_dir)
except OSError:
pass
def test_non_blocking_attempt(self):
self.lock_process.start()
self.lock_held.wait()
self.assertFalse(self.lock.acquire(blocking=False))
def test_message(self):
self.lock_process.start()
self.lock_held.wait()
self.assertTrue(os.path.exists(self.lock.message_path))
with open(self.lock.message_path, 'r') as f:
message_content = f.read()
self.assertIn(str(self.lock_process.pid), message_content)
os.unlink(self.lock.message_path)
def message_fn(message):
self.assertIn(self.lock.missing_message_output, message)
self.lock.acquire(blocking=False, message_fn=message_fn)
def test_blocking(self):
self.lock_process.start()
self.lock_held.wait()
self.assertFalse(self.lock.acquire(timeout=.1))
acquire_is_blocking = self.manager.Event()
def terminate_subproc(terminate, acquire_is_blocking):
acquire_is_blocking.wait()
terminate.set()
Thread(target=terminate_subproc, args=(self.terminate, acquire_is_blocking)).start()
def message_fn(message):
self.assertIn(str(self.lock_process.pid), message)
acquire_is_blocking.set()
# NOTE: We shouldn't ever wait this long (locally this runs in ~milliseconds)
# but sometimes CI containers are extremely slow, so we choose a very large
# value just in case.
self.assertTrue(self.lock.acquire(timeout=30, message_fn=message_fn))
def test_reentrant(self):
self.assertTrue(self.lock.acquire())
self.assertTrue(self.lock.acquire())
def test_release(self):
self.assertTrue(self.lock.acquire())
self.assertTrue(self.lock.acquired)
self.lock.release()
self.assertFalse(self.lock.acquired)
|
getproxy.py
|
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2018-10-18 23:10:19
# @Last Modified by: gunjianpan
# @Last Modified time: 2020-06-01 13:50:44
import argparse
import codecs
import functools
import http.cookiejar as cj
import os
import random
import re
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
from apscheduler.schedulers.blocking import BlockingScheduler
from bs4 import BeautifulSoup
sys.path.append(os.getcwd())
from util.db import Db
from util.util import (
basic_req,
begin_time,
can_retry,
changeHtmlTimeout,
changeJsonTimeout,
echo,
end_time,
read_file,
time_str,
get_accept,
get_content_type,
)
"""
* www.proxyserverlist24.top
* www.live-socks.net
* gatherproxy.com
* goubanjia.com
xicidaili.com
data5u.com
66ip.com
kuaidaili.com
.data/
├── gatherproxy // gather proxy list
└── passage // gather passage
"""
data_dir = "proxy/data/"
MAXN = 0x3FFFFFFF
type_map = {1: "https", 0: "http"}
class GetFreeProxy:
""" proxy pool """
def __init__(self):
self.Db = Db("proxy")
self.insert_sql = """INSERT INTO ip_proxy( `address`, `http_type`) VALUES %s """
self.select_list = (
"""SELECT address, http_type from ip_proxy WHERE `is_failured` = 0"""
)
self.select_sql = """SELECT `id`, address, `is_failured` from ip_proxy WHERE `address` in %s """
self.select_all = """SELECT `address`, `http_type` from ip_proxy WHERE `is_failured` != 5 and http_type in %s"""
self.random_select = """SELECT `address`, `http_type` FROM ip_proxy WHERE `is_failured` >= 5 and (`id` >= ((SELECT MAX(`id`) FROM ip_proxy)-(SELECT MIN(`id`) FROM ip_proxy)) * RAND() + (SELECT MIN(`id`) FROM ip_proxy)) and http_type in %s LIMIT 6000"""
self.replace_ip = """REPLACE INTO ip_proxy(`id`, `address`, `http_type`, `is_failured`) VALUES %s"""
self.can_use_ip = {}
self.waitjudge = []
self.cannot_use_ip = {}
self.failured_time = {}
self.canuse_proxies = []
self.init_proxy()
def proxy_req(
self,
url: str,
types: int,
data=None,
header=None,
test_func=None,
need_cookie: bool = False,
config: dict = {},
proxies: dict = {},
):
"""
        use a proxy to send requests, and record proxies that cannot be used
@types S0XY: X=0.->get; =1.->post;
Y=0.->html; =1.->json; =2.->basic
S=0.->basic ;=1.->ss
        supports retry on failure && automatic recording of failed proxies
"""
httptype = url[4] == "s"
ss_type = types // 1000
types %= 1000
if ss_type:
proxylist = self.proxylists_ss if httptype else self.proxylist_ss
else:
proxylist = self.proxylists if httptype else self.proxylist
if proxies != {}:
proxies = proxies
elif not len(proxylist):
if self.Db.db:
echo(
"0|critical",
"Proxy pool empty!!! Please check the db conn & db dataset!!!",
)
proxies = {}
else:
index = random.randint(0, len(proxylist) - 1)
proxies_url = proxylist[index]
proxies = {type_map[httptype]: proxies_url}
try:
result = basic_req(
url,
types=types,
proxies=proxies,
data=data,
header=header,
need_cookie=need_cookie,
config=config,
)
if test_func is not None:
if not test_func(result):
if self.check_retry(url):
return self.proxy_req(
url,
types=types + 1000 * ss_type,
data=data,
header=header,
test_func=test_func,
need_cookie=need_cookie,
config=config,
proxies=proxies,
)
else:
self.failured_time[url] = 0
return
return result
return result
except:
self.cannot_use_ip[random.randint(0, MAXN)] = proxies_url
if proxies_url in proxylist:
                proxylist.remove(proxies_url)
if not len(self.cannot_use_ip.keys()) % 10:
self.clean_cannot_use()
if self.check_retry(url):
return self.proxy_req(
url,
types=types + 1000 * ss_type,
data=data,
test_func=test_func,
header=header,
need_cookie=need_cookie,
config=config,
proxies=proxies,
)
def check_retry(self, url: str) -> bool:
""" check try time """
if url not in self.failured_time:
self.failured_time[url] = 0
return True
elif self.failured_time[url] < 3:
self.failured_time[url] += 1
return True
else:
self.log_write(url)
self.failured_time[url] = 0
return False
def log_write(self, url: str):
""" failure log """
echo("0|warning", "url {} retry max time".format(url))
def insert_proxy(self, insert_list: list):
""" insert data to db """
results = self.Db.insert_db(self.insert_sql % str(insert_list)[1:-1])
if results:
echo("2|info", "Insert " + str(len(insert_list)) + " items Success!")
def update_proxy(self, update_list: list, types: int):
""" update data to db"""
results = self.Db.update_db(self.replace_ip % str(update_list)[1:-1])
typemap = {0: "can use ", 1: "can not use "}
if results:
echo(
"2|info",
"Update",
typemap[types],
str(len(update_list)),
" items Success!",
)
def select_proxy(self, target_list: list) -> list:
""" select ip proxy by ids """
if not len(target_list):
return []
elif len(target_list) == 1:
waitlist = "('" + target_list[0] + "')"
else:
waitlist = tuple(target_list)
return self.Db.select_db(self.select_sql % str(waitlist))
def db_can_use_proxy(self):
""" test db have or not this data """
results = self.select_proxy([ii[0] for ii in self.can_use_ip.values()])
ss_len = len([1 for ii in self.can_use_ip.values() if ii[1] > 1])
echo("2|info", "SS proxies", ss_len)
insert_list = []
update_list = []
ip_map = {}
if results != False:
for ip_info in results:
ip_map[ip_info[1]] = [ip_info[0], ip_info[2]]
for ip_now in self.can_use_ip.values():
http_type = ip_now[1]
ip_now = ip_now[0]
if ip_now in ip_map:
if ip_map[ip_now][1]:
update_list.append((ip_map[ip_now][0], ip_now, http_type, 0))
else:
insert_list.append((ip_now, http_type))
if len(insert_list):
self.insert_proxy(insert_list)
if len(update_list):
self.update_proxy(update_list, 0)
else:
pass
self.can_use_ip = {}
def clean_cannot_use(self):
""" update db proxy cannot use """
results = self.select_proxy(self.cannot_use_ip.values())
update_list = []
ip_map = {}
if results:
for ip_info in results:
ip_map[ip_info[1]] = [ip_info[0], ip_info[2]]
for ip_now in self.cannot_use_ip.values():
http_type = ip_now[4] == "s"
if ip_now in ip_map:
update_list.append(
(ip_map[ip_now][0], ip_now, http_type, ip_map[ip_now][1] + 1)
)
if len(update_list):
self.update_proxy(update_list, 1)
self.cannot_use_ip = {}
def init_proxy(self):
""" init proxy list """
results = self.Db.select_db(self.select_list)
self.proxylist = []
self.proxylists = []
self.proxylist_ss = []
self.proxylists_ss = []
if not results:
echo(
"0|error", "Please check db configure!!! The proxy pool cant use!!!>>>"
)
return
for index in results:
if index[1] == 1:
self.proxylists.append(index[0])
elif index[1] == 2:
self.proxylist.append(index[0])
self.proxylist_ss.append(index[0])
elif index[1] == 3:
self.proxylists.append(index[0])
self.proxylists_ss.append(index[0])
else:
self.proxylist.append(index[0])
echo("2|info", len(self.proxylist), " http proxy can use.")
echo("2|info", len(self.proxylists), " https proxy can use.")
echo("2|info", len(self.proxylist_ss), " ss http proxy can use.")
echo("2|info", len(self.proxylists_ss), " ss https proxy can use.")
def judge_url(self, urls: str, index: int, times: int, ss_test: bool = False):
"""
use /api/playlist to judge http; use /discover/playlist judge https
1. don't timeout = 5
2. response.result.tracks.size() != 1
"""
http_type = urls[4] == "s"
proxies = {type_map[http_type]: urls}
test_url = (
type_map[http_type] + "://music.163.com/api/playlist/detail?id=432853362"
)
ss_url = "https://www.google.com/?gws_rd=ssl"
try:
data = basic_req(test_url, 1, proxies)
result = data["result"]
tracks = result["tracks"]
if len(tracks) == 10:
if times < 0:
self.judge_url(urls, index, times + 1)
else:
echo("1|debug", urls, proxies, "Proxies can use.")
self.canuse_proxies.append(urls)
self.can_use_ip[index] = [urls, int(http_type)]
if ss_test:
data = basic_req(ss_url, 0)
if len(str(data)) > 5000:
self.can_use_ip[index] = [urls, int(http_type) + 2]
else:
echo("0|debug", urls, proxies, "Tracks len error ^--<^>--^ ")
self.cannot_use_ip[index] = urls
except:
echo("0|debug", urls, proxies, "return error [][][][][][]")
            if index not in self.can_use_ip:
self.cannot_use_ip[index] = urls
def thread_judge(self, batch_size: int = 500):
""" threading to judge proxy """
changeJsonTimeout(2)
changeHtmlTimeout(3)
proxy_exec = ThreadPoolExecutor(max_workers=batch_size // 2)
text = self.waitjudge
num = len(text)
for block in range(num // batch_size + 1):
proxy_th = [
proxy_exec.submit(self.judge_url, jj, ii, 0)
for ii, jj in enumerate(
text[block * batch_size : batch_size * (block + 1)]
)
]
list(as_completed(proxy_th))
self.db_can_use_proxy()
self.clean_cannot_use()
self.waitjudge = []
def test_db(self, types: int):
""" test proxy in db can use """
version = begin_time()
typestr = ""
if types == 2:
typestr = "(0,1,2,3)"
elif types == 1:
typestr = "(1,3)"
else:
typestr = "(0,2)"
results = self.Db.select_db(self.select_all % typestr)
random_select = self.Db.select_db(self.random_select % typestr)
if not results:
results = []
if not random_select:
random_select = []
for index in results + random_select:
self.waitjudge.append(index[0])
self.thread_judge()
self.init_proxy()
end_time(version, 2)
def xici_proxy(self, page: int):
"""
xici proxy http://www.xicidaili.com/nn/{page}
        The first proxy site I used, but most of its proxies no longer work.
"""
if not str(page).isdigit():
echo("0|warning", "Please input num!")
return []
version = begin_time()
url = "http://www.xicidaili.com/nn/%d"
for index in range(1, page + 1):
html = basic_req(url % index, 0)
tem = html.find_all("tr")
for index in range(1, len(tem)):
tds = tem[index].find_all("td")
ip = tds[5].text.lower()
self.waitjudge.append("{}://{}:{}".format(ip, tds[1].text, tds[2].text))
self.thread_judge()
end_time(version, 2)
def gatherproxy(self, types: int):
"""
:100: very nice website
        first of all you should download the proxy IP list text file from:
http://www.gatherproxy.com/zh/proxylist/country/?c=China
"""
if not os.path.exists("{}gatherproxy".format(data_dir)):
echo("0|warning", "Gather file not exist!!!")
return
file_d = read_file("{}gatherproxy".format(data_dir))
waitjudge_http = ["http://" + ii for ii in file_d]
waitjudge_https = ["https://" + ii for ii in file_d]
if not types:
self.waitjudge += waitjudge_http
elif types == 1:
self.waitjudge += waitjudge_https
elif types == 2:
self.waitjudge += waitjudge_http + waitjudge_https
else:
self.waitjudge += file_d
echo("2|warning", "load gather over!")
def goubanjia(self):
"""
        :-1: html tags mixed with invalid data
        :100: and, most importantly, the port is written in the 'class' attribute rather than in the text.
        The website is difficult to spider, but its proxies are very good.
        goubanjia proxy http://www.goubanjia.com
"""
version = begin_time()
host = "http://www.goubanjia.com"
html = self.proxy_req(host, 0)
if not html:
return []
trs = html.find_all("tr", class_=["warning", "success"])
for tr in trs:
tds = tr.find_all("td")
ip = tds[2].find_all("a")[0].text + "://"
iplist = tds[0].find_all(["div", "span", not "p"], class_=not "port")
for index in iplist:
ip += index.text
encode = tds[0].find_all(["div", "span", "p"], class_="port")[0]["class"][1]
uncode = functools.reduce(
lambda x, y: x * 10 + (ord(y) - ord("A")), map(lambda x: x, encode), 0
)
self.waitjudge.append(ip + ":" + str(int(uncode / 8)))
self.thread_judge()
end_time(version, 2)
def schedulegou(self):
sched = BlockingScheduler()
sched.add_job(self.goubanjia, "interval", seconds=100)
sched.start()
def data5u(self):
"""
data5u proxy http://www.data5u.com/
        none of its proxies can be used any more
"""
version = begin_time()
url_list = ["", "free/gngn/index.shtml", "free/gwgn/index.shtml"]
host = "http://www.data5u.com/"
for uri in url_list:
html = self.proxy_req(host + uri, 0)
if not html:
continue
table = html.find_all("ul", class_="l2")
for index in table:
tds = index.find_all("li")
ip = tds[3].text
self.waitjudge.append("{}://{}:{}".format(ip, tds[1].text, tds[2].text))
self.thread_judge()
end_time(version, 2)
def sixsixip(self, area: int, page: int):
"""
66ip proxy http://www.66ip.cn/areaindex_{area}/{page}.html
"""
version = begin_time()
threadings = []
for index in range(1, area + 1):
for pageindex in range(1, page + 1):
echo("2|debug", "{} {}".format(index, pageindex))
work = threading.Thread(
target=self.sixsixthread, args=(index, pageindex)
)
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
self.thread_judge()
end_time(version, 2)
def sixsixthread(self, index: int, pageindex: int):
host = """http://www.66ip.cn/areaindex_%d/%d.html"""
html = self.proxy_req(host % (index, pageindex), 0)
if not html:
return []
trs = html.find_all("table")[2].find_all("tr")
for test in range(1, len(trs) - 1):
tds = trs[test].find_all("td")
self.waitjudge.append("http://{}:{}".format(tds[0].text, tds[1].text))
self.waitjudge.append("https://{}:{}".format(tds[0].text, tds[1].text))
def kuaidaili(self, page: int):
"""
kuaidaili https://www.kuaidaili.com/free/
"""
version = begin_time()
threadings = []
for index in range(1, page + 1):
work = threading.Thread(target=self.kuaidailithread, args=(index,))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
self.thread_judge()
end_time(version, 2)
def kuaidailithread(self, index: int):
host = """https://www.kuaidaili.com/free/inha/%d/"""
html = self.proxy_req(host % index, 0)
if not html:
return []
trs = html.find_all("tr")
for index in range(1, len(trs)):
tds = trs[index].find_all("td")
ip = tds[3].text.lower() + "://" + tds[0].text + ":" + tds[1].text
self.waitjudge.append(ip)
def get_cookie(self):
"""
        log in to obtain the cookie
        PS: Although the cookie expiry time is more than 1 year,
        it becomes invalid once the connection is closed,
        so you need to reactivate the cookie with this function.
"""
headers = {
"Cookie": "_lang=en-US; _ga=GA1.2.1084455496.1548351129; _gid=GA1.2.1515017701.1552361687; ASP.NET_SessionId=ckin3pzyqyoyt3zg54zrtrct; _gat=1; arp_scroll_position=57",
"Accept": get_accept("html") + ";q=0.9",
}
login_url = "http://www.gatherproxy.com/subscribe/login"
cookie_html = basic_req(login_url, 3, header=headers)
try:
verify_text = re.findall('<span class="blue">(.*?)</span>', cookie_html)[0]
except:
return
verify_list = verify_text.replace("= ", "").strip().split()
num_map = {
"Zero": 0,
"One": 1,
"Two": 2,
"Three": 3,
"Four": 4,
"Five": 5,
"Six": 6,
"Seven": 7,
"Eight": 8,
"Nine": 9,
"Ten": 10,
}
verify_num = [verify_list[0], verify_list[2]]
for index, num in enumerate(verify_num):
if num.isdigit():
verify_num[index] = int(num)
elif num in num_map:
verify_num[index] = num_map[num]
else:
echo("0|error", "Error", num)
# return False
verify_code = 0
error = True
operation = verify_list[1]
if (
operation == "+"
or operation == "plus"
or operation == "add"
or operation == "multiplied"
):
verify_code = verify_num[0] + verify_num[1]
error = False
if operation == "-" or operation == "minus":
verify_code = verify_num[0] - verify_num[1]
error = False
if operation == "X" or operation == "multiplication":
verify_code = verify_num[0] * verify_num[1]
error = False
if error:
echo("0|error", "Error", operation)
if not os.path.exists("%spassage" % data_dir):
echo("0|warning", "gather passage not exist!!!")
return
with codecs.open("%spassage" % data_dir, "r", encoding="utf-8") as f:
passage = [index[:-1] for index in f.readlines()]
data = {
"Username": passage[0],
"Password": passage[1],
"Captcha": str(verify_code),
}
time.sleep(2.163)
r = requests.session()
r.cookies = cj.LWPCookieJar()
login_req = r.post(login_url, headers=headers, data=data, verify=False)
def load_gather(self):
"""
        load the gatherproxy proxy pool text
        If it fails, you should reactivate the cookie.
"""
headers = {
"Cookie": "_lang=en-US; _ga=GA1.2.1084455496.1548351129; _gid=GA1.2.1515017701.1552361687; ASP.NET_SessionId=ckin3pzyqyoyt3zg54zrtrct; _gat=1; arp_scroll_position=57",
"Accept": get_accept("html") + ";q=0.9",
}
url = "http://www.gatherproxy.com/subscribe/infos"
try:
sid_url_req = requests.get(url, headers=headers, verify=False, timeout=10)
except:
return
sid_url_html = BeautifulSoup(sid_url_req.text, "html.parser")
sid_url = sid_url_html.find_all("div", class_="wrapper")[1].find_all("a")[0][
"href"
]
if len(sid_url.split("sid=")) < 2:
echo("0|warning", "cookie error")
self.get_cookie()
self.load_gather()
return
sid = sid_url.split("sid=")[1]
sid_url = "http://www.gatherproxy.com" + sid_url
data = {"ID": sid, "C": "", "P": "", "T": "", "U": "0"}
gatherproxy = requests.post(sid_url, headers=headers, data=data, verify=False)
with codecs.open(data_dir + "gatherproxy", "w", encoding="utf-8") as f:
f.write(gatherproxy.text)
def load_proxies_list(self, types: int = 2):
""" load proxies """
SITES = ["http://www.proxyserverlist24.top/", "http://www.live-socks.net/"]
spider_pool = []
self.waitjudge = []
for site in SITES:
self.get_other_proxies(site)
self.gatherproxy(3)
waitjudge = list(set(self.waitjudge))
waitjudge_http = ["http://" + ii for ii in waitjudge]
waitjudge_https = ["https://" + ii for ii in waitjudge]
if not types:
self.waitjudge = waitjudge_http
elif types == 1:
self.waitjudge = waitjudge_https
else:
self.waitjudge = waitjudge_http + waitjudge_https
echo(
"1|info",
"-_-_-_-_-_-_-",
len(waitjudge),
"Proxies wait to judge -_-_-_-_-_-_-",
)
def request_text(self, url: str) -> str:
""" requests text """
req = basic_req(url, 2)
if req is None:
echo("0|debug", url)
if can_retry(url):
return self.request_text(url)
else:
return ""
echo("1|debug", url)
text = req.text
if type(text) == str:
return text
elif type(text) == bytes:
return text.decode()
else:
return ""
def get_free_proxy(self, url: str):
req = basic_req(url, 2)
if req is None:
return []
tt = req.text
        t_list = re.findall(r"<tr><td>(\d*\.\d*\.\d*\.\d*)</td><td>(\d*?)</td>", tt)
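        # Illustrative only: a row such as "<tr><td>1.2.3.4</td><td>8080</td>" in the page
        # source matches the pattern above and ends up in the returned list as "1.2.3.4:8080".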
echo(1, "Get Free proxy List", url, len(t_list))
return ["{}:{}".format(ii, jj) for ii, jj in t_list]
def get_proxy_free(self):
urls = [
"https://www.sslproxies.org",
"https://free-proxy-list.net",
"https://www.us-proxy.org",
"https://free-proxy-list.net/uk-proxy.html",
"https://free-proxy-list.net/anonymous-proxy.html",
"http://www.google-proxy.net",
]
t_list = []
for url in urls:
t_list.extend(self.get_free_proxy(url))
t_list.extend(self.get_api())
for ii in ["http", "https"]:
t_list.extend(self.get_download(ii))
t_list = list(set(t_list))
with open(data_dir + "gatherproxy", "w") as f:
f.write("\n".join(t_list))
def ip_decoder(self, data: str):
        data = re.sub(r"\+", "\x20", data)
data = re.sub(
"%([a-fA-F0-9][a-fA-F0-9])",
lambda i: chr(int("0x" + i.group()[1:], 16)),
data,
)
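        # Illustrative only: for data containing "%3E1.2.3.4:80%3C/a%3E" the substitution above
        # decodes the %XX escapes to ">1.2.3.4:80</a>", and the findall below then returns
        # ['1.2.3.4:80'].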
return re.findall(">(.*?)</a", data)
def get_api(self):
API_KEY = "xxx"
url = "http://api.scraperapi.com/?api_key={}&url=http://httpbin.org/ip".format(
API_KEY
)
t_list = []
for ii in range(38):
tt = basic_req(url, 1)
if tt is None:
continue
t_list.append(tt["origin"])
echo(1, "Get scraperapi", len(t_list))
return t_list
def get_download(self, types: str):
url = "https://www.proxy-list.download/api/v0/get?l=en&t=" + types
tt = basic_req(url, 1)
if tt is None:
return []
tt_list = tt[0]["LISTA"]
echo(1, "Get download", types, len(tt_list))
return ["{}:{}".format(ii["IP"], ii["PORT"]) for ii in tt_list]
def get_other_proxies(self, url: str):
""" get other proxies """
pages = re.findall(
r"<h3[\s\S]*?<a.*?(http.*?\.html).*?</a>", self.request_text(url)
)
if not len(pages):
echo("0|warning", "Please do not frequently request {}!!!".format(url))
else:
proxies = [
re.findall(
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}", self.request_text(ii)
)
for ii in pages
]
self.waitjudge = [*self.waitjudge, *sum(proxies, [])]
def load_proxies_test(self):
""" load mode & test proxies """
version = begin_time()
self.load_proxies_list()
proxies_len = len(self.waitjudge)
self.thread_judge()
canuse_len = len(self.canuse_proxies)
echo(
"1|info",
"\nTotal Proxies num: {}\nCan use num: {}\nTime spend: {}\n".format(
proxies_len, canuse_len, end_time(version)
),
)
with open("{}canuse_proxies.txt".format(data_dir), "w") as f:
f.write("\n".join(self.canuse_proxies))
if __name__ == "__main__":
if not os.path.exists(data_dir):
os.makedirs(data_dir)
parser = argparse.ArgumentParser(description="gunjianpan proxy pool code")
parser.add_argument(
"--model", type=int, default=0, metavar="model", help="model 0/1"
)
parser.add_argument(
"--is_service",
type=bool,
default=False,
metavar="service",
help="True or False",
)
parser.add_argument(
"--test_time", type=int, default=1, metavar="test_time", help="test_time"
)
model = parser.parse_args().model
a = GetFreeProxy()
if model == 1:
a.get_proxy_free()
elif model == 0:
a.load_proxies_test()
a.test_db(2)
else:
a.test_db(2)
|
scd_coletor_thread.py
|
#!/bin/python2.7
from __builtin__ import id
import os
import commands
from netaddr import IPNetwork
from scd_new_analisador_conflitos import analise_Conflito
import time
from threading import Thread
import threading, Queue
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scd.settings")
from django.core.management import execute_from_command_line
from django.db.models import Count
import django.db.models.query
import django
import datetime
from django.shortcuts import render
from django.shortcuts import render_to_response, redirect
from django.http import HttpResponse
from django.template import RequestContext
from array import *
from django.db.models import *
from django.db.models import F
from datetime import timedelta
from django.utils import timezone
from django.db.models import Count
from django.db.models import Sum, Avg
from datetime import datetime
from scd import settings
from scd.scd_app.models import *
"""TERMINA INPORTACAO DJANGO"""
django.setup()
# Defines the fields used in the collection
class scd_flows(object):
def __init__(self):
self.flow_id = None
self.switch = None
self.nw_src = None
self.nw_dst = None
self.nw_src_full = None
self.nw_dst_full = None
self.nw_src_mask = None
self.nw_dst_mask = None
self.nw_proto = None
self.nw_tos = None
self.dl_src = None
self.dl_dst = None
self.dl_type = None
self.dl_vlan = None
self.actions = None
self.table = None
self.in_port = None
self.priority = None
self.idle_timeout = None
self.hard_timeout = None
self.tp_src = None
self.tp_dst = None
self.conflito = []
self.conflito_sugestao = []
self.conflito_nivel = []
# Collects the flows from the specified switch
def scd_get_flows(switch):
dump_flow = commands.getoutput("ovs-ofctl dump-flows %s | grep -v NXST_FLOW | sed '/^$/d'"%switch)
list_flows = dump_flow.splitlines()
lista_flows = []
lista_flow = []
lista_aux = []
for flow in list_flows:
lista_f = flow.replace(' ',',').split(',')
for x in lista_f:
lista_aux = x.split('=')
if lista_aux.__len__() ==2:
dicio_flow = {lista_aux[0]:lista_aux[1]}
else:
dicio_flow = {lista_aux[0]:lista_aux[0]}
lista_flow.append(dicio_flow)
lista_flows.append(lista_flow)
lista_flow = []
return lista_flows
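# Illustrative sketch of what scd_get_flows returns (the flow below is made up): a dump line
# such as "table=0, priority=100, ip,nw_src=10.0.0.0/24 actions=drop" is split on spaces and
# commas, so each "key=value" token becomes a one-entry dict like {'priority': '100'} and a
# bare token such as "ip" becomes {'ip': 'ip'}; empty tokens from doubled separators show up
# as {'': ''}. One such list of dicts is returned per flow line.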
# Collects the switches
def scd_get_switches():
dump_switches = commands.getoutput("ovs-vsctl show | grep Bridge | cut -f2 -d'\"'")
lista_dump_switches = dump_switches.splitlines()
lista_switches = []
for switch in lista_dump_switches:
lista_switches.append(switch)
return lista_switches
def scd_func_thread(scd_flow_main_list,flow1, queue):
for flow2 in scd_flow_main_list:
        # Check that it is not the same rule (same position in the list)
        # If the rules are different, run the analysis
#if flow_1 != flow_2:
if flow1 != flow2:
scd_verifica_conflito = analise_Conflito(flow1,flow2)
if scd_verifica_conflito.sugestao_resolucao != None:
flow1.conflito_nivel.append(scd_verifica_conflito.nivel_conflito)
flow1.conflito_sugestao.append(scd_verifica_conflito.sugestao_resolucao)
flow1.conflito.append(flow2.flow_id)
scd_verifica_conflito = None
queue.put(flow1)
# Start of the collection
if __name__=="__main__":
    # Start by collecting the switches
switches = scd_get_switches()
    # Variable used to collect the flows
scd_flow_main_list = []
print ("Iniciando Modulo de Coleta")
tempo_ini = time.clock()
for switch in switches:
flow_match = scd_get_flows(switch)
count = flow_match.__len__()
for i in range(0,count):
            # Receives the match fields
scd_flow_flows = scd_flows()
scd_flow_flows.switch = switch
for flows in flow_match[i]:
scd_flow_flows.flow_id = ("%s-%s"%(switch,i))
if 'in_port' in flows: scd_flow_flows.in_port = flows['in_port']
if 'priority' in flows: scd_flow_flows.priority = flows['priority']
if 'table' in flows: scd_flow_flows.table = flows['table']
if 'actions' in flows: scd_flow_flows.actions = flows['actions']
if 'idle_timeout' in flows: scd_flow_flows.idle_timeout = flows['idle_timeout']
if 'hard_timeout' in flows: scd_flow_flows.hard_timeout = flows['hard_timeout']
if 'dl_vlan' in flows: scd_flow_flows.dl_vlan = flows['dl_vlan']
if 'dl_src' in flows: scd_flow_flows.dl_src = flows['dl_src']
if 'dl_dst' in flows: scd_flow_flows.dl_dst = flows['dl_dst']
                # See the ovs-ofctl manual - these are the protocols that can be specified
if 'dl_type' in flows: scd_flow_flows.dl_type = flows['dl_type']
                ##### ARP/RARP/ICMP - TCP port is not analysed
#dl_type=0x0806.
elif 'arp' in flows: scd_flow_flows.dl_type = flows['arp']
#dl_type=0x8035.
elif 'rarp' in flows: scd_flow_flows.dl_type = flows['rarp']
#dl_type=0x0800,nw_proto=1.
elif 'icmp' in flows: scd_flow_flows.dl_type = flows['icmp']
#####
                ##### IP/TCP/UDP/SCTP - protocols for which the TCP/UDP ports are given
#dl_type=0x0800.
elif 'ip' in flows: scd_flow_flows.dl_type = flows['ip']
#dl_type=0x0800,nw_proto=6.
elif 'tcp' in flows: scd_flow_flows.dl_type = flows['tcp']
#dl_type=0x0800,nw_proto=17.
elif 'udp' in flows: scd_flow_flows.dl_type = flows['udp']
#dl_type=0x0800,nw_proto=132.
elif 'sctp' in flows: scd_flow_flows.dl_type = flows['sctp']
if 'nw_src' in flows:
if flows['nw_src'].find("/"):
aux_nw = []
aux_nw_mask_lista = []
aux_nw_mask_str = None
ip_src = IPNetwork(flows['nw_src'])
scd_flow_flows.nw_src_full = ip_src
                        # Collect the IP address (split into the 4 octets)
for ip in ip_src.ip.words:
aux_nw.append(ip)
                        # Collect the netmask
aux_nw_mask_str = str(ip_src.netmask).split('.')
for mask in aux_nw_mask_str:
aux_nw_mask_lista.append(mask)
                        # Assign the values to the variables
scd_flow_flows.nw_src = aux_nw
scd_flow_flows.nw_src_mask = aux_nw_mask_lista
if 'nw_dst' in flows:
if flows['nw_dst'].find("/"):
aux_nw = []
aux_nw_mask_lista = []
aux_nw_mask_str = None
ip_dst = IPNetwork(flows['nw_dst'])
scd_flow_flows.nw_dst_full = ip_dst
                        # Collect the IP address (split into the 4 octets)
for ip in ip_dst.ip.words:
aux_nw.append(ip)
                        # Collect the netmask
aux_nw_mask_str = str(ip_dst.netmask).split('.')
for mask in aux_nw_mask_str:
aux_nw_mask_lista.append(mask)
                        # Assign the values to the variables
scd_flow_flows.nw_dst = aux_nw
scd_flow_flows.nw_dst_mask = aux_nw_mask_lista
if 'nw_proto' in flows: scd_flow_flows.nw_proto = flows['nw_proto']
if 'nw_tos' in flows: scd_flow_flows.nw_tos = flows['nw_tos']
if 'tp_src' in flows: scd_flow_flows.tp_src = flows['tp_src']
if 'tp_dst' in flows: scd_flow_flows.tp_dst = flows['tp_dst']
scd_flow_main_list.append(scd_flow_flows)
scd_flow_flows = None
print ("Modulo de Coleta demorou: %s" %(time.clock()-tempo_ini))
    # Variable used to receive the result of the conflict analysis
scd_verifica_conflito = None
i = 0
count = scd_flow_main_list.__len__()
print ("Iniciando Modulo de Verificacao")
tempo_ini = time.clock()
    # variable for the thread
queued_request = Queue.Queue()
    # Start of the flow verification for conflict detection
#for flow_1 in scd_flow_main_list:
for flow in scd_flow_main_list:
        # Attempt at parallelisation: one worker thread per flow, collected through the queue
        scd_thread = threading.Thread(target=scd_func_thread, args=(scd_flow_main_list,flow,queued_request))
        scd_thread.start()
        print scd_thread.name
        flow = queued_request.get()
print ("Modulo de Verificacao demorou: %s" %(time.clock()-tempo_ini))
print ("Iniciando Gravacao das Informacoes na Base de Dados")
tempo_ini = time.clock()
    # Start writing to the database
    # Clear the database tables
dj_comutador = ScdComutador.objects.all().delete()
dj_flow = ScdFlow.objects.all().delete()
df_conflito = ScdConflito.objects.all().delete()
    # Variable that determines the switch identifier
i=1
django.setup()
"""
    # Insert the switches into the SCD_COMUTADOR table
for switch in switches:
dj_comutador = ScdComutador.objects.create(comut_id=i,comut_nome=switch)
dj_comutador.save()
i += 1
    # Insert the rules into the SCD_FLOWS table
for flow in scd_flow_main_list:
switch = ScdComutador.objects.get(comut_nome=flow.switch)
dj_flow = ScdFlow.objects.create(fl_id=flow.flow_id,fl_flowtable=flow.table,\
id_comutador_id=switch.comut_id,fl_dl_dst=flow.dl_dst,\
fl_dl_src=flow.dl_src,fl_dl_vlan=flow.dl_vlan,\
fl_dl_type=flow.dl_type,fl_nw_src=flow.nw_src_full,\
fl_nw_dst=flow.nw_dst_full,fl_nw_tos=flow.nw_tos,\
fl_nw_proto=flow.nw_proto,fl_in_port=flow.in_port,\
fl_tp_src=flow.tp_src,fl_tp_dst=flow.tp_dst,fl_priority=flow.priority,\
fl_idle_timeout=flow.idle_timeout,fl_hard_timeout=flow.hard_timeout,\
fl_actions=flow.actions)
dj_flow.save()
    # Insert the conflicts into the SCD_CONFLITOS table
for flow in scd_flow_main_list:
count = flow.conflito.__len__()
for j in range(0,count):
dj_conflito = ScdConflito.objects.create(con_sugestao=flow.conflito_sugestao[j],\
con_nivel=flow.conflito_nivel[j],\
con_flow_principal_id=flow.flow_id,\
con_flow_analisada_id=flow.conflito[j])
dj_conflito.save()"""
print scd_flow_main_list.__len__()
print scd_flow_main_list[0].flow_id
print scd_flow_main_list[1].flow_id
print scd_flow_main_list[2].flow_id
print scd_flow_main_list[3].flow_id
print scd_flow_main_list[4].flow_id
print scd_flow_main_list[5].flow_id
print scd_flow_main_list[6].flow_id
print ("Processo de gravar na Base demorou: %s" %(time.clock()-tempo_ini))
|
DialogPluginManager.py
|
'''
Created on March 1, 2012
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
based on pull request 4
'''
from tkinter import Toplevel, font, messagebox, VERTICAL, HORIZONTAL, N, S, E, W
from tkinter.constants import DISABLED, ACTIVE
try:
from tkinter.ttk import Treeview, Scrollbar, Frame, Label, Button
except ImportError:
from ttk import Treeview, Scrollbar, Frame, Label, Button
from arelle import PluginManager, DialogURL
from arelle.CntlrWinTooltip import ToolTip
import os, time
try:
import regex as re
except ImportError:
import re
EMPTYLIST = []
GROUPSEP = '\x01d'
def dialogPluginManager(mainWin):
# check for updates in background
import threading
thread = threading.Thread(target=lambda cntlr=mainWin: backgroundCheckForUpdates(cntlr))
thread.daemon = True
thread.start()
def backgroundCheckForUpdates(cntlr):
cntlr.showStatus(_("Checking for updates to plug-ins")) # clear web loading status
modulesWithNewerFileDates = PluginManager.modulesWithNewerFileDates()
if modulesWithNewerFileDates:
cntlr.showStatus(_("Updates are available for these plug-ins: {0}")
.format(', '.join(modulesWithNewerFileDates)), clearAfter=5000)
else:
cntlr.showStatus(_("No updates found for plug-ins."), clearAfter=5000)
time.sleep(0.1) # Mac locks up without this, may be needed for empty ui queue?
cntlr.uiThreadQueue.put((DialogPluginManager, [cntlr, modulesWithNewerFileDates]))
class DialogPluginManager(Toplevel):
def __init__(self, mainWin, modulesWithNewerFileDates):
super(DialogPluginManager, self).__init__(mainWin.parent)
self.ENABLE = _("Enable")
self.DISABLE = _("Disable")
self.parent = mainWin.parent
self.cntlr = mainWin
# copy plugins for temporary display
self.pluginConfig = PluginManager.pluginConfig
self.pluginConfigChanged = False
self.uiClassMethodsChanged = False
self.modelClassesChanged = False
self.disclosureSystemTypesChanged = False
self.modulesWithNewerFileDates = modulesWithNewerFileDates
        parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.title(_("Plug-in Manager"))
frame = Frame(self)
# left button frame
buttonFrame = Frame(frame, width=40)
buttonFrame.columnconfigure(0, weight=1)
addLabel = Label(buttonFrame, text=_("Find plug-in modules:"), wraplength=60, justify="center")
addLocalButton = Button(buttonFrame, text=_("Locally"), command=self.findLocally)
ToolTip(addLocalButton, text=_("File chooser allows selecting python module files to add (or reload) plug-ins, from the local file system."), wraplength=240)
addWebButton = Button(buttonFrame, text=_("On Web"), command=self.findOnWeb)
ToolTip(addWebButton, text=_("Dialog to enter URL full path to load (or reload) plug-ins, from the web or local file system."), wraplength=240)
addLabel.grid(row=0, column=0, pady=4)
addLocalButton.grid(row=1, column=0, pady=4)
addWebButton.grid(row=2, column=0, pady=4)
buttonFrame.grid(row=0, column=0, rowspan=2, sticky=(N, S, W), padx=3, pady=3)
# right tree frame (plugins already known to arelle)
modulesFrame = Frame(frame, width=700)
vScrollbar = Scrollbar(modulesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(modulesFrame, orient=HORIZONTAL)
self.modulesView = Treeview(modulesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=7)
self.modulesView.grid(row=0, column=0, sticky=(N, S, E, W))
self.modulesView.bind('<<TreeviewSelect>>', self.moduleSelect)
hScrollbar["command"] = self.modulesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.modulesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
modulesFrame.columnconfigure(0, weight=1)
modulesFrame.rowconfigure(0, weight=1)
modulesFrame.grid(row=0, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.modulesView.focus_set()
self.modulesView.column("#0", width=120, anchor="w")
self.modulesView.heading("#0", text=_("Name"))
self.modulesView["columns"] = ("author", "ver", "status", "date", "update", "descr", "license")
self.modulesView.column("author", width=100, anchor="w", stretch=False)
self.modulesView.heading("author", text=_("Author"))
self.modulesView.column("ver", width=50, anchor="w", stretch=False)
self.modulesView.heading("ver", text=_("Version"))
self.modulesView.column("status", width=50, anchor="w", stretch=False)
self.modulesView.heading("status", text=_("Status"))
self.modulesView.column("date", width=70, anchor="w", stretch=False)
self.modulesView.heading("date", text=_("File Date"))
self.modulesView.column("update", width=50, anchor="w", stretch=False)
self.modulesView.heading("update", text=_("Update"))
self.modulesView.column("descr", width=200, anchor="w", stretch=False)
self.modulesView.heading("descr", text=_("Description"))
self.modulesView.column("license", width=70, anchor="w", stretch=False)
self.modulesView.heading("license", text=_("License"))
classesFrame = Frame(frame)
vScrollbar = Scrollbar(classesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(classesFrame, orient=HORIZONTAL)
self.classesView = Treeview(classesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=5)
self.classesView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.classesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.classesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
classesFrame.columnconfigure(0, weight=1)
classesFrame.rowconfigure(0, weight=1)
classesFrame.grid(row=1, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.classesView.focus_set()
self.classesView.column("#0", width=200, anchor="w")
self.classesView.heading("#0", text=_("Class"))
self.classesView["columns"] = ("modules",)
self.classesView.column("modules", width=500, anchor="w", stretch=False)
self.classesView.heading("modules", text=_("Modules"))
# bottom frame module info details
moduleInfoFrame = Frame(frame, width=700)
moduleInfoFrame.columnconfigure(1, weight=1)
self.moduleNameLabel = Label(moduleInfoFrame, wraplength=600, justify="left",
font=font.Font(family='Helvetica', size=12, weight='bold'))
self.moduleNameLabel.grid(row=0, column=0, columnspan=4, sticky=W)
self.moduleAuthorHdr = Label(moduleInfoFrame, text=_("author:"), state=DISABLED)
self.moduleAuthorHdr.grid(row=1, column=0, sticky=W)
self.moduleAuthorLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleAuthorLabel.grid(row=1, column=1, columnspan=3, sticky=W)
self.moduleDescrHdr = Label(moduleInfoFrame, text=_("description:"), state=DISABLED)
self.moduleDescrHdr.grid(row=2, column=0, sticky=W)
self.moduleDescrLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleDescrLabel.grid(row=2, column=1, columnspan=3, sticky=W)
self.moduleClassesHdr = Label(moduleInfoFrame, text=_("classes:"), state=DISABLED)
self.moduleClassesHdr.grid(row=3, column=0, sticky=W)
self.moduleClassesLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleClassesLabel.grid(row=3, column=1, columnspan=3, sticky=W)
ToolTip(self.moduleClassesLabel, text=_("List of classes that this plug-in handles."), wraplength=240)
self.moduleUrlHdr = Label(moduleInfoFrame, text=_("URL:"), state=DISABLED)
self.moduleUrlHdr.grid(row=4, column=0, sticky=W)
self.moduleUrlLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleUrlLabel.grid(row=4, column=1, columnspan=3, sticky=W)
ToolTip(self.moduleUrlLabel, text=_("URL of plug-in module (local file path or web loaded file)."), wraplength=240)
self.moduleDateHdr = Label(moduleInfoFrame, text=_("date:"), state=DISABLED)
self.moduleDateHdr.grid(row=5, column=0, sticky=W)
self.moduleDateLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleDateLabel.grid(row=5, column=1, columnspan=3, sticky=W)
        ToolTip(self.moduleDateLabel, text=_("Date of currently loaded module file (with parenthetical note when an update is available)."), wraplength=240)
self.moduleLicenseHdr = Label(moduleInfoFrame, text=_("license:"), state=DISABLED)
self.moduleLicenseHdr.grid(row=6, column=0, sticky=W)
self.moduleLicenseLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleLicenseLabel.grid(row=6, column=1, columnspan=3, sticky=W)
self.moduleImportsHdr = Label(moduleInfoFrame, text=_("imports:"), state=DISABLED)
self.moduleImportsHdr.grid(row=7, column=0, sticky=W)
self.moduleImportsLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleImportsLabel.grid(row=7, column=1, columnspan=3, sticky=W)
self.moduleEnableButton = Button(moduleInfoFrame, text=self.ENABLE, state=DISABLED, command=self.moduleEnable)
ToolTip(self.moduleEnableButton, text=_("Enable/disable plug in."), wraplength=240)
self.moduleEnableButton.grid(row=8, column=1, sticky=E)
self.moduleReloadButton = Button(moduleInfoFrame, text=_("Reload"), state=DISABLED, command=self.moduleReload)
ToolTip(self.moduleReloadButton, text=_("Reload/update plug in."), wraplength=240)
self.moduleReloadButton.grid(row=8, column=2, sticky=E)
self.moduleRemoveButton = Button(moduleInfoFrame, text=_("Remove"), state=DISABLED, command=self.moduleRemove)
ToolTip(self.moduleRemoveButton, text=_("Remove plug-in from the plug-in table (does not erase the plug-in's file)."), wraplength=240)
self.moduleRemoveButton.grid(row=8, column=3, sticky=E)
moduleInfoFrame.grid(row=2, column=0, columnspan=5, sticky=(N, S, E, W), padx=3, pady=3)
moduleInfoFrame.config(borderwidth=4, relief="groove")
okButton = Button(frame, text=_("Close"), command=self.ok)
ToolTip(okButton, text=_("Accept changes (if any) and close dialog."), wraplength=240)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
ToolTip(cancelButton, text=_("Cancel changes (if any) and close dialog."), wraplength=240)
okButton.grid(row=3, column=3, sticky=(S,E), pady=3)
cancelButton.grid(row=3, column=4, sticky=(S,E), pady=3, padx=3)
enableDisableFrame = Frame(frame)
enableDisableFrame.grid(row=3, column=1, sticky=(S,W), pady=3)
enableAllButton = Button(enableDisableFrame, text=_("Enable All"), command=self.enableAll)
ToolTip(enableAllButton, text=_("Enable all plug-ins."), wraplength=240)
disableAllButton = Button(enableDisableFrame, text=_("Disable All"), command=self.disableAll)
ToolTip(disableAllButton, text=_("Disable all plug-ins."), wraplength=240)
enableAllButton.grid(row=1, column=1)
disableAllButton.grid(row=1, column=2)
self.loadTreeViews()
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(0, weight=0)
frame.columnconfigure(1, weight=1)
frame.rowconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def loadTreeViews(self):
self.selectedModule = None
# clear previous treeview entries
for previousNode in self.modulesView.get_children(""):
self.modulesView.delete(previousNode)
def loadSubtree(parentNode, moduleItems):
for moduleItem in sorted(moduleItems, key=lambda item: item[0]):
moduleInfo = moduleItem[1]
if parentNode or not moduleInfo.get("isImported"):
nodeName = moduleItem[0]
if parentNode:
nodeName = parentNode + GROUPSEP + nodeName
name = moduleInfo.get("name", nodeName)
node = self.modulesView.insert(parentNode, "end", nodeName, text=name)
self.modulesView.set(node, "author", moduleInfo.get("author"))
self.modulesView.set(node, "ver", moduleInfo.get("version"))
self.modulesView.set(node, "status", moduleInfo.get("status"))
self.modulesView.set(node, "date", moduleInfo.get("fileDate"))
if name in self.modulesWithNewerFileDates:
self.modulesView.set(node, "update", _("available"))
self.modulesView.set(node, "descr", moduleInfo.get("description"))
self.modulesView.set(node, "license", moduleInfo.get("license"))
if moduleInfo.get("imports"):
loadSubtree(node, [(importModuleInfo["name"],importModuleInfo)
for importModuleInfo in moduleInfo["imports"]])
loadSubtree("", self.pluginConfig.get("modules", {}).items())
# clear previous treeview entries
for previousNode in self.classesView.get_children(""):
self.classesView.delete(previousNode)
for i, classItem in enumerate(sorted(self.pluginConfig.get("classes", {}).items())):
className, moduleList = classItem
node = self.classesView.insert("", "end", className, text=className)
self.classesView.set(node, "modules", ', '.join(moduleList))
self.moduleSelect() # clear out prior selection
def ok(self, event=None):
if self.pluginConfigChanged:
PluginManager.pluginConfig = self.pluginConfig
PluginManager.pluginConfigChanged = True
PluginManager.reset() # force reloading of modules
if self.uiClassMethodsChanged or self.modelClassesChanged or self.disclosureSystemTypesChanged: # may require reloading UI
affectedItems = ""
if self.uiClassMethodsChanged:
affectedItems += _("menus of the user interface")
if self.modelClassesChanged:
if self.uiClassMethodsChanged:
affectedItems += _(" and ")
affectedItems += _("model objects of the processor")
if self.disclosureSystemTypesChanged:
    if (self.uiClassMethodsChanged or self.modelClassesChanged):
        affectedItems += _(" and ")
    affectedItems += _("disclosure system types")
if messagebox.askyesno(_("User interface plug-in change"),
_("A change in plug-in class methods may have affected {0}. "
"Please restart Arelle to due to these changes. \n\n"
"Should Arelle restart itself now "
"(if there are any unsaved changes they would be lost!)?"
).format(affectedItems),
parent=self):
self.cntlr.uiThreadQueue.put((self.cntlr.quit, [None, True]))
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def moduleSelect(self, *args):
node = (self.modulesView.selection() or (None,))[0]
if node:
node = node.rpartition(GROUPSEP)[2] # drop leading path names for module name
moduleInfo = self.pluginConfig.get("modules", {}).get(node)
if moduleInfo:
self.selectedModule = node
name = moduleInfo["name"]
self.moduleNameLabel.config(text=name)
self.moduleAuthorHdr.config(state=ACTIVE)
self.moduleAuthorLabel.config(text=moduleInfo["author"])
self.moduleDescrHdr.config(state=ACTIVE)
self.moduleDescrLabel.config(text=moduleInfo["description"])
self.moduleClassesHdr.config(state=ACTIVE)
self.moduleClassesLabel.config(text=', '.join(moduleInfo["classMethods"]))
self.moduleUrlHdr.config(state=ACTIVE)
self.moduleUrlLabel.config(text=moduleInfo["moduleURL"])
self.moduleDateHdr.config(state=ACTIVE)
self.moduleDateLabel.config(text=moduleInfo["fileDate"] + " " +
(_("(an update is available)") if name in self.modulesWithNewerFileDates else ""))
self.moduleLicenseHdr.config(state=ACTIVE)
self.moduleLicenseLabel.config(text=moduleInfo["license"])
if moduleInfo.get("imports"):
self.moduleImportsHdr.config(state=ACTIVE)
_text = ", ".join(mi["name"] for mi in moduleInfo["imports"][:3])
if len(moduleInfo["imports"]) >= 3:
_text += ", ..."
self.moduleImportsLabel.config(text=_text)
_buttonState = DISABLED if moduleInfo.get("isImported") else ACTIVE
self.moduleEnableButton.config(state=_buttonState,
text={"enabled":self.DISABLE,
"disabled":self.ENABLE}[moduleInfo["status"]])
self.moduleReloadButton.config(state=_buttonState)
self.moduleRemoveButton.config(state=_buttonState)
else:
self.selectedModule = None
self.moduleNameLabel.config(text="")
self.moduleAuthorHdr.config(state=DISABLED)
self.moduleAuthorLabel.config(text="")
self.moduleDescrHdr.config(state=DISABLED)
self.moduleDescrLabel.config(text="")
self.moduleClassesHdr.config(state=DISABLED)
self.moduleClassesLabel.config(text="")
self.moduleUrlHdr.config(state=DISABLED)
self.moduleUrlLabel.config(text="")
self.moduleDateHdr.config(state=DISABLED)
self.moduleDateLabel.config(text="")
self.moduleLicenseHdr.config(state=DISABLED)
self.moduleLicenseLabel.config(text="")
self.moduleImportsHdr.config(state=DISABLED)
self.moduleImportsLabel.config(text="")
self.moduleEnableButton.config(state=DISABLED, text=self.ENABLE)
self.moduleReloadButton.config(state=DISABLED)
self.moduleRemoveButton.config(state=DISABLED)
def findLocally(self):
initialdir = self.cntlr.pluginDir # default plugin directory
if not self.cntlr.isMac: # can't navigate within app easily, always start in default directory
initialdir = self.cntlr.config.setdefault("pluginOpenDir", initialdir)
filename = self.cntlr.uiFileDialog("open",
parent=self,
title=_("Choose plug-in module file"),
initialdir=initialdir,
filetypes=[(_("Python files"), "*.py")],
defaultextension=".py")
if filename:
# check if a package is selected (any file in a directory containing an __init__.py)
#if (os.path.basename(filename) == "__init__.py" and os.path.isdir(os.path.dirname(filename)) and
# os.path.isfile(filename)):
# filename = os.path.dirname(filename) # refer to the package instead
self.cntlr.config["pluginOpenDir"] = os.path.dirname(filename)
moduleInfo = PluginManager.moduleModuleInfo(filename)
self.loadFoundModuleInfo(moduleInfo, filename)
def findOnWeb(self):
url = DialogURL.askURL(self)
if url: # url is the in-cache or local file
moduleInfo = PluginManager.moduleModuleInfo(url)
self.cntlr.showStatus("") # clear web loading status
self.loadFoundModuleInfo(moduleInfo, url)
def loadFoundModuleInfo(self, moduleInfo, url):
if moduleInfo and moduleInfo.get("name"):
self.addPluginConfigModuleInfo(moduleInfo)
self.loadTreeViews()
else:
messagebox.showwarning(_("Module is not itself a plug-in or in a directory with package __init__.py plug-in. "),
_("File does not itself contain a python program with an appropriate __pluginInfo__ declaration: \n\n{0}")
.format(url),
parent=self)
def checkIfImported(self, moduleInfo):
if moduleInfo.get("isImported"):
messagebox.showwarning(_("Plug-in is imported by a parent plug-in. "),
_("Plug-in has a parent, please request operation on the parent: \n\n{0}")
.format(moduleInfo.get("name")),
parent=self)
return True
return False
def removePluginConfigModuleInfo(self, name):
moduleInfo = self.pluginConfig["modules"].get(name)
if moduleInfo:
if self.checkIfImported(moduleInfo):
return
def _removePluginConfigModuleInfo(moduleInfo):
_name = moduleInfo.get("name")
if _name:
for classMethod in moduleInfo["classMethods"]:
classMethods = self.pluginConfig["classes"].get(classMethod)
if classMethods and _name in classMethods:
classMethods.remove(_name)
if not classMethods: # list has become unused
del self.pluginConfig["classes"][classMethod] # remove class
if classMethod.startswith("CntlrWinMain.Menu"):
self.uiClassMethodsChanged = True # may require reloading UI
elif classMethod == "ModelObjectFactory.ElementSubstitutionClasses":
self.modelClassesChanged = True # model object factory classes changed
elif classMethod == "DisclosureSystem.Types":
self.disclosureSystemTypesChanged = True # disclosure system types changed
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_removePluginConfigModuleInfo(importModuleInfo)
self.pluginConfig["modules"].pop(_name, None)
_removePluginConfigModuleInfo(moduleInfo)
self.pluginConfigChanged = True
def addPluginConfigModuleInfo(self, moduleInfo):
if self.checkIfImported(moduleInfo):
return
name = moduleInfo.get("name")
self.removePluginConfigModuleInfo(name) # remove any prior entry for this module
def _addPlugin(moduleInfo):
_name = moduleInfo.get("name")
if _name:
self.modulesWithNewerFileDates.discard(_name) # no longer has an update available
self.pluginConfig["modules"][_name] = moduleInfo
# add classes
for classMethod in moduleInfo["classMethods"]:
classMethods = self.pluginConfig["classes"].setdefault(classMethod, [])
if _name not in classMethods:
classMethods.append(_name)
if classMethod.startswith("CntlrWinMain.Menu"):
self.uiClassMethodsChanged = True # may require reloading UI
elif classMethod == "ModelObjectFactory.ElementSubstitutionClasses":
self.modelClassesChanged = True # model object factory classes changed
elif classMethod == "DisclosureSystem.Types":
self.disclosureSystemTypesChanged = True # disclosure system types changed
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_addPlugin(importModuleInfo)
_addPlugin(moduleInfo)
self.pluginConfigChanged = True
def moduleEnable(self):
if self.selectedModule in self.pluginConfig["modules"]:
moduleInfo = self.pluginConfig["modules"][self.selectedModule]
if self.checkIfImported(moduleInfo):
return
def _moduleEnable(moduleInfo):
if self.moduleEnableButton['text'] == self.ENABLE:
moduleInfo["status"] = "enabled"
elif self.moduleEnableButton['text'] == self.DISABLE:
moduleInfo["status"] = "disabled"
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_moduleEnable(importModuleInfo)
_moduleEnable(moduleInfo)
if self.moduleEnableButton['text'] == self.ENABLE:
self.moduleEnableButton['text'] = self.DISABLE
elif self.moduleEnableButton['text'] == self.DISABLE:
self.moduleEnableButton['text'] = self.ENABLE
self.pluginConfigChanged = True
self.loadTreeViews()
def moduleReload(self):
if self.selectedModule in self.pluginConfig["modules"]:
url = self.pluginConfig["modules"][self.selectedModule].get("moduleURL")
if url:
moduleInfo = PluginManager.moduleModuleInfo(url, reload=True)
if moduleInfo:
if self.checkIfImported(moduleInfo):
return
self.addPluginConfigModuleInfo(moduleInfo)
self.loadTreeViews()
self.cntlr.showStatus(_("{0} reloaded").format(moduleInfo["name"]), clearAfter=5000)
else:
messagebox.showwarning(_("Module error"),
_("File or module cannot be reloaded: \n\n{0}")
.format(url),
parent=self)
def moduleRemove(self):
if self.selectedModule in self.pluginConfig["modules"]:
self.removePluginConfigModuleInfo(self.selectedModule)
self.pluginConfigChanged = True
self.loadTreeViews()
def enableAll(self):
self.enableDisableAll(True)
def disableAll(self):
self.enableDisableAll(False)
def enableDisableAll(self, doEnable):
for moduleName, moduleInfo in self.pluginConfig["modules"].items():
if not moduleInfo.get("isImported"):
def _enableDisableAll(moduleInfo):
if doEnable:
moduleInfo["status"] = "enabled"
else:
moduleInfo["status"] = "disabled"
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_enableDisableAll(importModuleInfo)
_enableDisableAll(moduleInfo)
if doEnable:
self.moduleEnableButton['text'] = self.DISABLE
else:
self.moduleEnableButton['text'] = self.ENABLE
self.pluginConfigChanged = True
self.loadTreeViews()
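# Illustrative sketch (not from the original dialog): _removePluginConfigModuleInfo,
# _addPlugin, _moduleEnable and _enableDisableAll above all repeat the same
# depth-first walk over a moduleInfo dict and its nested "imports" entries. A
# generic walker, assuming moduleInfo dicts shaped like those kept in
# pluginConfig["modules"], could look like this:
#
#     def walkModuleInfos(moduleInfo, visit):
#         visit(moduleInfo)
#         for importedModuleInfo in moduleInfo.get("imports", []):
#             walkModuleInfos(importedModuleInfo, visit)
#
#     # e.g. disable a plug-in together with everything it imports:
#     # walkModuleInfos(moduleInfo, lambda mi: mi.update(status="disabled"))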
|
task.py
|
from dataclasses import dataclass
from threading import Thread
from queue import Queue
@dataclass
class ProgressUpdate:
progress: float
message: str
fail: bool = False
class Task:
thread: Thread
output: Queue
def __init__(self, target, prompt):
def thread_run(msg, out):
target(msg, out)
out.put(ProgressUpdate(1, 'task complete'))
self.output = Queue()
self.thread = Thread(target=thread_run, args=(prompt, self.output))
self.thread.start()
def join(self):
self.thread.join()
def read_progress(self) -> ProgressUpdate:
return self.output.get()
def running(self):
return self.thread.is_alive()
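# A minimal usage sketch (not part of the original module): example_target and the
# polling loop below are hypothetical and only show how Task and ProgressUpdate are
# meant to be combined -- read_progress() blocks until the worker posts an update,
# and the wrapper always posts a final ProgressUpdate(1, 'task complete').
if __name__ == '__main__':
    def example_target(prompt, out):
        # Post a couple of intermediate updates before the Task wrapper
        # appends the final completion update.
        out.put(ProgressUpdate(0.25, 'started: {}'.format(prompt)))
        out.put(ProgressUpdate(0.75, 'almost done'))

    task = Task(example_target, 'demo prompt')
    while True:
        update = task.read_progress()  # blocks on the underlying Queue
        print(update.progress, update.message)
        if update.fail or update.progress >= 1:
            break
    task.join()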
|
ssr_check.py
|
#!/usr/bin/env python3
import requests
import time
import threading
from ssshare.ss import ss_local
import random
def test_connection(
url='http://cip.cc',
headers={'User-Agent': 'ShadowSocksShare/web/crawler libcurl/7.21.3 OpenSSL/0.9.8o zlib/1.2.3.4 libidn/1.18'},
proxies=None, port=1080, timeout=10):
if not proxies:
proxies = {'http': 'socks5://localhost:{}'.format(port), 'https': 'socks5://localhost:{}'.format(port)}
ok = False
content = ''
try:
start = time.time()
respond = requests.get(url, headers=headers, proxies=proxies, timeout=timeout)
if respond.ok:
ok = (time.time() - start) * 1000
else:
ok = respond.ok
content = respond.text
except Exception as e:
print(e)
content = repr(e)
return ok, content
def test_socks_server(dictionary=None, str_json=None, port=None):
if not port:
port = random.randint(2000, 3000)
try:
try:
loop, tcps, udps = ss_local.main(
dictionary=dictionary, str_json=str_json, port=port)
except Exception as e:
print(e)
return -1, 'SSR start failed'
try:
t = threading.Thread(target=loop.run)
t.start()
time.sleep(3)
conn, content = test_connection(port=port)
loop.stop()
t.join()
tcps.close(next_tick=True)
udps.close(next_tick=True)
time.sleep(1)
return conn, content
except Exception as e:
print(e)
return -2, 'Thread or Connection to website failed'
except SystemExit as e:
return e.code - 10, 'Unknown failure'
def validate(websites):
for servers in websites:
print(servers['info'])
for server in servers['data']:
result, info = test_socks_server(str_json=server['json'])
print('>' * 10, 'Result:', result)
if result > 0:
print('>' * 10, 'Test passed!')
elif result == -1:
print(server['json'])
server['status'] = result
server['content'] = info
return websites
if __name__ == '__main__':
print(test_connection())
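# A minimal sketch of the structure validate() expects (the source name and the SSR
# JSON below are hypothetical placeholders, not real servers): a list of site dicts,
# each carrying an 'info' string and a 'data' list whose entries hold the server
# configuration under the 'json' key. validate() writes back 'status' and 'content'
# onto every server entry it tests.
#
#     example_websites = [
#         {
#             'info': 'example source',
#             'data': [
#                 {'json': '{"server": "0.0.0.0", "server_port": 8388, '
#                          '"method": "aes-256-cfb", "password": "placeholder"}'},
#             ],
#         },
#     ]
#     validate(example_websites)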
|
VasoTracker.py
|
##################################################
## VasoTracker Pressure Myograph Software
##
## This software provides diameter measurements (inner and outer) of pressurised blood vessels
## Designed to work with Thorlabs DCC1545M
## For additional info see www.vasotracker.com and https://github.com/VasoTracker/VasoTracker
##
##################################################
##
## BSD 3-Clause License
##
## Copyright (c) 2018, VasoTracker
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## * Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
##
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## * Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##################################################
##
## Author: Penelope F Lawton, Matthew D Lee, and Calum Wilson
## Copyright: Copyright 2018, VasoTracker
## Credits: Penelope F Lawton, Matthew D Lee, and Calum Wilson
## License: BSD 3-Clause License
## Version: 1.1.0
## Maintainer: Calum Wilson
## Email: vasotracker@gmail.com
## Status: Production
## Last updated: 20191117
##
##################################################
## We found the following to be useful:
## https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch09s07.html
## http://code.activestate.com/recipes/82965-threads-tkinter-and-asynchronous-io/
## https://www.physics.utoronto.ca/~phy326/python/Live_Plot.py
## http://forum.arduino.cc/index.php?topic=225329.msg1810764#msg1810764
## https://stackoverflow.com/questions/9917280/using-draw-in-pil-tkinter
## https://stackoverflow.com/questions/37334106/opening-image-on-canvas-cropping-the-image-and-update-the-canvas
from __future__ import division
# Tkinter imports
import Tkinter as tk
from Tkinter import *
import tkSimpleDialog
import tkMessageBox as tmb
import tkFileDialog
import ttk
from PIL import Image, ImageTk #convert cv2 image to tkinter
E = tk.E
W = tk.W
N = tk.N
S = tk.S
ypadding = 1.5 #ypadding just to save time - used for both x and y
# Other imports
import os
import sys
import time
import datetime
import threading
import random
import Queue
import numpy as np
import cv2
import csv
from skimage import io
import skimage
from skimage import measure
import serial
import win32com.client
import webbrowser
from ConfigParser import SafeConfigParser
from collections import deque
# Import Vasotracker functions
import VTutils
from VT_Arduino import Arduino
from VT_Diameter import Calculate_Diameter
# Add MicroManager to path
import sys
MM_PATH = os.path.join('C:', os.path.sep, 'Program Files','Micro-Manager-1.4')
sys.path.append(MM_PATH)
os.environ['PATH'] = MM_PATH + ';' + os.environ['PATH']
import MMCorePy
# matplotlib imports
import matplotlib
#matplotlib.use('Qt5Agg')
#matplotlib.use('Qt4Agg', warn=True)
import matplotlib.backends.tkagg as tkagg
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import matplotlib.pyplot as plt
from matplotlib.backends import backend_qt4agg
from matplotlib import pyplot
from matplotlib.transforms import Bbox
#Disable garbage collection
import gc
gc.disable()
##################################################
## GUI main application
##################################################
class GuiPart(tk.Frame):
#Initialisation function
def __init__(self, master, queue,endCommand, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.master = master #this is root
self.queue = queue
self.endApplication = endCommand
#Set up the GUI
self.grid(sticky=N+S+E+W)
top = self.winfo_toplevel()
top.rowconfigure(0, weight=1)
top.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.columnconfigure(0, weight=1)
self.filename = self.get_file_name()
print self.filename
# Arduino
self.Arduino = Arduino(self)
self.ports = self.Arduino.getports()
# Timing functions
self.timeit = TimeIt()
self.timeit2 = TimeIt2()
# Load the config file
parser = SafeConfigParser()
parser.read('default_settings.ini')
# Initial Values
self.OD = None
# Scale setting
try:
self.multiplication_factor = float(parser.get('Acquisition', 'Scale'))# 1 # Scale setting
except:
self.multiplication_factor = 1
self.init_multiplication_factor = self.multiplication_factor
# Exposure setting
try:
self.exposure = float(parser.get('Acquisition', 'Exposure'))# 1 # Scale setting
except:
self.exposure = 250
self.init_exposure = self.exposure
# Pixel clock setting
global pix_clock
try:
pix_clock = float(parser.get('Acquisition', 'Pixel_clock'))# 1 # Scale setting
except:
pix_clock = 10
self.pix_clock = pix_clock
self.init_pix_clock = pix_clock
# Minimum frame rate setting
global Force_min_interval
try:
Force_min_interval = float(parser.get('Acquisition', 'Force_min_interval'))# 1 # Scale setting
except:
Force_min_interval = 0.250
self.Force_min_interval = Force_min_interval
# Minimum frame rate setting
global Force_faster_interval
try:
Force_faster_interval = float(parser.get('Acquisition', 'Force_min_interval'))# 1 # Scale setting
except:
Force_faster_interval = 0.125
self.Force_faster_interval = Force_faster_interval
# Record interval setting
global rec_interval
try:
rec_interval = float(parser.get('Acquisition', 'Recording_interval'))# 1 # Scale setting
except:
rec_interval = 240
self.rec_interval = rec_interval
self.init_rec_interval = rec_interval
# Data acquisition lines setting
try:
self.num_lines = float(parser.get('Analysis', '#_of_lines'))# 1 # Scale setting
except:
self.num_lines = 10
# Smoothing setting
try:
self.smooth_factor = int(parser.get('Analysis', 'Smooth'))# 1 # Scale setting
except:
self.smooth_factor = 21
# Integration setting
try:
self.integration_factor = int(parser.get('Analysis', 'Integration'))# 1 # Scale setting
except:
self.integration_factor = 20
# Threshold setting
try:
self.thresh_factor = float(parser.get('Analysis', 'Threshold'))# 1 # Scale setting
except:
self.thresh_factor = 3.5
# Graph settings
try:
self.x_min_default = int(parser.get('Graph axes', 'x-min'))
self.x_max_default = int(parser.get('Graph axes', 'x-max'))
self.y_min_default = int(parser.get('Graph axes', 'y-min'))
self.y_min_default2 = self.y_min_default
self.y_max_default = int(parser.get('Graph axes', 'y-max'))
self.y_max_default2 = self.y_max_default
except:
self.x_min_default = -600
self.x_max_default = 0
self.y_min_default = 0
self.y_min_default2 = self.y_min_default
self.y_max_default = 250
self.y_max_default2 = self.y_max_default
# Memory settings
try:
self.n_plot_points = float(parser.get('Memory', 'n_plot_points '))# 1 # Scale setting
except:
self.n_plot_points = 1200 #300
try:
self.n_data_points = float(parser.get('Memory', 'n_data_points '))# 1 # Scale setting
except:
self.n_data_points = 15000 #300
# Acquisition rate setting
self.acq_rate = np.nan
self.initUI(self.endApplication)
# Open the csv file and then clear it
f = open(self.filename.name, "w+")
f.close()
# Add the headers
with open((self.filename.name), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(("Time","Outer Diameter", "Inner Diameter", 'Temperature (oC)', 'Pressure 1 (mmHg)', 'Pressure 2 (mmHg)', 'Avg Pressure (mmHg)'))
# Add the file for the outerdiameter profiles
self.txt_file = os.path.splitext(self.filename.name)[0]
print "tail = ", self.txt_file
self.profile_file = self.txt_file + ' - ODProfiles' + '.csv'
with open((self.profile_file), 'w+') as g:
v=csv.writer(g, quoting=csv.QUOTE_ALL)
column_headings = 'Time (s)', 'Profile 1', 'Profile 2', "Profile 3", "..."
v.writerow(column_headings)
# Add the file for the innerdiameter profiles
self.profile_file2 = self.txt_file + ' - IDProfiles' + '.csv'
with open((self.profile_file2), 'w+') as g:
v=csv.writer(g, quoting=csv.QUOTE_ALL)
column_headings = 'Time (s)', 'Profile 1', 'Profile 2', "Profile 3", "..."
v.writerow(column_headings)
# Add file for table
self.txt_file = self.txt_file + ' - Table' + '.csv'
g = open(self.txt_file, "w+")
g.close()
with open((self.txt_file), 'ab') as g:
v=csv.writer(g, quoting=csv.QUOTE_ALL)
column_headings = 'Time (s)', 'Label', 'Diameter', "P1", "P2", "max_percent"
v.writerow(column_headings)
# Function for getting the save file.
def get_file_name(self):
tmb.showinfo("", "Create a file to save output...")
now = datetime.datetime.now()
savename = now.strftime("%Y%m%d")
f = tkFileDialog.asksaveasfile(mode='w', defaultextension=".csv", initialdir="Results\\", initialfile=savename)
if f:
print "f = ", f
return(f)
else: # asksaveasfile return `None` if dialog closed with "cancel".
if tmb.askquestion("No save file selected", "Do you want to quit VasoTracker?", icon='warning') == "yes":
self.endApplication()
else:
f = self.get_file_name()
return (f)
# Function for writing to the save file
def writeToFile(self,data):
with open((self.filename.name), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(data)
# Function for writing to the save file
def writeToFile2(self,data):
with open((self.profile_file), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(data)
# Function for writing to the save file
def writeToFile3(self,data):
with open((self.profile_file2), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(data)
# Function for closing down
def close_app(self):
if tmb.askokcancel("Close", "Are you sure...?"):
self.endApplication()
def gotouserguide(self):
tmb.showinfo("Hope you are connected to the internet", "Opening in your browser...")
webbrowser.open_new(r"http://www.vasotracker.com/sdm_downloads/vasotracker-acquisition-software-manual/")
def gotocontact(self):
tmb.showinfo("We would hate to hear from you", "Because it probably means there is a problem. Despite our feelings, we will do our best to help. Our contact details should pop up in your web browser...")
webbrowser.open_new(r"http://www.vasotracker.com/about/contact-us/")
def launchabout(self):
webbrowser.open_new(r"http://www.vasotracker.com/about/")
def launchupdate(self):
tmb.showinfo("We are not that clever", "So you will have to see if their is an update to download yourself... the download page should pop up in your web browser...")
webbrowser.open_new(r"http://www.vasotracker.com/downloads/acquisition-software/")
def launchsnake(self):
tmb.showinfo("We did warn you.", "Any hope of this being a productive day have just went out the window...")
import spaceinvaders
def launchmusic(self):
tmb.showinfo("We like dancing in the shower", "Whether in the lab or in the shower, these songs make us boogie...")
webbrowser.open_new(r"https://open.spotify.com/playlist/5isnlNKb6Xtm975J9rxxT0?si=U5qpBEeHTKW9S0mLe70rKQ")
# Function for defining an average checkbox ## Shouldbe in toolbar!
def average_checkbox(self, window, text):
avg_checkbox = ttk.Checkbutton(window, text=text)
avg_checkbox.grid(row=0, columnspan=4, padx=3, pady=3)
# Second Function for initialising the GUI
def initUI(self,endCommand):
# make Esc exit the program
self.master.bind('<Escape>', lambda e: endCommand)
# make the top right close button minimize (iconify) the main window
self.master.protocol("WM_DELETE_WINDOW", self.close_app)
# create a menu bar with an Exit command
menubar = tk.Menu(self.master)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Exit", command=self.close_app)
# Create a help menu
self.helpmenu = tk.Menu(menubar, tearoff=0)
self.helpmenu.add_command(label='Boogie woogie', command = self.launchmusic)
self.helpmenu.add_separator()
self.helpmenu.add_command(label='User Guide', command = self.gotouserguide)
self.helpmenu.add_command(label='Contact', command = self.gotocontact)
self.helpmenu.add_command(label='About', command = self.launchabout)
self.helpmenu.add_command(label='Update', command = self.launchupdate)
self.helpmenu.add_separator()
self.helpmenu.add_command(label='Do not click here...', command = self.launchsnake)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Help", menu=self.helpmenu)
self.master.config(menu=menubar)
self.pack(fill=BOTH, expand=1)
# Make the toolbar along the top
self.toolbar = ToolBar(self)#ttk.Frame(root, height=150)
self.toolbar.grid(row=0, column=0,rowspan=1,columnspan=4, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.toolbar.grid(sticky='nswe')
self.toolbar.rowconfigure(0, weight=1)
self.toolbar.columnconfigure(0, weight=1)
# Make the status bar along the bottom
def callback(event):
webbrowser.open_new(r"https://doi.org/10.3389/fphys.2019.00099")
self.status_bar = ttk.Label(text = 'Thank you for using VasoTracker. To support us, please cite VasoTracker (click here for the paper).', relief=SUNKEN, anchor='w')
self.status_bar.pack(side=BOTTOM, fill=X)
self.status_bar.bind("<Button-1>", callback)
# Make the graph frame
self.graphframe = GraphFrame(self)
self.graphframe.grid(row=1, column=0, rowspan=4,columnspan=2, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.graphframe.grid(sticky='nswe')
# Make the table frame
self.tableframe = TableFrame(self)
self.tableframe.grid(row=1, column=3,rowspan=1,columnspan=1, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.tableframe.grid(sticky='nwe')
#Update everything so that the frames are all the correct size. We need to do this so we can size the graph/image before we place them.
self.toolbar.update()
self.status_bar.update()
self.tableframe.update()
self.toolbar.update()
# Make the Camera Frame bottom right
self.cameraframe = CameraFrame(self)
self.cameraframe.grid(row=2, column=3,rowspan=1,columnspan=2, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.cameraframe.grid(sticky='nswe')
#print "this is the height: ", self.graphframe.winfo_height()
#print "this is the width: ", self.graphframe.winfo_width()
# Now set up the graph
self.graphframe.mainWidgets()
# Count function for reading in with FakeCamera
self.count = 0
# Count function for resizing on first image acquisition
self.count2 = 0
# For storing data
self.P1 = np.nan
self.P2 = np.nan
self.T = np.nan
self.start_time = 0
self.prev_time = 0
self.timelist = []
self.outers = []
self.inners = []
self.delta_i = 1
self.delta_i2 = 1
# Function for processing the Arduino data
def sortdata(self,temppres):
# Initialize variables
temp = np.nan
pres1 = np.nan
pres2 = np.nan
# Loop through the data from the two Arduinos (temppres contains dummy data if < 2 connected)
for data in temppres:
if len(data) > 0:
# Split the data by Arduino
val = data[0].strip('\n\r').split(';')
val = val[:-1]
val = [el.split(':') for el in val]
# Get the temperature value
if val[0][0] == "T1":
try:
temp = float(val[0][1])
except:
temp = np.nan
#set_temp = float(val[1][1])
# Get the pressure value
elif val[0][0] == "P1":
try:
pres1 = float(val[0][1])
pres2 = float(val[1][1])
except:
pres1 = np.nan
pres2 = np.nan
else:
pass
else:
pass
return pres1,pres2,temp
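# Illustrative note (format inferred from the parsing above, so treat it as an
# assumption): each Arduino is expected to send one semicolon-terminated line of
# "key:value" pairs, e.g. a temperature line and a pressure line such as
#
#     temppres = [["T1:37.1;SET:37.0;\r\n"], ["P1:59.8;P2:60.2;\r\n"]]
#     # sortdata(temppres) would then return (59.8, 60.2, 37.1)
#
# missing or malformed pairs simply fall back to np.nan.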
# This function will process all of the incoming images
def processIncoming(self):
"""Handle all messages currently in the queue, if any."""
while self.queue.qsize( ):
#print "Queue size = ", self.queue.qsize( )
#with self.timeit2("Total"): # time for optimisation
try:
#Get the image
msg = self.queue.get(0)
got_image = True
except:
got_image = False
if got_image == True:
# On first loop, reset the time
if self.toolbar.record_flag:
if self.count == 0:
self.start_time=time.time()
# This is for loading in the example data!
try:
mmc.setProperty('Focus', "Position", self.count%500)
except:
pass
# Get the arduino data
#with self.timeit("Get Arduino"): # time for optimisation
temppres = self.Arduino.getData()
self.P1,self.P2,self.T = self.sortdata(temppres)
# Save raw image:
if self.toolbar.record_is_checked.get() == 1 and self.count%self.rec_interval == 0:
directory = os.path.join(head, self.filename.name[:-4]+'\\RawTiff\\')
if not os.path.exists(directory):
os.makedirs(directory)
else:
pass
gfxPath = os.path.join(directory, '%s_f=%s.tiff' % (os.path.splitext(tail)[0],str(int(self.count/self.rec_interval)).zfill(6)))
skimage.io.imsave(gfxPath, msg)
else:
pass
# Process the acquired image
self.timenow = time.time() - self.start_time #Get the time
#Get acquisition rate
try:
self.acq_rate = 1/(self.timenow - self.prev_time)
except:
pass
self.prev_time = self.timenow
# Get ROI
if self.toolbar.ROI_is_checked.get() == 1: # Get ROI
self.ROI = ((self.cameraframe.start_x,self.cameraframe.start_y), (self.cameraframe.end_x, self.cameraframe.end_y))
else: # Set ROI to image bounds
self.ROI = ((0,0),(int(msg.shape[1]),int(msg.shape[0])) )
# Calculate diameter
self.diam = Calculate_Diameter(self,self.num_lines,self.multiplication_factor,self.smooth_factor, self.thresh_factor,
self.integration_factor,self.ROI, self.toolbar.ID_is_checked)
#with self.timeit("Calculate diameter"): # time for optimisation
outer_diameters1,outer_diameters2,inner_diameters1,inner_diameters2,start,diff, ODS_flag,IDS_flag,ODlist,IDlist = self.diam.calc(msg, self.cameraframe.delta_height, self.cameraframe.delta_width, self.cameraframe.scale_factor)
params = outer_diameters1,outer_diameters2,inner_diameters1,inner_diameters2,start,diff,ODS_flag,IDS_flag, self.ROI
#with self.timeit("process queue!"): # time for optimisation
self.cameraframe.process_queue(params,msg,self.count2)
#with self.timeit("append"):
# Only add every delta_i data point to the lists
if self.count%self.delta_i == 0:
self.timelist.append(self.timenow)
self.outers.append(self.diam.OD) # Full list
self.inners.append(self.diam.ID) # Full list
else:
pass
# When we reach the list length limit, subsample the data. (And only add every delta_i data point to the list above)
self.i = int(len(self.timelist))
if self.i == self.n_data_points :
self.delta_i *= 2
# frequency of plotted points
self.timelist = self.timelist[0::2]
self.outers = self.outers[0::2]
self.inners = self.inners[0::2]
else:
pass
# Subtract off the latest time point, so that the current time is t = 0
xdata = map(lambda x: x - self.timelist[-1], self.timelist)
# Get the xdata points that fit within the axis limits
xdata3 = filter(lambda x: x >= xdata[-1]-abs(self.toolbar.xlims[0]), xdata)
xdata4 = filter(lambda x: x >= xdata[-1]-abs(self.toolbar.xlims[0]), xdata)
# Get the corresponding ydata points
ydata1 = self.outers[len(self.outers)-len(xdata3)::]#[0::10]
ydata2 = self.inners[len(self.outers)-len(xdata4)::]#[0::10]
# Subsample plotted data
self.i2 = int(len(xdata3))
if self.i2 > self.n_plot_points :
self.delta_i2 = np.ceil(self.i2/self.n_plot_points)
else:
self.delta_i2 = 1
xdata3 = xdata3[0::int(self.delta_i2)]
ydata1 = ydata1[0::int(self.delta_i2)]
ydata2 = ydata2[0::int(self.delta_i2)]
#with self.timeit("plot the graph"): # time for optimisation
self.graphframe.plot(xdata3,ydata1,ydata2,self.toolbar.xlims, self.toolbar.ylims, self.toolbar.ylims2)
self.count += 1
# Update the tkinter widgets
self.toolbar.update_acq_rate(self.acq_rate)
self.toolbar.update_time(self.timenow)
self.toolbar.update_temp(self.T) #### CHANGE to T
self.toolbar.update_pressure(self.P1,self.P2, (self.P1+self.P2)/2)
self.toolbar.update_diam(self.diam.OD,self.diam.ID)
# Write data to file
savedata = self.timenow,self.diam.OD,self.diam.ID, self.T, self.P1, self.P2, (self.P1+self.P2)/2
savedata2 = [self.timenow]+ODlist
savedata3 = [self.timenow]+IDlist
self.writeToFile(savedata)
self.writeToFile2(savedata2)
self.writeToFile3(savedata3)
#Need to access the outer diameter from the toolbar
self.OD = self.diam.OD
else:
params = 0,0,0,0,0,0,0,0,0
self.cameraframe.process_queue(params,msg,self.count2)
self.count2 += 1
else:
pass
##################################################
## Camera setup
##################################################
class setCamera(object):
def __init__(self,parent, camera_label):
self.parent = parent
self.camera_label = camera_label
self.DEVICE = None
# Factors for scaling ROI to original image (which is scaled to fit canvas)
self.delta_width = 0
self.delta_height = 0
self.scale_factor = 1
def set_exp(self,exposure):
mmc.setExposure(exposure)
return
def set_pix_clock(self,pix_clock):
mmc.setProperty(self.device[0], 'PixelClockMHz', pix_clock)
return
def set(self, camera_label):
# Set up the camera
global pix_clock
mmc.reset()
mmc.enableStderrLog(False)
mmc.enableDebugLog(False)
mmc.setCircularBufferMemoryFootprint(100)# (in case of memory problems)
if camera_label == "Thorlabs":
try:
DEVICE = ["ThorCam","ThorlabsUSBCamera","ThorCam"] #camera properties - micromanager creates these in a file
self.device = DEVICE
mmc.loadDevice(*DEVICE)
mmc.initializeDevice(DEVICE[0])
mmc.setCameraDevice(DEVICE[0])
#mmc.setProperty(DEVICE[0], 'Binning', 2)
mmc.setProperty(DEVICE[0], 'HardwareGain', 1)
mmc.setProperty(DEVICE[0], 'PixelClockMHz', pix_clock)#5
mmc.setProperty(DEVICE[0], 'PixelType', '8bit')
mmc.setExposure(self.parent.parent.exposure)
except:
tmb.showinfo("Warning", "Cannot connect to camera!")
if camera_label == "OpenCV":
try:
mmc.loadSystemConfiguration('OpenCV.cfg')
mmc.setProperty('OpenCVgrabber', 'PixelType', '8bit')
mmc.setExposure(self.parent.parent.exposure)
except:
tmb.showinfo("Warning", "Cannot connect to camera!")
if camera_label == "uManagerCam":
config_loaded = False
try:
mmc.loadSystemConfiguration('MMConfig.cfg')
config_loaded = True
except:
tmb.showinfo("Warning", "MMConfig.cfg not found in home directory!")
if config_loaded:
camera = mmc.getLoadedDevicesOfType(2)[0]
mmc.getDevicePropertyNames(camera) # camera_properties =
mmc.setProperty(mmc.getLoadedDevicesOfType(2)[0], 'PixelType', '8bit')
mmc.setExposure(self.parent.parent.exposure)
elif camera_label == "FakeCamera":
DEVICE = ['Camera', 'FakeCamera', 'FakeCamera'] #camera properties - micromanager creates these in a file
mmc.loadDevice(*DEVICE)
mmc.initializeDevice(DEVICE[0])
mmc.setCameraDevice(DEVICE[0])
mmc.setExposure(self.parent.parent.exposure)
mmc.setProperty(DEVICE[0], 'PixelType', '8bit')
mmc.setProperty(DEVICE[0], 'Path mask', 'SampleData\\TEST?{4.0}?.tif') #C:\\00-Code\\00 - VasoTracker\\
# To load in a sequence
DEVICE2 = ['Focus', 'DemoCamera', 'DStage']
mmc.loadDevice(*DEVICE2)
mmc.initializeDevice(DEVICE2[0])
mmc.setFocusDevice(DEVICE2[0])
mmc.setProperty(DEVICE2[0], "Position", 0)
elif camera_label == "":
tmb.showinfo("Warning", "You need to select a camera source!")
return
##################################################
## Main toolbar
##################################################
class ToolBar(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent, height = 150)#, width=250, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
self.set_camera = setCamera(self,self.parent )
self.ref_OD = None
#Functions that do things in the toolbar
def update_temp(self, temp):
# Updates the temperature widget
tempstring = str(round(temp,2))
self.temp_contents.set(tempstring)
def update_pressure(self, P1,P2,PAvg):
# Update average pressure
pressurestring = str(round(PAvg,2))
self.pressure_contents.set(pressurestring)
def update_diam(self, OD, ID):
# Update outer diameter
OD_string = str(round(OD,2))
self.outdiam_contents.set(OD_string)
try:
OD_pcnt_string = str(round(((OD/self.parent.toolbar.ref_OD)*100),2))
self.outdiam_pcnt_contents.set(OD_pcnt_string)
except:
pass
# Update inner diameter
ID_string = str(round(ID,2))
self.indiam_contents.set(ID_string)
def update_time(self, time):
#Update the time widget
timestring = str(datetime.timedelta(seconds=time))[:-4]
self.time_contents.set(timestring)
def update_acq_rate(self, acqrate):
#Update the acq_rate widget
self.acq_rate_contents.set(str(round(acqrate,2)))
# Function that changes the exposure on enter key
def update_exposure(self,event):
global prevcontents,exposure
try:
# Check if the exposure is within a suitable range
exp = self.contents.get()
if exp < 10:
exp = 10
elif exp > 500:
exp = 500
self.exposure_entry.delete(0, 'end')
self.exposure_entry.insert('0', exp)
if exp < 100:
tmb.showinfo("Warning", "Except for ThorCam, we recommend an exposure between 100 ms and 500ms")
self.parent.exposure = int(exp)
prevcontents = exp
exposure = exp
except:
self.exposure_entry.delete(0, 'end')
self.exposure_entry.insert('0', prevcontents)
exposure = prevcontents
self.set_camera.set_exp(exposure)
self.exposure_entry.delete(0, 'end')
self.exposure_entry.insert('0', mmc.getExposure())
def update_pix_clock(self,event):
global pix_clock_prevcontents, pix_clock
try:
# Check if the exposure is within a suitable range
pix_clock = self.pix_clock_contents.get()
self.pix_clock_entry.delete(0, 'end')
self.pix_clock_entry.insert('0', pix_clock)
self.set_camera.set_pix_clock(pix_clock)
self.parent.pix_clock = int(pix_clock)
pix_clock_prevcontents = pix_clock
pix_clock = pix_clock
except:
self.pix_clock_entry.delete(0, 'end')
self.pix_clock_entry.insert('0', pix_clock_prevcontents)
pix_clock = pix_clock_prevcontents
def update_rec_interval(self,event):
global rec_interval, rec_prevcontents
try: # Should check contents for int rather than try and catch exception
rec = self.rec_contents.get()
self.rec_interval_entry.delete(0, 'end')
self.rec_interval_entry.insert('0', rec)
self.parent.rec_interval = int(rec)
rec_prevcontents = rec
rec_interval = rec
except:
self.rec_interval_entry.delete(0, 'end')
self.rec_interval_entry.insert('0', rec_prevcontents)
rec_interval = rec_prevcontents
def update_num_lines(self,event):
global num_lines, num_lines_prevcontents
try: # Should check contents for int rather than try and catch exception
num_lines = self.num_lines_contents.get()
if num_lines < 5:
num_lines = 5
elif num_lines > 50:
num_lines = 50
self.num_lines_entry.delete(0, 'end')
self.num_lines_entry.insert('0', num_lines)
self.parent.num_lines = int(num_lines)
num_lines_prevcontents = num_lines
num_lines = num_lines
except:
self.num_lines_entry.delete(0, 'end')
self.num_lines_entry.insert('0', num_lines_prevcontents)
num_lines = num_lines_prevcontents
def update_scale(self,event):
try:
# Check if the exposure is within a suitable range
scale = self.scale_contents.get()
self.scale_entry.delete(0, 'end')
self.scale_entry.insert('0', scale)
self.parent.multiplication_factor = scale
self.scale_prevcontents = scale
except:
self.scale_entry.delete(0, 'end')
self.scale_entry.insert('0', self.scale_prevcontents)
self.parent.multiplication_factor = self.scale_prevcontents
def update_smooth(self,event):
#global smooth_prevcontents, smooth_factor
try:
# Check if the exposure is within a suitable range
smooth = self.smooth_contents.get()
self.smooth_entry.delete(0, 'end')
self.smooth_entry.insert('0', smooth)
self.parent.smooth_factor = smooth
self.smooth_prevcontents = smooth
try:
self.parent.videoframe.update_image(self.parent.videoframe.slider.get())
except:
pass
except:
self.smooth_entry.delete(0, 'end')
self.smooth_entry.insert('0', self.smooth_prevcontents)
self.parent.smooth_factor = self.smooth_prevcontents
def update_integration(self,event):
try:
# Check if the exposure is within a suitable range
integration = self.integration_contents.get()
if integration < 5:
integration = 5
elif integration > 50:
integration = 50
self.integration_entry.delete(0, 'end')
self.integration_entry.insert('0', integration)
self.parent.integration_factor = integration
self.integration_prevcontents = integration
try:
self.parent.videoframe.update_image(self.parent.videoframe.slider.get())
except:
pass
except:
self.integration_entry.delete(0, 'end')
self.integration_entry.insert('0', self.integration_prevcontents)
self.parent.integration_factor = self.integration_prevcontents
def update_thresh(self,event):
try:
# Check if the exposure is within a suitable range
thresh = self.thresh_contents.get()
self.thresh_entry.delete(0, 'end')
self.thresh_entry.insert('0', thresh)
self.parent.thresh_factor = thresh
self.thresh_prevcontents = thresh
try:
self.parent.videoframe.update_image(self.parent.videoframe.slider.get())
except:
pass
except:
self.thresh_entry.delete(0, 'end')
self.thresh_entry.insert('0', self.thresh_prevcontents)
self.parent.thresh_factor = self.thresh_prevcontents
def update_camera_width(self,width):
width_string = str(width)
self.camera_width_contents.set(width_string)
def update_camera_height(self,height):
height_string = str(height)
self.camera_height_contents.set(height_string)
def update_FOV_width(self,width):
width_string = str(width)
self.FOV_width_contents.set(width_string)
def update_FOV_height(self,height):
height_string = str(height)
self.FOV_height_contents.set(height_string)
def mainWidgets(self):
self.toolbarview = ttk.Frame(self.parent.master, relief=RIDGE)
self.toolbarview.grid(row=2,column=3,rowspan=2,sticky=N+S+E+W, pady=0)
# Tool bar groups
source_group = ttk.LabelFrame(self, text='Source', height=150, width=150)
source_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
settings_group = ttk.LabelFrame(self, text='Acquisition Settings', height=150, width=150)
settings_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
ana_settings_group = ttk.LabelFrame(self, text='Analysis Settings', height=150, width=150)
ana_settings_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
outer_diameter_group = ttk.LabelFrame(self, text='Graph', height=150, width=150)
outer_diameter_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
acquisition_group = ttk.LabelFrame(self, text='Data acquisition', height=150, width=150)
acquisition_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
start_group = ttk.LabelFrame(self, text='Start/Stop', height=150, width=150)
start_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
image_size_group = ttk.LabelFrame(self, text='Image dimensions', height=150, width=150)
image_size_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
# Camera width box
camera_width_label = ttk.Label(image_size_group, text = 'Camera width:')
camera_width_label.grid(row=0, column=0, sticky=E)
self.camera_width_contents = IntVar()
self.camera_width_contents.set(0)
self.camera_width_prevcontents = self.camera_width_contents.get()
self.camera_width_entry = ttk.Entry(image_size_group, textvariable = self.camera_width_contents,width=10)
self.camera_width_entry.config(state=DISABLED)
self.camera_width_entry.grid(row=0, column=1, pady=0)
self.camera_width_entry.bind('<Return>', self.update_camera_width)
# Camera height box
camera_height_label = ttk.Label(image_size_group, text = 'Camera height:')
camera_height_label.grid(row=1, column=0, sticky=E)
self.camera_height_contents = IntVar()
self.camera_height_contents.set(0)
self.camera_height_prevcontents = self.camera_height_contents.get()
self.camera_height_entry = ttk.Entry(image_size_group, textvariable = self.camera_height_contents,width=10)
self.camera_height_entry.config(state=DISABLED)
self.camera_height_entry.grid(row=1, column=1, pady=0)
self.camera_height_entry.bind('<Return>', self.update_camera_height)
# FOV width box
FOV_width_label = ttk.Label(image_size_group, text = 'FOV width:')
FOV_width_label.grid(row=2, column=0, sticky=E)
self.FOV_width_contents = IntVar()
self.FOV_width_contents.set(0)
self.FOV_width_prevcontents = self.FOV_width_contents.get()
self.FOV_width_entry = ttk.Entry(image_size_group, textvariable = self.FOV_width_contents,width=10)
self.FOV_width_entry.config(state=DISABLED)
self.FOV_width_entry.grid(row=2, column=1, pady=0)
self.FOV_width_entry.bind('<Return>', self.update_FOV_width)
# FOV height box
FOV_height_label = ttk.Label(image_size_group, text = 'FOV height:')
FOV_height_label.grid(row=3, column=0, sticky=E)
self.FOV_height_contents = IntVar()
self.FOV_height_contents.set(0)
self.FOV_height_prevcontents = self.FOV_height_contents.get()
self.FOV_height_entry = ttk.Entry(image_size_group, textvariable = self.FOV_height_contents,width=10)
self.FOV_height_entry.config(state=DISABLED)
self.FOV_height_entry.grid(row=3, column=1, pady=0)
self.FOV_height_entry.bind('<Return>', self.update_FOV_height)
# Source group (e.g. camera and files)
camera_label = ttk.Label(source_group, text = 'Camera:')
camera_label.grid(row=0, column=0, sticky=E)
path_label = ttk.Label(source_group, text = 'Path:')
path_label.grid(row=1, column=0, sticky=E)
save_label = ttk.Label(source_group, text = 'File:')
save_label.grid(row=2, column=0, sticky=E)
# Flag Start/stop group
self.start_flag = False
def set_cam(self):
if self.start_flag == False:
camera_label = self.variable.get()
self.set_camera.set(camera_label)
self.cam_x_dim = mmc.getImageWidth()
self.cam_y_dim = mmc.getImageHeight()
self.update_camera_width(self.cam_x_dim)
self.update_camera_height(self.cam_y_dim)
self.update_FOV_width(self.cam_x_dim)
self.update_FOV_height(self.cam_y_dim)
return
else:
print "You can't change the camera whilst acquiring images!"
return
self.camoptions = ["...","Thorlabs","OpenCV", "FakeCamera", "uManagerCam"]
self.variable = StringVar()
self.variable.set(self.camoptions[0])
self.camera_entry = ttk.OptionMenu(source_group, self.variable,self.camoptions[0], *self.camoptions, command= lambda _: set_cam(self))
self.camera_entry.grid(row=0, column=1, pady=0)
global head, tail
head,tail = os.path.split(self.parent.filename.name)
print head, tail
path_entry = ttk.Entry(source_group, width=15)
path_entry.insert(0, head)
path_entry.config(state=DISABLED)
path_entry.grid(row=1, column=1, pady=0)
save_entry = ttk.Entry(source_group, width=15)
save_entry.insert(0, tail)
save_entry.config(state=DISABLED)
save_entry.grid(row=2, column=1, pady=0)
# Create radio buttons for the field of view
self.FOV_selection = IntVar(value=1) # initializing the choice, i.e. Python
self.FOV_modes = [("w x h",1), ("w/2 x h/2",2)]
FOV_modes_label = ttk.Label(source_group, text = 'FOV:')
FOV_modes_label.grid(row=3, column=0, sticky=E)
self.test = 0
def ShowChoice():
if self.test == 0:
self.cam_x_dim = mmc.getImageWidth()
self.cam_y_dim = mmc.getImageHeight()
self.test = self.test + 1
mmc.stopSequenceAcquisition()
# Need to get the dimensions of the image.
if self.FOV_modes[self.FOV_selection.get()-1][1] == 1:
try:
mmc.setROI(0, 0, self.cam_x_dim, self.cam_y_dim)
mmc.startContinuousSequenceAcquisition(0)
self.update_FOV_width(self.cam_x_dim)
self.update_FOV_height(self.cam_y_dim)
except:
self.FOV_selection.set(1)
mmc.startContinuousSequenceAcquisition(0)
tmb.showwarning(title="Oh this is unfortunate...", message = "It seems that this camera does not support this function!")
elif self.FOV_modes[self.FOV_selection.get()-1][1] == 2:
try:
mmc.setROI(int(self.cam_x_dim/4), int(self.cam_y_dim/4), int(self.cam_x_dim/2), int(self.cam_y_dim/2))
mmc.startContinuousSequenceAcquisition(0)
self.update_FOV_width(int(self.cam_x_dim/2))
self.update_FOV_height(int(self.cam_y_dim/2))
except:
self.FOV_selection.set(1)
mmc.startContinuousSequenceAcquisition(0)
tmb.showwarning(title="Oh this is unfortunate...", message = "It seems that this camera does not support this function!")
self.FOV_buttons = []
for (mode, val) in self.FOV_modes:
rb = tk.Radiobutton(source_group,
text=mode,
indicatoron = 0,
width = 10,
padx = 0, pady = 0,
variable=self.FOV_selection, command = ShowChoice,
value=val)
rb.grid(row=2+val, column=1, pady=0)
self.FOV_buttons.append(rb)
for radio_button in self.FOV_buttons:
radio_button.configure(state = DISABLED)
# Settings group (e.g. camera and files)
# Scale settings
scale_label = ttk.Label(settings_group, text = "Scale ("+u"\u03bcm/px):")
scale_label.grid(row=0, column=0, sticky=E)
scale = self.parent.multiplication_factor
scalefloat = "%3.0f" % scale
self.scale_contents = DoubleVar()
self.scale_contents.set(scalefloat)
global scale_contents
self.scale_prevcontents = self.scale_contents.get()
self.scale_entry = ttk.Entry(settings_group, textvariable = self.scale_contents,width=10)
self.scale_entry.grid(row=0, column=1, pady=0)
self.scale_entry.bind('<Return>', self.update_scale)
self.scale_entry.configure(state="disabled")
# Exposure settings
exposure_label = ttk.Label(settings_group, text = 'Exp (ms)')
exposure_label.grid(row=1, column=0, sticky=E)
exp = self.parent.exposure
self.contents = IntVar()
self.contents.set(int(exp))
global prevcontents
prevcontents = self.contents.get()
self.exposure_entry = ttk.Entry(settings_group, textvariable = self.contents,width=10)
self.exposure_entry.grid(row=1, column=1, pady=0)
self.exposure_entry.bind('<Return>', self.update_exposure)
self.exposure_entry.configure(state="disabled")
# Pixel clock settings
pixelclock_label = ttk.Label(settings_group, text = 'Pix Clk (MHz):')
pixelclock_label.grid(row=2, column=0, sticky=E)
pix_clock = self.parent.pix_clock
self.pix_clock_contents = IntVar()
self.pix_clock_contents.set(int(pix_clock))
global pix_clock_prevcontents
pix_clock_prevcontents = self.pix_clock_contents.get()
self.pix_clock_entry = ttk.Entry(settings_group, textvariable = self.pix_clock_contents,width=10)
self.pix_clock_entry.grid(row=2, column=1, pady=0)
self.pix_clock_entry.bind('<Return>', self.update_pix_clock)
self.pix_clock_entry.configure(state="disabled")
# Acquisition rate settings
acqrate_label = ttk.Label(settings_group, text = 'Acq rate (Hz):')
acqrate_label.grid(row=3, column=0, sticky=E)
self.acq_rate_contents = DoubleVar()
self.acq_rate_contents.set("%3.0f" % self.parent.acq_rate)
self.acq_rate__entry = ttk.Entry(settings_group, textvariable = self.acq_rate_contents, width=10)
self.acq_rate__entry.config(state=DISABLED)
self.acq_rate__entry.grid(row=3, column=1, pady=0)
# Record interval settings
rec_interval_label = ttk.Label(settings_group, text = 'Rec intvl (f):')
rec_interval_label.grid(row=4, column=0, sticky=E)
rec_interval = self.parent.rec_interval
self.rec_contents = IntVar()
self.rec_contents.set(int(rec_interval))
global rec_prevcontents
rec_prevcontents = self.rec_contents.get()
self.rec_interval_entry = ttk.Entry(settings_group, textvariable = self.rec_contents,width=10)
self.rec_interval_entry.grid(row=4, column=1, pady=0)
self.rec_interval_entry.bind('<Return>', self.update_rec_interval)
self.rec_interval_entry.configure(state="disabled")
# Default settings checkbox
self.standard_settings = IntVar()
self.standard_settings.set(1)
def cb(self, event=None):
global rec_interval, exposure, pix_clock
if self.standard_settings_val.get() == 1:
try:
self.rec_contents.set(int(self.parent.init_rec_interval))
rec_interval = self.parent.init_rec_interval
self.update_rec_interval(event=True)
except:
pass
try:
self.pix_clock_contents.set(int(self.parent.init_pix_clock))
pix_clock = self.parent.init_pix_clock
self.update_pix_clock(event=True)
except:
pass
try:
self.contents.set(int(self.parent.init_exposure))
exposure = self.parent.exposure
self.update_exposure(event=True)
except:
pass
self.scale_entry.configure(state="disabled")
self.exposure_entry.configure(state="disabled")
self.rec_interval_entry.configure(state="disabled")
self.pix_clock_entry.configure(state="disabled")
self.mm_settings.configure(state="disabled")
else:
self.scale_entry.configure(state="enabled")
self.exposure_entry.configure(state="enabled")
self.rec_interval_entry.configure(state="enabled")
self.pix_clock_entry.configure(state="enabled")
self.mm_settings.configure(state="enabled")
return
def cb2(self, event=None):
if self.mm_settings_val.get() == 1:
self.standard_settings_val.set(0)
tmb.showwarning(title="Warning", message = "This might make things go faster, and it might make things crash. You were warned.")
return
self.standard_settings_val = IntVar()
self.standard_settings_val.set(1)
self.standard_settings = ttk.Checkbutton(settings_group, text='Default', onvalue=1, offvalue=0, variable=self.standard_settings_val, command= lambda: cb(self))
self.standard_settings.grid(row=5, column=0, padx=0, pady=0, sticky=W)
self.standard_settings.configure(state="enabled")
self.mm_settings_val = IntVar()
self.mm_settings_val.set(0)
self.mm_settings = ttk.Checkbutton(settings_group, text='Faster?', onvalue=1, offvalue=0, variable=self.mm_settings_val, command= lambda: cb2(self))
self.mm_settings.grid(row=5, column=1, padx=0, pady=0, sticky=W)
self.mm_settings.configure(state="disabled")
# Analysis Settings group (e.g. camera and files)
# Num lines settings
numlines_label = ttk.Label(ana_settings_group, text = '# of lines:')
numlines_label.grid(row=0, column=0, sticky=E)
num_lines = self.parent.num_lines
self.num_lines_contents = IntVar()
self.num_lines_contents.set(int(num_lines))
global num_lines_prevcontents
num_lines_prevcontents = self.num_lines_contents.get()
self.num_lines_entry = ttk.Entry(ana_settings_group, textvariable = self.num_lines_contents,width=10)
self.num_lines_entry.grid(row=0, column=1, pady=0)
self.num_lines_entry.bind('<Return>', self.update_num_lines)
# Smooth settings
smooth_label = ttk.Label(ana_settings_group, text = 'Smooth:')
smooth_label.grid(row=1, column=0, sticky=E)
smooth = self.parent.smooth_factor
smoothfloat = int(smooth)
self.smooth_contents = IntVar()
self.smooth_contents.set(smoothfloat)
global smooth_prevcontents
smooth_prevcontents = self.smooth_contents.get()
self.smooth_entry = ttk.Entry(ana_settings_group, textvariable = self.smooth_contents,width=10)
self.smooth_entry.grid(row=1, column=1, pady=0)
self.smooth_entry.bind('<Return>', self.update_smooth)
# Integration settings
integration_label = ttk.Label(ana_settings_group, text = 'Integration:')
integration_label.grid(row=2, column=0, sticky=E)
integration = self.parent.integration_factor
integrationfloat = int(integration)
self.integration_contents = IntVar()
self.integration_contents.set(integrationfloat)
global integration_prevcontents
integration_prevcontents = self.integration_contents.get()
self.integration_prevcontents = integration_prevcontents
self.integration_entry = ttk.Entry(ana_settings_group, textvariable = self.integration_contents,width=10)
self.integration_entry.grid(row=2, column=1, pady=0)
self.integration_entry.bind('<Return>', self.update_integration)
# Threshold settings
thresh_label = ttk.Label(ana_settings_group, text = 'Threshold:')
thresh_label.grid(row=3, column=0, sticky=E)
thresh = self.parent.thresh_factor
threshfloat = thresh
self.thresh_contents = DoubleVar()
self.thresh_contents.set(threshfloat)
global thresh_prevcontents
thresh_prevcontents = self.thresh_contents.get()
self.thresh_prevcontents = thresh_prevcontents
self.thresh_entry = ttk.Entry(ana_settings_group, textvariable = self.thresh_contents,width=10)
self.thresh_entry.grid(row=3, column=1, pady=0)
self.thresh_entry.bind('<Return>', self.update_thresh)
# Filter setting
self.filter_is_checked = IntVar()
filter_checkBox = ttk.Checkbutton(ana_settings_group, text='Filter', onvalue=1, offvalue=0, variable=self.filter_is_checked)
filter_checkBox.grid(row=4, padx=0, pady=0, sticky=W)
# Use ROI setting
self.ROI_is_checked = IntVar()
self.ROI_checkBox = ttk.Checkbutton(ana_settings_group, text='ROI', onvalue=1, offvalue=0, variable=self.ROI_is_checked)
self.ROI_checkBox.grid(row=4, column=1, padx=0, pady=0, sticky=W)
self.ROI_checkBox.configure(state="disabled")
# Calculate inner diameter setting
self.ID_is_checked = IntVar()
self.ID_is_checked.set(1)
self.ID_checkBox = ttk.Checkbutton(ana_settings_group, text='ID', onvalue=1, offvalue=0, variable=self.ID_is_checked)
self.ID_checkBox.grid(row=5, column=0, padx=0, pady=0, sticky=W)
self.ID_checkBox.configure(state="enabled")
# ROI Settings
self.set_roi = False
def ROIset_button_function(get_coords):
self.set_roi = True
print "Set ROI = ", self.set_roi
self.ROI_checkBox.configure(state="enabled")
stop_acq()
return self.set_roi
ROI_set_button = ttk.Button(ana_settings_group, text='Set ROI', command= lambda: ROIset_button_function(get_coords=True))
ROI_set_button.grid(row=6,column=0, columnspan=1, pady=0)
# ROI Settings
def refdiam_set_button_function(get_coords):
#### TODO SORT THIS
self.ref_OD = self.parent.OD
self.parent.tableframe.max_diameter_text.set(round(self.parent.toolbar.ref_OD,2))
print "set ref button pressed = ", self.ref_OD
return self.ref_OD
refdiam_set_button = ttk.Button(ana_settings_group, text='Set ref', command= lambda: refdiam_set_button_function(get_coords=True))
refdiam_set_button.grid(row=6,column=1, columnspan=1, padx = 0,pady=0)
# Outer diameter group
# Function for the labels
def coord_label(window, text, row, column):
label=ttk.Label(window, text=text)
label.configure(anchor="center")
label.grid(row=row, column=column, padx = 0, pady = 0,sticky=E)
# Function for the labels 2
def coord_entry(window, row, column, coord_label):
entry = ttk.Entry(window, width=8, textvariable=coord_label)
entry.config(state=NORMAL)
entry.grid(row=row, column=column, padx=1, sticky=E)
self.parent.master.focus_set()
entry.focus_set()
self.parent.master.focus_force()
return entry
def coord_limits(get_coords, default):
if get_coords == True:
if default:
self.xlims = (self.x_min_default, self.x_max_default)
self.ylims = (self.y_min_default, self.y_max_default)
self.ylims2 = (self.y_min_default2, self.y_max_default2)
outer_xmin_entry.delete(0, END), outer_xmax_entry.delete(0, END)
outer_xmin_entry.insert('0', self.x_min_default), outer_xmax_entry.insert('0', self.x_max_default)
outer_ymin_entry.delete(0, END), outer_ymax_entry.delete(0, END)
outer_ymin_entry.insert('0', self.y_min_default), outer_ymax_entry.insert('0', self.y_max_default)
inner_ymin_entry.delete(0, END), inner_ymax_entry.delete(0, END)
inner_ymin_entry.insert('0', self.y_min_default2), inner_ymax_entry.insert('0', self.y_max_default2)
self.parent.graphframe.update_scale()
print "it did it"
else:
self.xlims = (self.x_min_label.get(),self.x_max_label.get())
self.ylims = (self.y_min_label.get(),self.y_max_label.get())
self.ylims2 = (self.y_min_label2.get(),self.y_max_label2.get())
self.parent.graphframe.update_scale()
print "it did it"
return self.xlims, self.ylims, self.ylims2
get_coords = False
else:
pass
# Set the initial xlimit values
self.x_min_label, self.x_max_label = IntVar(value=self.parent.x_min_default), IntVar(value=self.parent.x_max_default)
self.x_min_default, self.x_max_default = self.x_min_label.get(),self.x_max_label.get()
# Set the initial OD values
self.y_min_label, self.y_max_label = IntVar(value=self.parent.y_min_default), IntVar(value=self.parent.y_max_default)
self.y_min_default, self.y_max_default = self.y_min_label.get(),self.y_max_label.get()
# Set the initial ID values
self.y_min_label2, self.y_max_label2 = IntVar(value=self.parent.y_min_default2), IntVar(value=self.parent.y_max_default2)
self.y_min_default2, self.y_max_default2 = self.y_min_label2.get(),self.y_max_label2.get()
# Get the x and y limits
self.xlims = (self.x_min_label.get(),self.x_max_label.get())
self.ylims = (self.y_min_label.get(),self.y_max_label.get())
self.ylims2 = (self.y_min_label2.get(),self.y_max_label2.get())
coord_label(outer_diameter_group, 'Min:', 3, 1)
coord_label(outer_diameter_group, 'Max:', 3, 2)
coord_label(outer_diameter_group, 'Time:', 0, 0)
coord_label(outer_diameter_group, 'OD:', 1, 0)
coord_label(outer_diameter_group, 'ID:', 2, 0)
outer_xmin_entry = coord_entry(outer_diameter_group, 0, 1, self.x_min_label)
outer_xmax_entry = coord_entry(outer_diameter_group, 0, 2, self.x_max_label)
outer_ymin_entry = coord_entry(outer_diameter_group, 1, 1, self.y_min_label)
outer_ymax_entry = coord_entry(outer_diameter_group, 1, 2, self.y_max_label)
inner_ymin_entry = coord_entry(outer_diameter_group, 2, 1, self.y_min_label2)
inner_ymax_entry = coord_entry(outer_diameter_group, 2, 2, self.y_max_label2)
# Button to set the axis limits
set_button = ttk.Button(outer_diameter_group, text='Set', command= lambda: coord_limits(get_coords=True, default = False))
set_button.grid(row=5, column=1, padx = 0, pady=0)
# Button to set the axis limits to the default values
default_button = ttk.Button(outer_diameter_group, text='Default', command= lambda: coord_limits(get_coords=True, default = True))
default_button.grid(row=6, column=1, padx =0, pady=0)
# acquisition_group
time_label = ttk.Label(acquisition_group, text = 'hr:min:sec:msec:')
time_label.grid(row=0, column=0, sticky=E)
temp_label = ttk.Label(acquisition_group, text = 'Temp (' + u"\u00b0C):")
temp_label.grid(row=1, column=0, sticky=E)
pressureavg_label = ttk.Label(acquisition_group, text = 'Pressure (mmHg):')
pressureavg_label.grid(row=2, column=0, sticky=E)
outdiam_label = ttk.Label(acquisition_group, text = 'OD (' + u"\u03bcm):")
outdiam_label.grid(row=3, column=0, sticky=E)
indiam_label = ttk.Label(acquisition_group, text = 'ID (' + u"\u03bcm):")
indiam_label.grid(row=4, column=0, sticky=E)
outdiam_pcnt_label = ttk.Label(acquisition_group, text = 'Diameter (%):')
outdiam_pcnt_label.grid(row=5, column=0, sticky=E, pady=5)
self.time_contents = StringVar()  # holds formatted elapsed-time text, not an int
self.time_contents.set(str(datetime.timedelta(seconds=time.time()-time.time()))[:-4])
self.time_entry = ttk.Entry(acquisition_group, textvariable = self.time_contents, width=10)
self.time_entry.config(state=DISABLED)
self.time_entry.grid(row=0, column=1, pady=0)
self.temp_contents = StringVar()
self.temp_contents.set("N/A")
self.temp_entry = ttk.Entry(acquisition_group, textvariable = self.temp_contents,width=10)
self.temp_entry.config(state=DISABLED)
self.temp_entry.grid(row=1, column=1, pady=0)
self.pressure_contents = StringVar()
self.pressure_contents.set("N/A")
self.pressure_entry = ttk.Entry(acquisition_group, textvariable = self.pressure_contents,width=10)
self.pressure_entry.config(state=DISABLED)
self.pressure_entry.grid(row=2, column=1, pady=0)
self.outdiam_contents = StringVar()
self.outdiam_contents.set("N/A")
self.outdiam_entry = ttk.Entry(acquisition_group, textvariable = self.outdiam_contents,width=10)
self.outdiam_entry.config(state=DISABLED)
self.outdiam_entry.grid(row=3, column=1, pady=0)
self.indiam_contents = StringVar()
self.indiam_contents.set("N/A")
self.indiam_entry = ttk.Entry(acquisition_group, textvariable = self.indiam_contents,width=10)
self.indiam_entry.config(state=DISABLED)
self.indiam_entry.grid(row=4, column=1, pady=0)
self.outdiam_pcnt_contents = StringVar()
self.outdiam_pcnt_contents.set("N/A")
self.outdiam_pcnt_entry = ttk.Entry(acquisition_group, textvariable = self.outdiam_pcnt_contents,width=10, foreground='red')
self.outdiam_pcnt_entry.config(state=DISABLED)
self.outdiam_pcnt_entry.grid(row=5, column=1, pady=5)
# Function that will start the image acquisition
def start_acq():
if self.variable.get() == "...":
tmb.showwarning(title="Warning", message = "You need to select a camera source!")
self.start_flag = False
else:
self.standard_settings.configure(state="disabled")
self.mm_settings.configure(state="disabled")
self.camera_entry.configure(state="disabled")
self.scale_entry.configure(state="disabled")
self.exposure_entry.configure(state="disabled")
self.pix_clock_entry.configure(state="disabled")
self.rec_interval_entry.configure(state="disabled")
self.num_lines_entry.configure(state="disabled")
self.start_flag = True
self.record_video_checkBox.configure(state="disabled")
self.standard_settings.configure(state="disabled")
mmc.startContinuousSequenceAcquisition(0)
for radio_button in self.FOV_buttons:
radio_button.configure(state="active")
return self.start_flag
# Function that will stop the image acquisition
def stop_acq():
self.standard_settings.configure(state="enabled")
if self.standard_settings_val.get() == 0:
self.scale_entry.configure(state="enabled")
self.exposure_entry.configure(state="enabled")
self.pix_clock_entry.configure(state="enabled")
self.rec_interval_entry.configure(state="enabled")
self.mm_settings.configure(state="enabled")
self.camera_entry.configure(state="enabled")
self.num_lines_entry.configure(state="enabled")
self.start_flag = False
self.record_video_checkBox.configure(state="enabled")
mmc.stopSequenceAcquisition()
self.record_flag = False
return self.start_flag,self.record_flag
# Function that will start the data acquisition
self.record_flag = False
def record_data():
if self.start_flag == True:
self.record_flag = True
mmc.clearCircularBuffer()
return self.record_flag
def snapshot():
self.snapshot_flag = True
return self.snapshot_flag
start_button = ttk.Button(start_group, text='Start', command= lambda: start_acq())
start_button.grid(row=0, column=0, pady=0, sticky=N+S+E+W)
record_button = ttk.Button(start_group, text='Track', command= lambda: record_data())
record_button.grid(row=1, column=0, pady=0, sticky=N+S+E+W)
live_button = ttk.Button(start_group, text='Pause', command= lambda: stop_acq())
live_button.grid(row=3, column=0, pady=0, sticky=N+S+E+W)
self.snapshot_flag = False
snapshot_button = ttk.Button(start_group, text='Snapshot', command= lambda: snapshot())
snapshot_button.grid(row=4, column=0, pady=0, sticky=N+S+E+W)
self.record_is_checked = IntVar()
self.record_video_checkBox = ttk.Checkbutton(start_group,text='Record?', onvalue=1, offvalue=0, variable=self.record_is_checked)
self.record_video_checkBox.grid(row=5, column=0, columnspan=1, padx=5, pady=3, sticky=W)
##################################################
## Graph window
##################################################
class GraphFrame(tk.Frame):
# Initialisation function
def __init__(self,parent):
tk.Frame.__init__(self, parent)#, bg = "yellow")#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.top = Frame()
self.top.update_idletasks()
self.xlim1 = self.parent.toolbar.x_min_default # Outer
self.xlim2 = self.parent.toolbar.x_max_default # Outer
self.ylim1 = self.parent.toolbar.y_min_default # Outer
self.ylim2 = self.parent.toolbar.y_max_default # Outer
self.ylim3 = self.parent.toolbar.y_min_default2 # Inner
self.ylim4 = self.parent.toolbar.y_max_default2 # Inner
# Function for updating the scale
def update_scale(self, blit=True): #### NEE
self.graphview.ax1.set_xlim(self.parent.toolbar.xlims[0],self.parent.toolbar.xlims[1]) # Outer diameter
self.graphview.ax1.set_ylim(self.parent.toolbar.ylims[0],self.parent.toolbar.ylims[1]) # Outer diameter
self.graphview.ax2.set_xlim(self.parent.toolbar.xlims[0],self.parent.toolbar.xlims[1]) # Inner diameter
self.graphview.ax2.set_ylim(self.parent.toolbar.ylims2[0],self.parent.toolbar.ylims2[1]) # Inner diameter
self.graphview.figure.canvas.draw()
# Set things up in here
def mainWidgets(self,blit=True):
# We want to explicitly set the size of the graph so that we can blit
self.graphview = tk.Label(self)
other_figsize = [self.parent.graphframe.winfo_width()/100,self.parent.graphframe.winfo_height()/100]
self.graphview.figure,(self.graphview.ax1,self.graphview.ax2) = plt.subplots(2,1, figsize=other_figsize)
# initialize lines to be drawn
self.graphview.line, = self.graphview.ax1.plot([],[])
self.graphview.line2, = self.graphview.ax2.plot([],[])
# Set axis limits
self.graphview.ax1.set_xlim(self.xlim1,self.xlim2) # Outer
self.graphview.ax2.set_xlim(self.xlim1,self.xlim2) # Inner
self.graphview.ax1.set_ylim(self.ylim1,self.ylim2) # Outer
self.graphview.ax2.set_ylim(self.ylim3,self.ylim4) # Inner
# Set axis labels
self.graphview.ax1.set_ylabel('Outer diameter (um)', fontsize=14) # Outer diameter labels
self.graphview.ax2.set_xlabel('Time (s)', fontsize=14) # Inner diameter labels
self.graphview.ax2.set_ylabel('Lumen diameter (um)', fontsize=14) # Inner diameter labels
# Draw the canvas
self.graphview.figure.canvas = FigureCanvasTkAgg(self.graphview.figure, self)
self.graphview.figure.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=None, expand=False) ##### THIS IS THE PROBLEM WITH BLITTING HERE. WE NEED TO EXPLICITLY STATE THE FIGURE SIZE ABOVE!!
self.graphview.figure.canvas.draw()
# Get the background (because we are blitting)
self.ax1background = self.graphview.figure.canvas.copy_from_bbox(self.graphview.ax1.bbox)
self.ax2background = self.graphview.figure.canvas.copy_from_bbox(self.graphview.ax2.bbox)
#bbarrray = self.graphview.ax1.bbox.get_points()
#my_blit_box = Bbox(bbarrray)
#my_blit_box = Bbox.from_bounds(bbarrray[0][0], bbarrray[0][1], (bbarrray[1][0]-bbarrray[0][0])*1.5, bbarrray[1][1]-bbarrray[0][1])
#self.ax1background = self.graphview.figure.canvas.copy_from_bbox(my_blit_box)
# Plotting function
def plot(self, xdata, ydata1,ydata2,xlims,ylims, ylims2):
# Restore the background
self.graphview.figure.canvas.restore_region(self.ax1background)
self.graphview.figure.canvas.restore_region(self.ax2background)
# Remove the previous lines
try:
self.graphview.ax1.lines.remove(self.graphview.line)
self.graphview.ax2.lines.remove(self.graphview.line2)
except:
pass
# Set the new data
self.graphview.line.set_xdata(xdata)
self.graphview.line.set_ydata(ydata1)
self.graphview.line.set_color('blue')
self.graphview.line2.set_xdata(xdata) #self.graphview.line2.set_xdata(xdata4[::-1][0::int(self.parent.delta_i)][::-1])
self.graphview.line2.set_ydata(ydata2)
self.graphview.line2.set_color('red')
# redraw just the points
self.graphview.ax1.draw_artist(self.graphview.line)
self.graphview.ax2.draw_artist(self.graphview.line2)
# fill in the axes rectangle
self.graphview.figure.canvas.blit(self.graphview.ax1.bbox)
self.graphview.figure.canvas.blit(self.graphview.ax2.bbox)
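# How the blitting in plot() fits together: mainWidgets() caches the static
# background of each axes with copy_from_bbox(); every frame plot() restores
# that cached region, pushes the new x/y data into the two Line2D artists,
# redraws only those artists with draw_artist(), and blits each axes bbox.
# Only the traces are repainted, which is what keeps the live plot cheap.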
##################################################
## Timing functions
##################################################
class TimeIt():
from datetime import datetime
def __init__(self):
self.name = None
def __call__(self, name):
self.name = name
return self
def __enter__(self):
self.tic = self.datetime.now()
return self
def __exit__(self,name, *args, **kwargs):
print('process ' + self.name + ' runtime: {}'.format(self.datetime.now() - self.tic))##]]
class TimeIt2():
from datetime import datetime
def __init__(self):
self.name = None
def __call__(self, name):
self.name = name
return self
def __enter__(self):
self.tic = self.datetime.now()
return self
def __exit__(self,name, *args, **kwargs):
print('process ' + self.name + ' runtime: {}'.format(self.datetime.now() - self.tic))##]]
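# A minimal usage sketch for the TimeIt context managers above (illustrative
# only; `analyse_frame` is a hypothetical workload, nothing in this file calls
# the helpers this way):
#   timer = TimeIt()
#   with timer("analyse frame"):
#       analyse_frame()
# On exit this prints e.g. "process analyse frame runtime: 0:00:00.012345".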
##################################################
## Table
##################################################
class TableFrame(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent)#,highlightthickness=2,highlightbackground="#111")#, width=250, height = 300)#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
def mainWidgets(self):
self.tableview = ttk.Frame(self)
self.tableview.grid(row=1, column=3, sticky=N+S+E+W)
def add_row():
try:
OutDiam = float(self.parent.OD)
#InDiam = float(ID)
Label = table_text_entry.get()
Time = (time.time() - self.parent.start_time)
Time = float(Time)
Time = round(Time, 1)
#mxDiastring = StringVar()
try:
self.max_diameter_text.set(round(self.parent.toolbar.ref_OD,2))
max_diameter = self.parent.toolbar.ref_OD
except:
self.max_diameter_text.set(np.nan)
max_diameter = np.nan
#max_diameter = max_diameter_text.set()
#max_diameter = int(max_diameter)
if max_diameter > 0:
max_diameter = float(max_diameter)
max_percent = ((float(OutDiam/max_diameter))*100)
max_percent = round(max_percent, 1)
table_1.insert('', 'end', values=(Time, Label, OutDiam, self.parent.P1,self.parent.P2, max_percent)) #P1, P2
hello = ((Time, Label, OutDiam, self.parent.P1, self.parent.P2, max_percent))
else:
max_percent = '-'
table_1.insert('', 'end', values=(Time, Label, OutDiam, self.parent.P1,self.parent.P2, max_percent)) #P1, P2
hello = ((Time, Label, OutDiam, self.parent.P1, self.parent.P2, max_percent))
table_1.yview_moveto(1)
except ValueError:
max_percent = '-'
table_1.insert('', 'end', values=(Time, Label, OutDiam, self.parent.P1, self.parent.P2, max_percent))
hello = ((Time, Label, OutDiam, self.parent.P1, self.parent.P2, max_percent))
save_table(hello)
def save_table(hello):
with open((self.parent.txt_file), 'ab') as g:
w=csv.writer(g, quoting=csv.QUOTE_ALL)
w.writerow(hello)
table_text_entry = StringVar()
self.max_diameter_text = IntVar()
table_2 = tk.Frame(self.tableview)
table_2.grid(row=0, column=0, columnspan=5, sticky=N+S+E+W)
table_label = ttk.Label(table_2, text = 'Label:')
table_label.grid(row=0, column=0)
table_entry = ttk.Entry(table_2, width=30, textvariable=table_text_entry )
table_entry.grid(row=0, column=1)
add_button = ttk.Button(table_2, text='Add', command=add_row)
add_button.grid(row=0, column=2)
max_diameter_label = ttk.Label(table_2, text='Reference Diameter:')
max_diameter_label.grid(row=0, column=3)
max_diameter_entry = ttk.Entry(table_2, width=10, textvariable=self.max_diameter_text )
max_diameter_entry.grid(row=0, column=4)
max_diameter_entry.config(state=DISABLED)
table_1 = ttk.Treeview(self.tableview, show= 'headings')
table_1["columns"] = ('Time', 'Label', 'Outer Diameter', 'Pressure 1', 'Pressure 2', '% Ref')
table_1.column('#0', width=30)
table_1.column('Time', width=100, stretch=True)
table_1.column('Label', width=150)
table_1.column('Outer Diameter', width=100)
table_1.column('Pressure 1', width=100)
table_1.column('Pressure 2', width=100)
table_1.column('% Ref', width=50)
table_1.heading('#1', text = 'Time')
table_1.heading('#2', text = 'Label')
table_1.heading('#3', text = 'Outer Diameter')
table_1.heading('#4', text = 'Pressure 1')
table_1.heading('#5', text = 'Pressure 2')
table_1.heading('#6', text = '% Ref')
scrollbar = Scrollbar(self.tableview)
scrollbar.grid(row=1,column=2, sticky=NS)
scrollbar.config( command = table_1.yview )
table_1.grid(row=1, column=1, sticky=N+S+E+W)
##################################################
## Camera window
##################################################
class CameraFrame(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent)#, width=1000, height = 600)#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
def mainWidgets(self):
# Get the max dimensions that the Canvas can be
self.maxheight = self.parent.graphframe.winfo_height() - self.parent.tableframe.winfo_height() - self.parent.status_bar.winfo_height()
self.maxwidth = self.parent.status_bar.winfo_width() - self.parent.graphframe.winfo_width()
# Set up the Canvas that we will show the image on
self.cameraview = tk.Canvas(self, width=self.maxwidth, height=self.maxheight, background='white')
self.cameraview.grid(row=2,column=2,sticky=N+S+E+W, pady=ypadding)
# ROI rectangle initialisation
self.rect = None
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
# Factors for scaling ROI to original image (which is scaled to fit canvas)
self.delta_width = None
self.delta_height = None
self.scale_factor = None
# Bind events to mouse
self.cameraview.bind("<ButtonPress-1>",self.on_button_press)
self.cameraview.bind("<B1-Motion>",self.on_move_press)
self.cameraview.bind("<ButtonRelease-1>",self.on_button_release)
# Define functions for mouse actions
def on_button_press(self, event):
if self.parent.toolbar.set_roi == True: # Only enable if we have just pressed the button
# Delete any old ROIs
found = event.widget.find_all()
for iid in found:
if event.widget.type(iid) == 'rectangle':
event.widget.delete(iid)
# Create the rectangle ROI
self.start_x = event.x
self.start_y = event.y
self.rect = self.cameraview.create_rectangle(self.start_x, self.start_y, self.start_x, self.start_y)
def on_move_press(self, event):
#Update the ROI when the mouse is dragged
if self.parent.toolbar.set_roi == True:
curX, curY = (event.x, event.y)
self.cameraview.coords(self.rect, self.start_x, self.start_y, curX, curY)
def on_button_release(self, event):
if self.parent.toolbar.set_roi == True: # Only enable if we have just pressed the button
self.end_x = event.x
self.end_y = event.y
self.parent.toolbar.set_roi = False
self.parent.toolbar.ROI_checkBox.state(['selected'])
self.parent.toolbar.ROI_is_checked.set(1)
pass
def rescale_frame(self,frame):
# Scaling a rectangle to fit inside another rectangle.
# works out destinationwidth/sourcewidth and destinationheight/sourceheight
# and scaled by the smaller of the two ratios
width = frame.shape[1]
height = frame.shape[0]
widthfactor = self.maxwidth / width
heightfactor = self.maxheight / height
if widthfactor < heightfactor:
self.scale_factor = widthfactor
else:
self.scale_factor = heightfactor
global scale_factor
scale_factor = self.scale_factor
width = int(frame.shape[1] * self.scale_factor)
height = int(frame.shape[0] * self.scale_factor)
self.delta_width = int((self.maxwidth - width)/2)
self.delta_height = int((self.maxheight - height)/2)
return cv2.resize(frame, (width, height), interpolation = cv2.INTER_AREA)
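# Worked example for rescale_frame (hypothetical numbers, assuming the
# divisions above produce floats): a 1920x1080 frame on an 800x600 canvas gives
# widthfactor ~= 0.417 and heightfactor ~= 0.556, so scale_factor is the
# smaller ratio (0.417); the frame is resized to roughly 800x450 and
# delta_height = (600 - 450) / 2 = 75 centres it vertically (delta_width = 0).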
def process_queue(self,params,img,count):
try:
img = img
imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
OD1,OD2,ID1,ID2,start,diff,ODS_flag,IDS_flag, ROI = params
if self.parent.toolbar.record_flag:
# Draw the diameters:
for m in range(len(OD1)):
if self.parent.toolbar.filter_is_checked.get() == 0:
C1 = (255,0,0) # blue
C2 = (0,0,255) #red
else:
if ODS_flag[m] == 1:
C1 = (255,0,0) # blue
else:
C1 = (0,0,0) #black
if IDS_flag[m] == 1:
C2 = (0,0,255) #red
else:
C2 = (0,0,0) #black
pos = m*diff+start
#Horizontal lines
imgc = cv2.line(imgc,(OD1[m],pos),(OD2[m],pos),C1,4) #in opencv rgb is bgr
if self.parent.toolbar.ID_is_checked.get() == 1:
imgc = cv2.line(imgc,(ID2[m],pos),(ID1[m],pos),C2,2) #in opencv rgb is bgr
#Vertical lines
imgc = cv2.line(imgc,(OD2[m],pos-5),(OD2[m],pos+5),C1,4) #in opencv rgb is bgr
imgc = cv2.line(imgc,(OD1[m],pos-5),(OD1[m],pos+5),C1,4) #in opencv rgb is bgr
if self.parent.toolbar.ID_is_checked.get() == 1:
imgc = cv2.line(imgc,(ID2[m],pos-5),(ID2[m],pos+5),C2,2) #in opencv rgb is bgr
imgc = cv2.line(imgc,(ID1[m],pos-5),(ID1[m],pos+5),C2,2) #in opencv rgb is bgr
# Adding ROI to the image.
# There is a problem here.
# The RECTANGLE function uses coordinates from a box drawn on a scaled image
# We then plot these directly onto the original image, and then scale it again
# I need to transform the rectangle coordinates and subtract these off.
#heightdiff = self.maxheight - imgc.shape[0]
#widthdiff = self.maxwidth - imgc.shape[1]
# This is drawing on the region of interest
Cwhite = (0,165,255)
# First one is horizontal line
if self.rect and self.parent.toolbar.ROI_is_checked.get() == 1:
rx0 = int((ROI[0][0] - self.delta_width)/self.scale_factor)#
rx1 = int((ROI[1][0] - self.delta_width)/self.scale_factor)#
ry0 = int((ROI[0][1] - self.delta_height)/self.scale_factor) #
ry1 = int((ROI[1][1] - self.delta_height)/self.scale_factor)#
else:
#print "Using this ROI"
rx0 = ROI[0][0]#int((ROI[0][0] - self.delta_width)/self.scale_factor)
rx1 = ROI[1][0]#int((ROI[1][0] - self.delta_width)/self.scale_factor)
ry0 = ROI[0][1]#int((ROI[0][1] - self.delta_height)/self.scale_factor)
ry1 = ROI[1][1]#int((ROI[1][1] - self.delta_height)/self.scale_factor)
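# Worked example for the canvas -> image mapping above (hypothetical numbers):
# with scale_factor = 0.5 and delta_width = 40, a ROI corner drawn at canvas
# x = 140 maps back to image x = (140 - 40) / 0.5 = 200; the y coordinates are
# mapped the same way using delta_height.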
imgc = cv2.line(imgc,(rx0,ry0),(rx1,ry0),Cwhite,1) #in opencv rgb is bgr
imgc = cv2.line(imgc,(rx0,ry1),(rx1,ry1),Cwhite,1) #in opencv rgb is bgr
imgc = cv2.line(imgc,(rx0,ry0),(rx0,ry1),Cwhite,1) #in opencv rgb is bgr
imgc = cv2.line(imgc,(rx1,ry0),(rx1,ry1),Cwhite,1) #in opencv rgb is bgr
#cv2.putText(imgc, 't=%.2f s' %timenow,(30,30), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA)
if self.parent.toolbar.record_is_checked.get() == 1 and self.parent.count%self.parent.rec_interval == 0:
directory = os.path.join(head, self.parent.filename.name[:-4]+'\\Tiff\\')
if not os.path.exists(directory):
os.makedirs(directory)
gfxPath = os.path.join(directory, '%s_f=%s_Result.tiff' % (os.path.splitext(tail)[0],str(int(self.parent.count/self.parent.rec_interval)).zfill(6)))
cv2.imwrite(gfxPath,imgc)
#out.write(imgc) # For writing to AVI file.
else:
pass
if self.parent.toolbar.snapshot_flag == True:
print "Snapshot pressed"
gfxPath = os.path.join(head, '%s_t=%ss_Result SNAPSHOT.tiff' % (os.path.splitext(tail)[0],str(int(self.parent.count/self.parent.rec_interval)).zfill(6)))
cv2.imwrite(gfxPath,imgc)
self.parent.toolbar.snapshot_flag = False
else:
pass
# Rescale the image so it doesn't take over the screen
imgc = self.rescale_frame(imgc)
imgc = cv2.cvtColor(imgc, cv2.COLOR_BGR2RGBA)
prevImg = Image.fromarray(imgc)
imgtk = ImageTk.PhotoImage(image=prevImg)
#Show the image
self.imgtk = imgtk
self.image_on_canvas_ = self.cameraview.create_image(self.maxwidth/2, self.maxheight/2, anchor=CENTER,image=self.imgtk)
except:
pass
##################################################
## Threaded client: check for new images and process them in separate threads
##################################################
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
#threading.Thread.daemon = True # Make sure the thread terminates on exit
self.master = master
# Create the queue
self.queue = Queue.Queue( )
# Set up the GUI part
self.gui = GuiPart(self.master, self.queue,self.endApplication)
# Set up the thread to do asynchronous I/O
# More threads can also be created and used, if necessary
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
#self.thread1.deamon = True
self.thread1.start( )
# Start the periodic call in the GUI to check if the queue contains anything
self.periodicCall( )
# Check every 50 ms if there is something new in the queue.
def periodicCall(self):
if self.running:
if self.queue.qsize( ) > 0:
self.gui.processIncoming()
else:
pass
self.master.after(50, self.periodicCall)
# Thread for getting camera images. Adds each image to a queue
def workerThread1(self):
while self.running:
if(self.queue.empty()):
try: # Catch exception on closing the window!
# Check if there is an image in the buffer, or an image acquisition in progress
#print "image remaining count = ", mmc.getRemainingImageCount()
if (mmc.getRemainingImageCount() > 0 or mmc.isSequenceRunning()):
if mmc.getRemainingImageCount() > 0:
img = mmc.getLastImage()
'''
#Get image
if self.gui.toolbar.mm_settings_val.get() == 1:
img = mmc.getLastImage()# mmc.popNextImage()# ## Get the next image. mmc.popNextImage() #
elif self.gui.toolbar.mm_settings_val.get() == 0:
img = mmc.popNextImage()
else:
pass
'''
self.queue.put(img) # Put the image in the queue
else:
pass
if self.gui.toolbar.mm_settings_val.get() == 1:
time.sleep(self.gui.Force_faster_interval)
else:
time.sleep(self.gui.Force_min_interval)
else:
pass
except:
pass
else:
pass
# Function that cleans up on exit. It should kill all processes properly.
def endApplication(self):
try:
mmc.stopSequenceAcquisition() # stop uManager acquisition
mmc.reset() # reset uManager
except:
pass
self.running = 0
#sys.exit()
self.master.quit()
self.master.destroy()
##################################################
## Splash screen
##################################################
rootsplash = tk.Tk()
rootsplash.overrideredirect(True)
width, height = rootsplash.winfo_screenwidth(), rootsplash.winfo_screenheight()
#print "Screen height is = ", height
#print "Screen width is = ", width
#Load in the splash screen image
image_file = "Splash.gif"
image = Image.open(image_file)
image2 = PhotoImage(file=image_file)
# Scale to half screen, centered
imagewidth, imageheight = image2.width(), image2.height()
newimagewidth, newimageheight = int(np.floor(width*0.5)), int(np.floor(height*0.5))
image = image.resize((newimagewidth,newimageheight), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
# Create and show for 3 seconds
rootsplash.geometry('%dx%d+%d+%d' % (newimagewidth, newimageheight, width/2 - newimagewidth/2, height/2 - newimageheight/2))
canvas = tk.Canvas(rootsplash, height=height, width=width, bg="darkgrey")
canvas.create_image(width/2 - newimagewidth/2, height/2 - newimageheight/2, image=image)
canvas.pack()
rootsplash.after(3000, rootsplash.destroy)
rootsplash.mainloop()
##################################################
## Main application loop
##################################################
if __name__ == "__main__":
# Initiate uManager
mmc = MMCorePy.CMMCore()
# Create the main window
rand = random.Random( )
root = tk.Tk( )
root.iconbitmap('ICON.ICO')
root.attributes('-topmost',True)
root.after_idle(root.attributes,'-topmost',False)
root.wm_title("VasoTracker") #Makes the title that will appear in the top left
root.state("zoomed")
root.resizable(0,0) # Remove ability to resize
# Go go go!
client = ThreadedClient(root)
root.mainloop( )
neural_gpu_trainer.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural GPU."""
from __future__ import print_function
import math
import os
import random
import sys
import threading
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
import program_utils
import data_utils as data
import neural_gpu as ngpu
import wmt_utils as wmt
tf.app.flags.DEFINE_float("lr", 0.1, "Learning rate.")
tf.app.flags.DEFINE_float("init_weight", 0.8, "Initial weights deviation.")
tf.app.flags.DEFINE_float("max_grad_norm", 4.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("cutoff", 1.2, "Cutoff at the gates.")
tf.app.flags.DEFINE_float("curriculum_ppx", 9.9, "Move curriculum if ppl < X.")
tf.app.flags.DEFINE_float("curriculum_seq", 0.3, "Move curriculum if seq < X.")
tf.app.flags.DEFINE_float("dropout", 0.1, "Dropout that much.")
tf.app.flags.DEFINE_float("grad_noise_scale", 0.0, "Gradient noise scale.")
tf.app.flags.DEFINE_float("max_sampling_rate", 0.1, "Maximal sampling rate.")
tf.app.flags.DEFINE_float("length_norm", 0.0, "Length normalization.")
tf.app.flags.DEFINE_float("train_beam_freq", 0.0, "Beam-based training.")
tf.app.flags.DEFINE_float("train_beam_anneal", 20000, "How many steps anneal.")
tf.app.flags.DEFINE_integer("eval_beam_steps", 4, "How many beam steps eval.")
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size.")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 100, "Steps per epoch.")
tf.app.flags.DEFINE_integer("nmaps", 64, "Number of floats in each cell.")
tf.app.flags.DEFINE_integer("vec_size", 64, "Size of word vectors.")
tf.app.flags.DEFINE_integer("train_data_size", 1000, "Training examples/len.")
tf.app.flags.DEFINE_integer("max_length", 40, "Maximum length.")
tf.app.flags.DEFINE_integer("random_seed", 125459, "Random seed.")
tf.app.flags.DEFINE_integer("nconvs", 2, "How many convolutions / 1 step.")
tf.app.flags.DEFINE_integer("kw", 3, "Kernel width.")
tf.app.flags.DEFINE_integer("kh", 3, "Kernel height.")
tf.app.flags.DEFINE_integer("height", 4, "Height.")
tf.app.flags.DEFINE_integer("mem_size", -1, "Memory size (sqrt)")
tf.app.flags.DEFINE_integer("soft_mem_size", 1024, "Softmax memory this size.")
tf.app.flags.DEFINE_integer("num_gpus", 1, "Number of GPUs to use.")
tf.app.flags.DEFINE_integer("num_replicas", 1, "Number of replicas in use.")
tf.app.flags.DEFINE_integer("beam_size", 1, "Beam size during decoding. "
"If 0, no decoder, the non-extended Neural GPU.")
tf.app.flags.DEFINE_integer("max_target_vocab", 0,
"Maximal size of target vocabulary.")
tf.app.flags.DEFINE_integer("decode_offset", 0, "Offset for decoding.")
tf.app.flags.DEFINE_integer("task", -1, "Task id when running on borg.")
tf.app.flags.DEFINE_integer("nprint", 0, "How many test examples to print out.")
tf.app.flags.DEFINE_integer("eval_bin_print", 3, "How many bins step in eval.")
tf.app.flags.DEFINE_integer("mode", 0, "Mode: 0-train other-decode.")
tf.app.flags.DEFINE_bool("atrous", False, "Whether to use atrous convs.")
tf.app.flags.DEFINE_bool("layer_norm", False, "Do layer normalization.")
tf.app.flags.DEFINE_bool("quantize", False, "Whether to quantize variables.")
tf.app.flags.DEFINE_bool("do_train", True, "If false, only update memory.")
tf.app.flags.DEFINE_bool("rnn_baseline", False, "If true build an RNN instead.")
tf.app.flags.DEFINE_bool("simple_tokenizer", False,
"If true, tokenize on spaces only, digits are 0.")
tf.app.flags.DEFINE_bool("normalize_digits", True,
"Whether to normalize digits with simple tokenizer.")
tf.app.flags.DEFINE_integer("vocab_size", 16, "Joint vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp/", "Directory to store models.")
tf.app.flags.DEFINE_string("test_file_prefix", "", "Files to test (.en,.fr).")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_string("word_vector_file_en", "",
"Optional file with word vectors to start training.")
tf.app.flags.DEFINE_string("word_vector_file_fr", "",
"Optional file with word vectors to start training.")
tf.app.flags.DEFINE_string("problem", "wmt", "What problem are we solving?.")
tf.app.flags.DEFINE_integer("ps_tasks", 0, "Number of ps tasks used.")
tf.app.flags.DEFINE_string("master", "", "Name of the TensorFlow master.")
FLAGS = tf.app.flags.FLAGS
EXTRA_EVAL = 10
EVAL_LEN_INCR = 8
MAXLEN_F = 2.0
def zero_split(tok_list, append=None):
"""Split tok_list (list of ints) on 0s, append int to all parts if given."""
res, cur, l = [], [], 0
for tok in tok_list:
if tok == 0:
if append is not None:
cur.append(append)
res.append(cur)
l = max(l, len(cur))
cur = []
else:
cur.append(tok)
if append is not None:
cur.append(append)
res.append(cur)
l = max(l, len(cur))
return res, l
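# Quick illustration of zero_split (made-up token ids):
#   zero_split([3, 4, 0, 5, 0, 6])      -> ([[3, 4], [5], [6]], 2)
#   zero_split([3, 4, 0, 5], append=9)  -> ([[3, 4, 9], [5, 9]], 3)
# The zeros act as separators and the second return value is the length of the
# longest part (after the optional append).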
def read_data(source_path, target_path, buckets, max_size=None, print_out=True):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
buckets: the buckets to use.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
If set to 1, no data will be returned (empty lists of the right form).
print_out: whether to print out status or not.
Returns:
data_set: a list of length len(buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files whose zero-split
parts fit into the n-th bucket, i.e. the longest source part and the
longest target part are both <= buckets[n]; source and target are lists
of token-id lists.
"""
data_set = [[] for _ in buckets]
counter = 0
if max_size != 1:
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0 and print_out:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
source_ids, source_len = zero_split(source_ids)
target_ids, target_len = zero_split(target_ids, append=wmt.EOS_ID)
for bucket_id, size in enumerate(buckets):
if source_len <= size and target_len <= size:
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
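# Example of the bucketing above (hypothetical sizes): with buckets = [8, 16, 32],
# a pair whose longest source part is 10 tokens and whose longest target part is
# 12 tokens lands in bucket 1 (size 16); a pair with lengths (33, 5) fits no
# bucket and is silently skipped.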
global_train_set = {"wmt": []}
train_buckets_scale = {"wmt": []}
def calculate_buckets_scale(data_set, buckets, problem):
"""Calculate buckets scales for the given data set."""
train_bucket_sizes = [len(data_set[b]) for b in xrange(len(buckets))]
train_total_size = max(1, float(sum(train_bucket_sizes)))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size of the i-th training bucket, as used later.
if problem not in train_buckets_scale:
train_buckets_scale[problem] = []
train_buckets_scale[problem].append(
[sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))])
return train_total_size
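# Example of the scale computation (hypothetical sizes): bucket sizes
# [100, 300, 600] give train_total_size = 1000 and a cumulative scale of
# [0.1, 0.4, 1.0]; get_bucket_id() below draws a uniform number in [0, 1] and
# picks the first bucket whose scale exceeds it, so buckets are sampled in
# proportion to how much data they hold.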
def read_data_into_global(source_path, target_path, buckets,
max_size=None, print_out=True):
"""Read data into the global variables (can be in a separate thread)."""
# pylint: disable=global-variable-not-assigned
global global_train_set, train_buckets_scale
# pylint: enable=global-variable-not-assigned
data_set = read_data(source_path, target_path, buckets, max_size, print_out)
global_train_set["wmt"].append(data_set)
train_total_size = calculate_buckets_scale(data_set, buckets, "wmt")
if print_out:
print(" Finished global data reading (%d)." % train_total_size)
def initialize(sess=None):
"""Initialize data and model."""
global MAXLEN_F
# Create training directory if it does not exist.
if not tf.gfile.IsDirectory(FLAGS.train_dir):
data.print_out("Creating training directory %s." % FLAGS.train_dir)
tf.gfile.MkDir(FLAGS.train_dir)
decode_suffix = "beam%dln%d" % (FLAGS.beam_size,
int(100 * FLAGS.length_norm))
if FLAGS.mode == 0:
decode_suffix = ""
if FLAGS.task >= 0:
data.log_filename = os.path.join(FLAGS.train_dir,
"log%d%s" % (FLAGS.task, decode_suffix))
else:
data.log_filename = os.path.join(FLAGS.train_dir, "neural_gpu/log")
# Set random seed.
if FLAGS.random_seed > 0:
seed = FLAGS.random_seed + max(0, FLAGS.task)
tf.set_random_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Check data sizes.
assert data.bins
max_length = min(FLAGS.max_length, data.bins[-1])
while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL:
data.bins = data.bins[:-1]
if sess is None and FLAGS.task == 0 and FLAGS.num_replicas > 1:
if max_length > 60:
max_length = max_length * 1 / 2 # Save memory on chief.
min_length = min(14, max_length - 3) if FLAGS.problem == "wmt" else 3
for p in FLAGS.problem.split("-"):
if p in ["progeval", "progsynth"]:
min_length = max(26, min_length)
assert max_length + 1 > min_length
while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL:
data.bins = data.bins[:-1]
# Create checkpoint directory if it does not exist.
if FLAGS.mode == 0 or FLAGS.task < 0:
checkpoint_dir = os.path.join(FLAGS.train_dir, "neural_gpu%s"
% ("" if FLAGS.task < 0 else str(FLAGS.task)))
else:
checkpoint_dir = FLAGS.train_dir
if not tf.gfile.IsDirectory(checkpoint_dir):
data.print_out("Creating checkpoint directory %s." % checkpoint_dir)
tf.gfile.MkDir(checkpoint_dir)
# Prepare data.
if FLAGS.problem == "wmt":
# Prepare WMT data.
data.print_out("Preparing WMT data in %s" % FLAGS.data_dir)
if FLAGS.simple_tokenizer:
MAXLEN_F = 3.5
(en_train, fr_train, en_dev, fr_dev,
en_path, fr_path) = wmt.prepare_wmt_data(
FLAGS.data_dir, FLAGS.vocab_size,
tokenizer=wmt.space_tokenizer,
normalize_digits=FLAGS.normalize_digits)
else:
(en_train, fr_train, en_dev, fr_dev,
en_path, fr_path) = wmt.prepare_wmt_data(
FLAGS.data_dir, FLAGS.vocab_size)
# Read data into buckets and compute their sizes.
fr_vocab, rev_fr_vocab = wmt.initialize_vocabulary(fr_path)
data.vocab = fr_vocab
data.rev_vocab = rev_fr_vocab
data.print_out("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = {}
dev_set["wmt"] = read_data(en_dev, fr_dev, data.bins)
def data_read(size, print_out):
read_data_into_global(en_train, fr_train, data.bins, size, print_out)
data_read(50000, False)
read_thread_small = threading.Thread(
name="reading-data-small", target=lambda: data_read(900000, False))
read_thread_small.start()
read_thread_full = threading.Thread(
name="reading-data-full",
target=lambda: data_read(FLAGS.max_train_data_size, True))
read_thread_full.start()
data.print_out("Data reading set up.")
else:
# Prepare algorithmic data.
en_path, fr_path = None, None
tasks = FLAGS.problem.split("-")
data_size = FLAGS.train_data_size
for t in tasks:
data.print_out("Generating data for %s." % t)
if t in ["progeval", "progsynth"]:
data.init_data(t, data.bins[-1], 20 * data_size, FLAGS.vocab_size)
if len(program_utils.prog_vocab) > FLAGS.vocab_size - 2:
raise ValueError("Increase vocab_size to %d for prog-tasks."
% (len(program_utils.prog_vocab) + 2))
data.rev_vocab = program_utils.prog_vocab
data.vocab = program_utils.prog_rev_vocab
else:
for l in xrange(max_length + EXTRA_EVAL - 1):
data.init_data(t, l, data_size, FLAGS.vocab_size)
data.init_data(t, data.bins[-2], data_size, FLAGS.vocab_size)
data.init_data(t, data.bins[-1], data_size, FLAGS.vocab_size)
if t not in global_train_set:
global_train_set[t] = []
global_train_set[t].append(data.train_set[t])
calculate_buckets_scale(data.train_set[t], data.bins, t)
dev_set = data.test_set
# Grid-search parameters.
lr = FLAGS.lr
init_weight = FLAGS.init_weight
max_grad_norm = FLAGS.max_grad_norm
if sess is not None and FLAGS.task > -1:
def job_id_factor(step):
"""If jobid / step mod 3 is 0, 1, 2: say 0, 1, -1."""
return ((((FLAGS.task / step) % 3) + 1) % 3) - 1
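# e.g. tasks 3, 4 and 5 (with step 1) give factors 0, 1 and -1, so lr,
# init_weight and max_grad_norm are swept around their base values across
# replicas (at strides 1, 3 and 9 respectively).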
lr *= math.pow(2, job_id_factor(1))
init_weight *= math.pow(1.5, job_id_factor(3))
max_grad_norm *= math.pow(2, job_id_factor(9))
# Print out parameters.
curriculum = FLAGS.curriculum_seq
msg1 = ("layers %d kw %d h %d kh %d batch %d noise %.2f"
% (FLAGS.nconvs, FLAGS.kw, FLAGS.height, FLAGS.kh,
FLAGS.batch_size, FLAGS.grad_noise_scale))
msg2 = ("cut %.2f lr %.3f iw %.2f cr %.2f nm %d d%.4f gn %.2f %s"
% (FLAGS.cutoff, lr, init_weight, curriculum, FLAGS.nmaps,
FLAGS.dropout, max_grad_norm, msg1))
data.print_out(msg2)
# Create model and initialize it.
tf.get_variable_scope().set_initializer(
tf.orthogonal_initializer(gain=1.8 * init_weight))
max_sampling_rate = FLAGS.max_sampling_rate if FLAGS.mode == 0 else 0.0
o = FLAGS.vocab_size if FLAGS.max_target_vocab < 1 else FLAGS.max_target_vocab
ngpu.CHOOSE_K = FLAGS.soft_mem_size
do_beam_model = FLAGS.train_beam_freq > 0.0001 and FLAGS.beam_size > 1
beam_size = FLAGS.beam_size if FLAGS.mode > 0 and not do_beam_model else 1
beam_size = min(beam_size, FLAGS.beam_size)
beam_model = None
def make_ngpu(cur_beam_size, back):
return ngpu.NeuralGPU(
FLAGS.nmaps, FLAGS.vec_size, FLAGS.vocab_size, o,
FLAGS.dropout, max_grad_norm, FLAGS.cutoff, FLAGS.nconvs,
FLAGS.kw, FLAGS.kh, FLAGS.height, FLAGS.mem_size,
lr / math.sqrt(FLAGS.num_replicas), min_length + 3, FLAGS.num_gpus,
FLAGS.num_replicas, FLAGS.grad_noise_scale, max_sampling_rate,
atrous=FLAGS.atrous, do_rnn=FLAGS.rnn_baseline,
do_layer_norm=FLAGS.layer_norm, beam_size=cur_beam_size, backward=back)
if sess is None:
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
model = make_ngpu(beam_size, True)
if do_beam_model:
tf.get_variable_scope().reuse_variables()
beam_model = make_ngpu(FLAGS.beam_size, False)
else:
model = make_ngpu(beam_size, True)
if do_beam_model:
tf.get_variable_scope().reuse_variables()
beam_model = make_ngpu(FLAGS.beam_size, False)
sv = None
if sess is None:
# The supervisor configuration has a few overridden options.
sv = tf.train.Supervisor(logdir=checkpoint_dir,
is_chief=(FLAGS.task < 1),
saver=model.saver,
summary_op=None,
save_summaries_secs=60,
save_model_secs=15 * 60,
global_step=model.global_step)
config = tf.ConfigProto(allow_soft_placement=True)
sess = sv.PrepareSession(FLAGS.master, config=config)
data.print_out("Created model. Checkpoint dir %s" % checkpoint_dir)
# Load model from parameters if a checkpoint exists.
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + ".index"):
data.print_out("Reading model parameters from %s"
% ckpt.model_checkpoint_path)
model.saver.restore(sess, ckpt.model_checkpoint_path)
elif sv is None:
sess.run(tf.global_variables_initializer())
data.print_out("Initialized variables (no supervisor mode).")
elif FLAGS.task < 1 and FLAGS.mem_size > 0:
# sess.run(model.mem_norm_op)
data.print_out("Created new model and normalized mem (on chief).")
# Return the model and needed variables.
return (model, beam_model, min_length, max_length, checkpoint_dir,
(global_train_set, dev_set, en_path, fr_path), sv, sess)
def m_step(model, beam_model, sess, batch_size, inp, target, bucket, nsteps, p):
"""Evaluation multi-step for program synthesis."""
state, scores, hist = None, [[-11.0 for _ in xrange(batch_size)]], []
for _ in xrange(nsteps):
# Get the best beam (no training, just forward model).
new_target, new_first, new_inp, new_scores = get_best_beam(
beam_model, sess, inp, target,
batch_size, FLAGS.beam_size, bucket, hist, p, test_mode=True)
hist.append(new_first)
_, _, _, state = model.step(sess, inp, new_target, False, state=state)
inp = new_inp
scores.append([max(scores[-1][i], new_scores[i])
for i in xrange(batch_size)])
# The final step with the true target.
loss, res, _, _ = model.step(sess, inp, target, False, state=state)
return loss, res, new_target, scores[1:]
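# In words: m_step lets the model rewrite its own input nsteps times, each time
# feeding back the best beam found so far (scores accumulate as a running
# per-example maximum), and only the final step is scored against the true
# target.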
def single_test(bin_id, model, sess, nprint, batch_size, dev, p, print_out=True,
offset=None, beam_model=None):
"""Test model on test data of length l using the given session."""
if not dev[p][bin_id]:
data.print_out(" bin %d (%d)\t%s\tppl NA errors NA seq-errors NA"
% (bin_id, data.bins[bin_id], p))
return 1.0, 1.0, 0.0
inpt, target = data.get_batch(
bin_id, batch_size, dev[p], FLAGS.height, offset)
if FLAGS.beam_size > 1 and beam_model:
loss, res, new_tgt, scores = m_step(
model, beam_model, sess, batch_size, inpt, target, bin_id,
FLAGS.eval_beam_steps, p)
score_avgs = [sum(s) / float(len(s)) for s in scores]
score_maxs = [max(s) for s in scores]
score_str = ["(%.2f, %.2f)" % (score_avgs[i], score_maxs[i])
for i in xrange(FLAGS.eval_beam_steps)]
data.print_out(" == scores (avg, max): %s" % "; ".join(score_str))
errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
nprint, new_tgt, scores[-1])
else:
loss, res, _, _ = model.step(sess, inpt, target, False)
errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
nprint)
seq_err = float(seq_err) / batch_size
if total > 0:
errors = float(errors) / total
if print_out:
data.print_out(" bin %d (%d)\t%s\tppl %.2f errors %.2f seq-errors %.2f"
% (bin_id, data.bins[bin_id], p, data.safe_exp(loss),
100 * errors, 100 * seq_err))
return (errors, seq_err, loss)
def assign_vectors(word_vector_file, embedding_key, vocab_path, sess):
"""Assign the embedding_key variable from the given word vectors file."""
# For words in the word vector file, set their embedding at start.
if not tf.gfile.Exists(word_vector_file):
data.print_out("Word vector file does not exist: %s" % word_vector_file)
sys.exit(1)
vocab, _ = wmt.initialize_vocabulary(vocab_path)
vectors_variable = [v for v in tf.trainable_variables()
if embedding_key == v.name]
if len(vectors_variable) != 1:
data.print_out("Word vector variable not found or too many.")
sys.exit(1)
vectors_variable = vectors_variable[0]
vectors = vectors_variable.eval()
data.print_out("Pre-setting word vectors from %s" % word_vector_file)
with tf.gfile.GFile(word_vector_file, mode="r") as f:
# Lines have format: dog 0.045123 -0.61323 0.413667 ...
for line in f:
line_parts = line.split()
# The first part is the word.
word = line_parts[0]
if word in vocab:
# Remaining parts are components of the vector.
word_vector = np.array([float(x) for x in line_parts[1:]])
if len(word_vector) != FLAGS.vec_size:
data.print_out("Warn: Word '%s', Expecting vector size %d, "
"found %d" % (word, FLAGS.vec_size,
len(word_vector)))
else:
vectors[vocab[word]] = word_vector
# Assign the modified vectors to the vectors_variable in the graph.
sess.run([vectors_variable.initializer],
{vectors_variable.initializer.inputs[1]: vectors})
def print_vectors(embedding_key, vocab_path, word_vector_file):
"""Print vectors from the given variable."""
_, rev_vocab = wmt.initialize_vocabulary(vocab_path)
vectors_variable = [v for v in tf.trainable_variables()
if embedding_key == v.name]
if len(vectors_variable) != 1:
data.print_out("Word vector variable not found or too many.")
sys.exit(1)
vectors_variable = vectors_variable[0]
vectors = vectors_variable.eval()
l, s = vectors.shape[0], vectors.shape[1]
data.print_out("Printing %d word vectors from %s to %s."
% (l, embedding_key, word_vector_file))
with tf.gfile.GFile(word_vector_file, mode="w") as f:
# Lines have format: dog 0.045123 -0.61323 0.413667 ...
for i in xrange(l):
f.write(rev_vocab[i])
for j in xrange(s):
f.write(" %.8f" % vectors[i][j])
f.write("\n")
def get_bucket_id(train_buckets_scale_c, max_cur_length, data_set):
"""Get a random bucket id."""
# Choose a bucket according to data distribution. Pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale_c))
if train_buckets_scale_c[i] > random_number_01])
while bucket_id > 0 and not data_set[bucket_id]:
bucket_id -= 1
for _ in xrange(10 if np.random.random_sample() < 0.9 else 1):
if data.bins[bucket_id] > max_cur_length:
random_number_01 = min(random_number_01, np.random.random_sample())
bucket_id = min([i for i in xrange(len(train_buckets_scale_c))
if train_buckets_scale_c[i] > random_number_01])
while bucket_id > 0 and not data_set[bucket_id]:
bucket_id -= 1
return bucket_id
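# Illustration (hypothetical scale): with train_buckets_scale_c = [0.1, 0.4, 1.0],
# a draw of 0.35 selects bucket 1. The loop above then re-draws (keeping the
# smaller of the two numbers, usually up to 10 times) whenever the chosen bucket
# is longer than max_cur_length, which biases sampling towards the shorter
# buckets early in the curriculum.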
def score_beams(beams, target, inp, history, p,
print_out=False, test_mode=False):
"""Score beams."""
if p == "progsynth":
return score_beams_prog(beams, target, inp, history, print_out, test_mode)
elif test_mode:
return beams[0], 10.0 if str(beams[0][:len(target)]) == str(target) else 0.0
else:
history_s = [str(h) for h in history]
best, best_score, tgt, eos_id = None, -1000.0, target, None
if p == "wmt":
eos_id = wmt.EOS_ID
if eos_id and eos_id in target:
tgt = target[:target.index(eos_id)]
for beam in beams:
if eos_id and eos_id in beam:
beam = beam[:beam.index(eos_id)]
l = min(len(tgt), len(beam))
score = len([i for i in xrange(l) if tgt[i] == beam[i]]) / float(len(tgt))
hist_score = 20.0 if str([b for b in beam if b > 0]) in history_s else 0.0
if score < 1.0:
score -= hist_score
if score > best_score:
best = beam
best_score = score
return best, best_score
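# Rough example of the non-program scoring path (made-up token ids): with
# target [5, 6, 7, 9] and beam [5, 6, 8], the first min(3, 4) = 3 positions are
# compared, two of them match, so the beam scores 2 / len(target) = 0.5; a beam
# that merely repeats something already in `history` (and is not a perfect
# match) is additionally penalised by 20.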
def score_beams_prog(beams, target, inp, history, print_out=False,
test_mode=False):
"""Score beams for program synthesis."""
tgt_prog = linearize(target, program_utils.prog_vocab, True, 1)
hist_progs = [linearize(h, program_utils.prog_vocab, True, 1)
for h in history]
tgt_set = set(target)
if print_out:
print("target: ", tgt_prog)
inps, tgt_outs = [], []
for i in xrange(3):
ilist = [inp[i + 1, l] for l in xrange(inp.shape[1])]
clist = [program_utils.prog_vocab[x] for x in ilist if x > 0]
olist = clist[clist.index("]") + 1:] # outputs
clist = clist[1:clist.index("]")] # inputs
inps.append([int(x) for x in clist])
if olist[0] == "[": # olist may be [int] or just int
tgt_outs.append(str([int(x) for x in olist[1:-1]]))
else:
if len(olist) == 1:
tgt_outs.append(olist[0])
else:
print([program_utils.prog_vocab[x] for x in ilist if x > 0])
print(olist)
print(tgt_prog)
print(program_utils.evaluate(tgt_prog, {"a": inps[-1]}))
print("AAAAA")
tgt_outs.append(olist[0])
if not test_mode:
for _ in xrange(7):
ilen = np.random.randint(len(target) - 3) + 1
inps.append([random.choice(range(-15, 15)) for _ in range(ilen)])
tgt_outs.extend([program_utils.evaluate(tgt_prog, {"a": inp})
for inp in inps[3:]])
best, best_prog, best_score = None, "", -1000.0
for beam in beams:
b_prog = linearize(beam, program_utils.prog_vocab, True, 1)
b_set = set(beam)
jsim = len(tgt_set & b_set) / float(len(tgt_set | b_set))
b_outs = [program_utils.evaluate(b_prog, {"a": inp}) for inp in inps]
errs = len([x for x in b_outs if x == "ERROR"])
imatches = len([i for i in xrange(3) if b_outs[i] == tgt_outs[i]])
perfect = 10.0 if imatches == 3 else 0.0
hist_score = 20.0 if b_prog in hist_progs else 0.0
if test_mode:
score = perfect - errs
else:
matches = len([i for i in xrange(10) if b_outs[i] == tgt_outs[i]])
score = perfect + matches + jsim - errs
if score < 10.0:
score -= hist_score
# print b_prog
# print "jsim: ", jsim, " errs: ", errs, " mtchs: ", matches, " s: ", score
if score > best_score:
best = beam
best_prog = b_prog
best_score = score
if print_out:
print("best score: ", best_score, " best prog: ", best_prog)
return best, best_score
def get_best_beam(beam_model, sess, inp, target, batch_size, beam_size,
bucket, history, p, test_mode=False):
"""Run beam_model, score beams, and return the best as target and in input."""
_, output_logits, _, _ = beam_model.step(
sess, inp, target, None, beam_size=FLAGS.beam_size)
new_targets, new_firsts, scores, new_inp = [], [], [], np.copy(inp)
for b in xrange(batch_size):
outputs = []
history_b = [[h[b, 0, l] for l in xrange(data.bins[bucket])]
for h in history]
for beam_idx in xrange(beam_size):
outputs.append([int(o[beam_idx * batch_size + b])
for o in output_logits])
target_t = [target[b, 0, l] for l in xrange(data.bins[bucket])]
best, best_score = score_beams(
outputs, [t for t in target_t if t > 0], inp[b, :, :],
[[t for t in h if t > 0] for h in history_b], p, test_mode=test_mode)
scores.append(best_score)
if 1 in best: # Only until _EOS.
best = best[:best.index(1) + 1]
best += [0 for _ in xrange(len(target_t) - len(best))]
new_targets.append([best])
first, _ = score_beams(
outputs, [t for t in target_t if t > 0], inp[b, :, :],
[[t for t in h if t > 0] for h in history_b], p, test_mode=True)
if 1 in first: # Only until _EOS.
first = first[:first.index(1) + 1]
first += [0 for _ in xrange(len(target_t) - len(first))]
new_inp[b, 0, :] = np.array(first, dtype=np.int32)
new_firsts.append([first])
# Change target if we found a great answer.
new_target = np.array(new_targets, dtype=np.int32)
for b in xrange(batch_size):
if scores[b] >= 10.0:
target[b, 0, :] = new_target[b, 0, :]
new_first = np.array(new_firsts, dtype=np.int32)
return new_target, new_first, new_inp, scores
def train():
"""Train the model."""
batch_size = FLAGS.batch_size * FLAGS.num_gpus
(model, beam_model, min_length, max_length, checkpoint_dir,
(train_set, dev_set, en_vocab_path, fr_vocab_path), sv, sess) = initialize()
with sess.as_default():
quant_op = model.quantize_op
max_cur_length = min(min_length + 3, max_length)
prev_acc_perp = [1000000 for _ in xrange(5)]
prev_seq_err = 1.0
is_chief = FLAGS.task < 1
do_report = False
    # Main training loop.
while not sv.ShouldStop():
global_step, max_cur_length, learning_rate = sess.run(
[model.global_step, model.cur_length, model.lr])
acc_loss, acc_l1, acc_total, acc_errors, acc_seq_err = 0.0, 0.0, 0, 0, 0
acc_grad_norm, step_count, step_c1, step_time = 0.0, 0, 0, 0.0
# For words in the word vector file, set their embedding at start.
bound1 = FLAGS.steps_per_checkpoint - 1
if FLAGS.word_vector_file_en and global_step < bound1 and is_chief:
assign_vectors(FLAGS.word_vector_file_en, "embedding:0",
en_vocab_path, sess)
if FLAGS.max_target_vocab < 1:
assign_vectors(FLAGS.word_vector_file_en, "target_embedding:0",
en_vocab_path, sess)
if FLAGS.word_vector_file_fr and global_step < bound1 and is_chief:
assign_vectors(FLAGS.word_vector_file_fr, "embedding:0",
fr_vocab_path, sess)
if FLAGS.max_target_vocab < 1:
assign_vectors(FLAGS.word_vector_file_fr, "target_embedding:0",
fr_vocab_path, sess)
for _ in xrange(FLAGS.steps_per_checkpoint):
step_count += 1
step_c1 += 1
global_step = int(model.global_step.eval())
train_beam_anneal = global_step / float(FLAGS.train_beam_anneal)
train_beam_freq = FLAGS.train_beam_freq * min(1.0, train_beam_anneal)
p = random.choice(FLAGS.problem.split("-"))
train_set = global_train_set[p][-1]
bucket_id = get_bucket_id(train_buckets_scale[p][-1], max_cur_length,
train_set)
# Prefer longer stuff 60% of time if not wmt.
if np.random.randint(100) < 60 and FLAGS.problem != "wmt":
bucket1 = get_bucket_id(train_buckets_scale[p][-1], max_cur_length,
train_set)
bucket_id = max(bucket1, bucket_id)
# Run a step and time it.
start_time = time.time()
inp, target = data.get_batch(bucket_id, batch_size, train_set,
FLAGS.height)
noise_param = math.sqrt(math.pow(global_step + 1, -0.55) *
prev_seq_err) * FLAGS.grad_noise_scale
# In multi-step mode, we use best from beam for middle steps.
state, new_target, scores, history = None, None, None, []
while (FLAGS.beam_size > 1 and
train_beam_freq > np.random.random_sample()):
# Get the best beam (no training, just forward model).
new_target, new_first, new_inp, scores = get_best_beam(
beam_model, sess, inp, target,
batch_size, FLAGS.beam_size, bucket_id, history, p)
history.append(new_first)
# Training step with the previous input and the best beam as target.
_, _, _, state = model.step(sess, inp, new_target, FLAGS.do_train,
noise_param, update_mem=True, state=state)
# Change input to the new one for the next step.
inp = new_inp
# If all results are great, stop (todo: not to wait for all?).
if FLAGS.nprint > 1:
print(scores)
if sum(scores) / float(len(scores)) >= 10.0:
break
# The final step with the true target.
loss, res, gnorm, _ = model.step(
sess, inp, target, FLAGS.do_train, noise_param,
update_mem=True, state=state)
step_time += time.time() - start_time
acc_grad_norm += 0.0 if gnorm is None else float(gnorm)
# Accumulate statistics.
acc_loss += loss
acc_l1 += loss
errors, total, seq_err = data.accuracy(
inp, res, target, batch_size, 0, new_target, scores)
if FLAGS.nprint > 1:
print("seq_err: ", seq_err)
acc_total += total
acc_errors += errors
acc_seq_err += seq_err
# Report summary every 10 steps.
if step_count + 3 > FLAGS.steps_per_checkpoint:
          do_report = True # Don't pollute plot too early.
if is_chief and step_count % 10 == 1 and do_report:
cur_loss = acc_l1 / float(step_c1)
acc_l1, step_c1 = 0.0, 0
cur_perp = data.safe_exp(cur_loss)
summary = tf.Summary()
summary.value.extend(
[tf.Summary.Value(tag="log_perplexity", simple_value=cur_loss),
tf.Summary.Value(tag="perplexity", simple_value=cur_perp)])
sv.SummaryComputed(sess, summary, global_step)
# Normalize and print out accumulated statistics.
acc_loss /= step_count
step_time /= FLAGS.steps_per_checkpoint
acc_seq_err = float(acc_seq_err) / (step_count * batch_size)
prev_seq_err = max(0.0, acc_seq_err - 0.02) # No noise at error < 2%.
acc_errors = float(acc_errors) / acc_total if acc_total > 0 else 1.0
t_size = float(sum([len(x) for x in train_set])) / float(1000000)
msg = ("step %d step-time %.2f train-size %.3f lr %.6f grad-norm %.4f"
% (global_step + 1, step_time, t_size, learning_rate,
acc_grad_norm / FLAGS.steps_per_checkpoint))
data.print_out("%s len %d ppl %.6f errors %.2f sequence-errors %.2f" %
(msg, max_cur_length, data.safe_exp(acc_loss),
100*acc_errors, 100*acc_seq_err))
# If errors are below the curriculum threshold, move curriculum forward.
is_good = FLAGS.curriculum_ppx > data.safe_exp(acc_loss)
is_good = is_good and FLAGS.curriculum_seq > acc_seq_err
if is_good and is_chief:
if FLAGS.quantize:
# Quantize weights.
data.print_out(" Quantizing parameters.")
sess.run([quant_op])
# Increase current length (until the next with training data).
sess.run(model.cur_length_incr_op)
# Forget last perplexities if we're not yet at the end.
if max_cur_length < max_length:
prev_acc_perp.append(1000000)
# Lower learning rate if we're worse than the last 5 checkpoints.
acc_perp = data.safe_exp(acc_loss)
if acc_perp > max(prev_acc_perp[-5:]) and is_chief:
sess.run(model.lr_decay_op)
prev_acc_perp.append(acc_perp)
# Save checkpoint.
if is_chief:
checkpoint_path = os.path.join(checkpoint_dir, "neural_gpu.ckpt")
model.saver.save(sess, checkpoint_path,
global_step=model.global_step)
# Run evaluation.
bin_bound = 4
for p in FLAGS.problem.split("-"):
total_loss, total_err, tl_counter = 0.0, 0.0, 0
for bin_id in xrange(len(data.bins)):
if bin_id < bin_bound or bin_id % FLAGS.eval_bin_print == 1:
err, _, loss = single_test(bin_id, model, sess, FLAGS.nprint,
batch_size * 4, dev_set, p,
beam_model=beam_model)
if loss > 0.0:
total_loss += loss
total_err += err
tl_counter += 1
test_loss = total_loss / max(1, tl_counter)
test_err = total_err / max(1, tl_counter)
test_perp = data.safe_exp(test_loss)
summary = tf.Summary()
summary.value.extend(
[tf.Summary.Value(tag="test/%s/loss" % p, simple_value=test_loss),
tf.Summary.Value(tag="test/%s/error" % p, simple_value=test_err),
tf.Summary.Value(tag="test/%s/perplexity" % p,
simple_value=test_perp)])
sv.SummaryComputed(sess, summary, global_step)
def linearize(output, rev_fr_vocab, simple_tokenizer=None, eos_id=wmt.EOS_ID):
# If there is an EOS symbol in outputs, cut them at that point (WMT).
if eos_id in output:
output = output[:output.index(eos_id)]
# Print out French sentence corresponding to outputs.
if simple_tokenizer or FLAGS.simple_tokenizer:
vlen = len(rev_fr_vocab)
def vget(o):
if o < vlen:
return rev_fr_vocab[o]
return "UNK"
return " ".join([vget(o) for o in output])
else:
return wmt.basic_detokenizer([rev_fr_vocab[o] for o in output])
def evaluate():
"""Evaluate an existing model."""
batch_size = FLAGS.batch_size * FLAGS.num_gpus
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
(model, beam_model, _, _, _,
(_, dev_set, en_vocab_path, fr_vocab_path), _, sess) = initialize(sess)
for p in FLAGS.problem.split("-"):
for bin_id in xrange(len(data.bins)):
if (FLAGS.task >= 0 and bin_id > 4) or (FLAGS.nprint == 0 and
bin_id > 8 and p == "wmt"):
break
single_test(bin_id, model, sess, FLAGS.nprint, batch_size, dev_set, p,
beam_model=beam_model)
path = FLAGS.test_file_prefix
xid = "" if FLAGS.task < 0 else ("%.4d" % (FLAGS.task+FLAGS.decode_offset))
en_path, fr_path = path + ".en" + xid, path + ".fr" + xid
    # Evaluate on the test files if they exist.
if path and tf.gfile.Exists(en_path) and tf.gfile.Exists(fr_path):
data.print_out("Translating test set %s" % en_path)
# Read lines.
en_lines, fr_lines = [], []
with tf.gfile.GFile(en_path, mode="r") as f:
for line in f:
en_lines.append(line.strip())
with tf.gfile.GFile(fr_path, mode="r") as f:
for line in f:
fr_lines.append(line.strip())
# Tokenize and convert to ids.
en_vocab, _ = wmt.initialize_vocabulary(en_vocab_path)
_, rev_fr_vocab = wmt.initialize_vocabulary(fr_vocab_path)
if FLAGS.simple_tokenizer:
en_ids = [wmt.sentence_to_token_ids(
l, en_vocab, tokenizer=wmt.space_tokenizer,
normalize_digits=FLAGS.normalize_digits)
for l in en_lines]
else:
en_ids = [wmt.sentence_to_token_ids(l, en_vocab) for l in en_lines]
# Translate.
results = []
for idx, token_ids in enumerate(en_ids):
if idx % 5 == 0:
data.print_out("Translating example %d of %d." % (idx, len(en_ids)))
# Which bucket does it belong to?
buckets = [b for b in xrange(len(data.bins))
if data.bins[b] >= len(token_ids)]
if buckets:
result, result_cost = [], 100000000.0
for bucket_id in buckets:
if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR:
break
# Get a 1-element batch to feed the sentence to the model.
used_batch_size = 1 # batch_size
inp, target = data.get_batch(
bucket_id, used_batch_size, None, FLAGS.height,
preset=([token_ids], [[]]))
loss, output_logits, _, _ = model.step(
sess, inp, target, None, beam_size=FLAGS.beam_size)
outputs = [int(o[0]) for o in output_logits]
loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm)
if FLAGS.simple_tokenizer:
cur_out = outputs
if wmt.EOS_ID in cur_out:
cur_out = cur_out[:cur_out.index(wmt.EOS_ID)]
res_tags = [rev_fr_vocab[o] for o in cur_out]
bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags)
loss += 1000.0 * bad_words + 100.0 * bad_brack
# print (bucket_id, loss)
if loss < result_cost:
result = outputs
result_cost = loss
final = linearize(result, rev_fr_vocab)
results.append("%s\t%s\n" % (final, fr_lines[idx]))
# print result_cost
sys.stderr.write(results[-1])
sys.stderr.flush()
else:
sys.stderr.write("TOOO_LONG\t%s\n" % fr_lines[idx])
sys.stderr.flush()
if xid:
decode_suffix = "beam%dln%dn" % (FLAGS.beam_size,
int(100 * FLAGS.length_norm))
with tf.gfile.GFile(path + ".res" + decode_suffix + xid, mode="w") as f:
for line in results:
f.write(line)
def mul(l):
res = 1.0
for s in l:
res *= s
return res
def interactive():
"""Interactively probe an existing model."""
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# Initialize model.
(model, _, _, _, _, (_, _, en_path, fr_path), _, _) = initialize(sess)
# Load vocabularies.
en_vocab, rev_en_vocab = wmt.initialize_vocabulary(en_path)
_, rev_fr_vocab = wmt.initialize_vocabulary(fr_path)
# Print out vectors and variables.
if FLAGS.nprint > 0 and FLAGS.word_vector_file_en:
print_vectors("embedding:0", en_path, FLAGS.word_vector_file_en)
if FLAGS.nprint > 0 and FLAGS.word_vector_file_fr:
print_vectors("target_embedding:0", fr_path, FLAGS.word_vector_file_fr)
total = 0
for v in tf.trainable_variables():
shape = v.get_shape().as_list()
total += mul(shape)
print(v.name, shape, mul(shape))
print(total)
# Start interactive loop.
sys.stdout.write("Input to Neural GPU Translation Model.\n")
sys.stdout.write("> ")
sys.stdout.flush()
inpt = sys.stdin.readline(), ""
while inpt:
cures = []
# Get token-ids for the input sentence.
if FLAGS.simple_tokenizer:
token_ids = wmt.sentence_to_token_ids(
inpt, en_vocab, tokenizer=wmt.space_tokenizer,
normalize_digits=FLAGS.normalize_digits)
else:
token_ids = wmt.sentence_to_token_ids(inpt, en_vocab)
print([rev_en_vocab[t] for t in token_ids])
# Which bucket does it belong to?
buckets = [b for b in xrange(len(data.bins))
if data.bins[b] >= max(len(token_ids), len(cures))]
if cures:
buckets = [buckets[0]]
if buckets:
result, result_cost = [], 10000000.0
for bucket_id in buckets:
if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR:
break
glen = 1
for gen_idx in xrange(glen):
# Get a 1-element batch to feed the sentence to the model.
inp, target = data.get_batch(
bucket_id, 1, None, FLAGS.height, preset=([token_ids], [cures]))
loss, output_logits, _, _ = model.step(
sess, inp, target, None, beam_size=FLAGS.beam_size,
update_mem=False)
# If it is a greedy decoder, outputs are argmaxes of output_logits.
if FLAGS.beam_size > 1:
outputs = [int(o) for o in output_logits]
else:
loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm)
outputs = [int(np.argmax(logit, axis=1))
for logit in output_logits]
print([rev_fr_vocab[t] for t in outputs])
print(loss, data.bins[bucket_id])
print(linearize(outputs, rev_fr_vocab))
cures.append(outputs[gen_idx])
print(cures)
print(linearize(cures, rev_fr_vocab))
if FLAGS.simple_tokenizer:
cur_out = outputs
if wmt.EOS_ID in cur_out:
cur_out = cur_out[:cur_out.index(wmt.EOS_ID)]
res_tags = [rev_fr_vocab[o] for o in cur_out]
bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags)
loss += 1000.0 * bad_words + 100.0 * bad_brack
if loss < result_cost:
result = outputs
result_cost = loss
print("FINAL", result_cost)
print([rev_fr_vocab[t] for t in result])
print(linearize(result, rev_fr_vocab))
else:
print("TOOO_LONG")
sys.stdout.write("> ")
sys.stdout.flush()
inpt = sys.stdin.readline(), ""
def main(_):
if FLAGS.mode == 0:
train()
elif FLAGS.mode == 1:
evaluate()
else:
interactive()
if __name__ == "__main__":
tf.app.run()
|
loading_screen.py
|
import itertools
import threading
import time
import sys
class LoadingScreen():
def __init__(self):
self.done = False
self.t = None
def __animate(self, data_type):
for c in itertools.cycle(['|', '/', '-', '\\']):
if self.done:
break
sys.stdout.write(f'\rDownloading {data_type} data from KNMI' + c)
sys.stdout.flush()
time.sleep(0.2)
def start(self, data_type):
self.t = threading.Thread(target=self.__animate, args=[data_type])
self.t.start()
    def stop(self):
        # Signal the animation thread to finish and wait for it before
        # printing the final status, so the spinner cannot overwrite it.
        self.done = True
        if self.t is not None:
            self.t.join()
        sys.stdout.write('\rSuccess\n')
        sys.stdout.flush()
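
# Minimal usage sketch (illustrative only; the data-type string and the sleep
# standing in for a real KNMI download are assumptions, not part of the module).
if __name__ == '__main__':
    loader = LoadingScreen()
    loader.start('precipitation')
    time.sleep(2)  # placeholder for the actual download work
    loader.stop()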
|
pesa2.py
|
# -*- coding: utf-8 -*-
#"""
#Created on Sun Jun 28 18:21:05 2020
#
#@author: Majdi Radaideh
#"""
from neorl.hybrid.pesacore.er import ExperienceReplay
from neorl.hybrid.pesacore.gwo import GWOmod
from neorl.hybrid.pesacore.de import DEmod
from neorl.hybrid.pesacore.woa import WOAmod
from neorl.hybrid.pesacore.es import ESMod
from copy import deepcopy
from multiprocessing import Process, Queue
import random
import numpy as np
from collections import defaultdict
import time
import sys
import uuid
#multiprocessing trick to parallelize nested functions in python (un-picklable objects!)
def globalize(func):
def result(*args, **kwargs):
return -func(*args, **kwargs)
result.__name__ = result.__qualname__ = uuid.uuid4().hex
setattr(sys.modules[result.__module__], result.__name__, result)
return result
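# Illustrative note (not part of the original module): because the wrapper
# returns -func(...), `globalize` flips the sign of the fitness while also
# registering the callable under a module-level name so it can be pickled by
# multiprocessing, e.g.:
#
#   neg_sphere = globalize(lambda x: sum(v * v for v in x))
#   neg_sphere([1.0, 2.0])   # -> -5.0, and the wrapper is picklable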
class PESA2(ExperienceReplay):
"""
Prioritized replay for Evolutionary Swarm Algorithms: PESA 2 (Modern Version)
A hybrid algorithm of GWO, DE, and WOA
*PESA2 Major Parameters*
:param mode: (str) problem type, either "min" for minimization problem or "max" for maximization
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param fit: (function) the fitness function
    :param R_frac: (float) fraction of ``npop``, ``nwolves``, ``nwhales`` to survive to the next generation.
                   Also, ``R_frac`` determines the number of individuals to replay from the memory
:param memory_size: (int) max size of the replay memory (if None, ``memory_size`` is built to accommodate all samples during search)
:param alpha_init: (float) initial value of the prioritized replay coefficient (See **Notes** below)
:param alpha_end: (float) final value of the prioritized replay coefficient (See **Notes** below)
*PESA2 Auxiliary Parameters (for the internal algorithms)*
:param npop: (int) for **DE**, total number of individuals in DE population
:param CR: (float) for **DE**, crossover probability between [0,1]
:param F: (float) for **DE**, differential/mutation weight between [0,2]
    :param nwolves: (int) for **GWO**, number of wolves for GWO
    :param nwhales: (int) for **WOA**, number of whales in the population of WOA
*PESA2 Misc. Parameters*
:param ncores: (int) number of parallel processors
:param seed: (int) random seed for sampling
"""
def __init__ (self, mode, bounds, fit, R_frac=0.5, #general parameters
memory_size=None, alpha_init=0.1, alpha_end=1, #replay parameters
                  nwolves=5, #GWO parameters
npop=50, CR=0.7, F=0.5, #DE parameters
nwhales=10, #WOA parameters
ncores=1, seed=None): #misc parameters
if seed:
random.seed(seed)
np.random.seed(seed)
#--mir
self.mode=mode
if mode == 'max':
self.FIT=fit
elif mode == 'min':
            self.FIT = globalize(lambda x: fit(x)) #globalize negates and serializes the nested fit so the 'max'-oriented internal algorithms can solve a 'min' problem
else:
raise ValueError('--error: The mode entered by user is invalid, use either `min` or `max`')
if ncores > 1:
if nwolves >= npop:
nwolves=npop
else:
assert npop % nwolves==0, '--error: since ncores > 1, for max efficiency of PESA2, choose npop ({}) and nwolves ({}) that are either equal or divisible, e.g. npop=60, nwolves=5'.format(npop, nwolves)
if nwhales >= npop:
nwhales=npop
else:
assert npop % nwhales==0, '--error: since ncores > 1, for max efficiency of PESA2, choose npop ({}) and nwhales ({}) that are either equal or divisible, e.g. npop=60, nwhales=5'.format(npop, nwhales)
self.GWO_gen=int(npop/nwolves)
self.WOA_gen=int(npop/nwhales)
if self.GWO_gen < 1:
self.GWO_gen=1
if self.WOA_gen < 1:
self.WOA_gen=1
self.BOUNDS=bounds
self.NPOP=npop
self.ncores=ncores
if ncores <= 3:
self.NCORES=1
# option for first-level parallelism
self.PROC=False
else:
# option for first-level parallelism
self.PROC=True
self.NCORES=int(ncores/3)
self.SEED=seed
#--------------------
#Experience Replay
#--------------------
self.MODE='prior'
self.ALPHA0=alpha_init
self.ALPHA1=alpha_end
#--------------------
# GWO hyperparameters
#--------------------
self.NWOLVES=nwolves
#--------------------
# WOA HyperParameters
#--------------------
self.NWHALES=nwhales
#--------------------
# DE HyperParameters
#--------------------
self.F=F
self.CR=CR
#-------------------------------
#Memory Supply for each method
#-------------------------------
assert 0 <= R_frac <= 1, '--error: The value of R_frac ({}) MUST be between 0 and 1'.format(R_frac)
self.MU_DE=int(R_frac*self.NPOP)
self.MU_WOA=int(R_frac*self.NWHALES)
self.MU_GWO=int(R_frac*self.NWOLVES)
#--------------------
# Fixed/Derived parameters
#--------------------
self.nx=len(self.BOUNDS) #all
self.memory_size=memory_size
def evolute(self, ngen, x0=None, replay_every=1, warmup=100, verbose=True):
"""
This function evolutes the PESA2 algorithm for number of generations.
:param ngen: (int) number of generations to evolute
:param x0: (list of lists) initial samples to start the replay memory (``len(x0)`` must be equal or more than ``npop``)
:param replay_every: (int) perform memory replay every number of generations, default: replay after every generation
:param warmup: (int) number of random warmup samples to initialize the replay memory and must be equal or more than ``npop`` (only used if ``x0=None``)
:param verbose: (int) print statistics to screen, 0: no print, 1: PESA print, 2: detailed print
:return: (dict) dictionary containing major PESA search results
"""
self.verbose=verbose
self.NGEN=int(ngen/replay_every)
self.STEPS=self.NGEN*self.NPOP #all
if self.memory_size:
self.MEMORY_SIZE=self.memory_size
else:
self.MEMORY_SIZE=self.STEPS*3+1 #PESA
#-------------------------------------------------------
# Check if initial pop is provided as initial guess
#-------------------------------------------------------
if x0:
# use provided initial guess
            warm=ESMod(bounds=self.BOUNDS, fit=self.FIT, mu=self.NPOP, lambda_=self.NPOP, ncores=self.ncores)   #lambda_ set to npop, mirroring the warmup branch below
x0size=len(x0)
assert x0size >= self.NPOP, 'the number of lists in x0 ({}) must be more than or equal npop ({})'.format(x0size, self.NPOP)
self.pop0=warm.init_pop(warmup=x0size, x_known=x0) #initial population for all methods (use external ES modules for initialization)
else:
#create initial guess
assert warmup >= self.NPOP, 'the number of warmup samples ({}) must be more than or equal npop ({})'.format(warmup, self.NPOP)
warm=ESMod(bounds=self.BOUNDS, fit=self.FIT, mu=self.NPOP, lambda_=self.NPOP, ncores=self.ncores)
self.pop0=warm.init_pop(warmup=warmup) #initial population for all methods (use external ES modules for initialization)
self.fit_hist=[]
#------------------------------
# Step 1: Initialize the memory
#------------------------------
self.mymemory=ExperienceReplay(size=self.MEMORY_SIZE) #memory object
xvec0, obj0=[self.pop0[item][0] for item in self.pop0], [self.pop0[item][2] for item in self.pop0] #parse the initial samples
self.mymemory.add(xvec=xvec0, obj=obj0, method=['na']*len(xvec0)) # add initial samples to the replay memory
#--------------------------------
# Step 2: Initialize all methods
#--------------------------------
# Obtain initial population for all methods
x0_gwo, fit0_gwo, x0_de, fit0_de, x0_woa, fit0_woa=self.init_guess(pop0=self.pop0)
# Initialize GWO class
gwo=GWOmod(mode='max', bounds=self.BOUNDS, fit=self.FIT, nwolves=self.NWOLVES, ncores=self.NCORES, seed=self.SEED)
# Initialize DE class
de=DEmod(bounds=self.BOUNDS, fit=self.FIT, npop=self.NPOP, F=self.F, CR=self.CR, ncores=self.NCORES, seed=self.SEED)
# Initialize WOA class
woa=WOAmod(mode='max', bounds=self.BOUNDS, fit=self.FIT, nwhales=self.NWHALES,
ncores=self.NCORES, seed=self.SEED)
#--------------------------------
# Step 3: Initialize PESA engine
#--------------------------------
#Use initial samples as first guess
self.gwo_next=deepcopy(x0_gwo)
self.de_next=deepcopy(x0_de)
self.woa_next=deepcopy(x0_woa)
self.STEP0=1 #step counter
self.ALPHA=self.ALPHA0 #set alpha to alpha0
#--------------------------------
# Step 4: PESA evolution
#--------------------------------
for gen in range(1,self.NGEN+1):
#-------------------------------------------------------------------------------------------------------------------
# Step 5: evolute all methods for 1 generation
#-------------------------------------------------------------------------------------------------------------------
#**********************************
            #--Step 5A: Complete PARALLEL calcs
# via multiprocess.Process
#*********************************
if self.PROC:
QGWO = Queue(); QDE=Queue(); QWOA=Queue()
def gwo_worker():
xgwo_best, ygwo_best, gwo_new= gwo.evolute(ngen=self.GWO_gen*replay_every, x0=self.gwo_next, verbose=0)
QGWO.put((xgwo_best, ygwo_best, gwo_new))
def de_worker():
random.seed(self.SEED)
xde_best, yde_best, de_new=de.evolute(ngen=1*replay_every,x0=self.de_next, verbose=0)
QDE.put((xde_best, yde_best, de_new))
def woa_worker():
random.seed(self.SEED)
xwoa_best, ywoa_best, woa_new=woa.evolute(ngen=self.WOA_gen*replay_every,x0=self.woa_next, verbose=0)
QWOA.put((xwoa_best, ywoa_best, woa_new))
Process(target=gwo_worker).start()
Process(target=de_worker).start()
Process(target=woa_worker).start()
#get the values from the Queue
self.gwo_best, self.ygwo_best, self.gwo_next=QGWO.get()
self.de_best, self.yde_best, self.de_next=QDE.get()
self.woa_best, self.ywoa_best, self.woa_next=QWOA.get()
#*********************************
#--Step 5B: Complete Serial calcs
#*********************************
else:
self.gwo_best, self.ygwo_best, self.gwo_next= gwo.evolute(ngen=self.GWO_gen*replay_every, x0=self.gwo_next, verbose=0)
self.de_best, self.yde_best, self.de_next=de.evolute(ngen=1*replay_every,x0=self.de_next, verbose=0)
self.woa_best, self.ywoa_best, self.woa_next=woa.evolute(ngen=self.WOA_gen*replay_every, x0=self.woa_next, verbose=0)
#*********************************************************
# Step 5C: Obtain relevant statistics for this generation
#*********************************************************
self.gwo_next=self.select(pop=self.gwo_next, k=self.MU_GWO)
self.de_next=self.select(pop=self.de_next, k=self.MU_DE)
self.woa_next=self.select(pop=self.woa_next, k=self.MU_WOA)
self.STEP0=self.STEP0+self.NPOP #update step counter
if self.verbose==2:
self.printout(mode=1, gen=gen)
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
#-----------------------------
# Step 6: Update the memory
#-----------------------------
self.memory_update()
#-----------------------------------------------------------------
# Step 7: Sample from the memory and prepare for next Generation
#-----------------------------------------------------------------
self.resample()
#--------------------------------------------------------
            # Step 8: Anneal Alpha if prioritized replay is used
#--------------------------------------------------------
if self.MODE=='prior': #anneal alpha between alpha0 (lower) and alpha1 (upper)
self.ALPHA=self.linear_anneal(step=self.STEP0, total_steps=self.STEPS, a0=self.ALPHA0, a1=self.ALPHA1)
#--------------------------------------------------------
# Step 9: Calculate the memory best and print PESA summary
#--------------------------------------------------------
self.pesa_best=self.mymemory.sample(batch_size=1,mode='greedy')[0] #`greedy` will sample the best in memory
self.fit_hist.append(self.pesa_best[1])
self.memory_size=len(self.mymemory.storage) #memory size so far
if self.verbose: #print summary data to screen
self.printout(mode=2, gen=gen)
#--mir
if self.mode=='min':
self.fitness_best=-self.pesa_best[1]
else:
self.fitness_best=self.pesa_best[1]
#--mir
if self.mode=='min':
self.fit_hist=[-item for item in self.fit_hist]
return self.pesa_best[0], self.fitness_best, self.fit_hist
def linear_anneal(self, step, total_steps, a0, a1):
#"""
#Anneal parameter between a0 and a1
#:param step: current time step
        #:param total_steps: total number of time steps
        #:param a0: lower bound of alpha/parameter
        #:param a1: upper bound of alpha/parameter
#:return
# - annealed value of alpha/parameter
#"""
fraction = min(float(step) / total_steps, 1.0)
return a0 + fraction * (a1 - a0)
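    # Worked example (illustrative): with a0=0.1, a1=1.0 and total_steps=1000,
    # step=0 gives 0.1, step=500 gives 0.55, and any step >= 1000 gives 1.0,
    # so ALPHA ramps linearly from alpha_init to alpha_end during the search.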
def select(self, pop, k=1):
#"""
#Select function sorts the population from max to min based on fitness and select k best
#Inputs:
# pop (dict): population in dictionary structure
# k (int): top k individuals are selected
#Returns:
# best_dict (dict): the new ordered dictionary with top k selected
#"""
pop=list(pop.items())
pop.sort(key=lambda e: e[1][1], reverse=True)
sorted_dict=dict(pop[:k])
#This block creates a new dict where keys are reset to 0 ... k in order to avoid unordered keys after sort
best_dict=defaultdict(list)
index=0
for key in sorted_dict:
best_dict[index].append(sorted_dict[key][0])
best_dict[index].append(sorted_dict[key][1])
index+=1
sorted_dict.clear()
return best_dict
def memory_update(self):
#"""
#This function updates the replay memory with the samples of GWO, DE, and WOA (if used)
#then remove the duplicates from the memory
#"""
gwo_x, gwo_y=[self.gwo_next[item][0] for item in self.gwo_next], [self.gwo_next[item][1] for item in self.gwo_next]
de_x, de_y=[self.de_next[item][0] for item in self.de_next], [self.de_next[item][1] for item in self.de_next]
woa_x, woa_y=[self.woa_next[item][0] for item in self.woa_next], [self.woa_next[item][1] for item in self.woa_next]
self.mymemory.add(xvec=tuple(gwo_x), obj=gwo_y, method=['gwo']*len(gwo_x))
self.mymemory.add(xvec=tuple(de_x), obj=de_y, method=['de']*len(de_x))
self.mymemory.add(xvec=tuple(woa_x), obj=woa_y, method=['woa']*len(woa_x))
def resample(self):
#"""
#This function samples data from the memory and prepares the chains for SA
#the population for ES, and the swarm for PSO for the next generation
# -SA: initial guess for the parallel chains are sampled from the memroy
# -ES: a total of ES_MEMORY (or MU) individuals are sampled from the memory and appended to ES population
# -PSO: a total of PSO_MEMORY (or MU) particles are sampled from the memory and appended to PSO swarm
#For SA: x_next and E_next particpate in next generation
#For PSO: swm_next, local_pso_next, and local_fit_next particpate in next generation
#For ES: pop_next particpates in next generation
#"""
#update the dictionary with new samples for GWO
gwo_replay=self.mymemory.sample(batch_size=self.NWOLVES-self.MU_GWO,mode=self.MODE,alpha=self.ALPHA)
index=self.MU_GWO
for sample in range(self.NWOLVES-self.MU_GWO):
self.gwo_next[index].append(gwo_replay[sample][0])
self.gwo_next[index].append(gwo_replay[sample][1])
index+=1
#update the dictionary with new samples for DE
de_replay=self.mymemory.sample(batch_size=self.NPOP-self.MU_DE,mode=self.MODE,alpha=self.ALPHA)
index=self.MU_DE
for sample in range(self.NPOP-self.MU_DE):
self.de_next[index].append(de_replay[sample][0])
self.de_next[index].append(de_replay[sample][1])
index+=1
#update the dictionary with new samples for WOA
woa_replay=self.mymemory.sample(batch_size=self.NWHALES-self.MU_WOA,mode=self.MODE,alpha=self.ALPHA)
index=self.MU_WOA
for sample in range(self.NWHALES-self.MU_WOA):
self.woa_next[index].append(woa_replay[sample][0])
self.woa_next[index].append(woa_replay[sample][1])
index+=1
#get *_next back to a list of lists for the next loop
self.gwo_next=[self.gwo_next[item][0] for item in self.gwo_next]
self.de_next=[self.de_next[item][0] for item in self.de_next]
self.woa_next=[self.woa_next[item][0] for item in self.woa_next]
def init_guess(self, pop0):
#"""
#This function takes initial guess pop0 and returns initial guesses for GWO, DE, and WOA
#to start PESA evolution
#"""
pop0=list(pop0.items())
pop0.sort(key=lambda e: e[1][2], reverse=True)
sorted_gwo=dict(pop0[:self.NWOLVES])
x0_gwo, fit0_gwo=[sorted_gwo[key][0] for key in sorted_gwo], [sorted_gwo[key][2] for key in sorted_gwo] # initial guess for GWO
sorted_de=dict(pop0[:self.NPOP])
x0_de, fit0_de=[sorted_de[key][0] for key in sorted_de], [sorted_de[key][2] for key in sorted_de] # initial guess for DE
sorted_woa=dict(pop0[:self.NWHALES])
x0_woa, fit0_woa=[sorted_woa[key][0] for key in sorted_woa], [sorted_woa[key][2] for key in sorted_woa] # initial guess for WOA
return x0_gwo, fit0_gwo, x0_de, fit0_de, x0_woa, fit0_woa
def printout(self, mode, gen):
#"""
#Print statistics to screen
#inputs:
        # mode (int): 1 to print for individual algorithms and 2 to print for PESA
# gen (int): current generation number
#"""
if mode == 1:
print('***********************************************************************************************')
print('#############################################################################')
print('GWO step {}/{}, NWolves={}, Ncores={}'.format(self.STEP0-1,self.STEPS, self.NWOLVES, self.NCORES))
print('#############################################################################')
print('Statistics for generation {}'.format(gen))
print('Best Wolf Fitness:', np.round(np.max(self.ygwo_best),4) if self.mode == 'max' else -np.round(np.max(self.ygwo_best),4))
print('Best Wolf Position:', np.round(self.gwo_best,3))
print('#############################################################################')
print('*****************************************************************************')
            print('DE step {}/{}, NPOP={}, F={}, CR={}, Ncores={}'.format(self.STEP0-1,self.STEPS,self.NPOP,np.round(self.F,2), self.CR, self.NCORES))
print('****************************************************************************')
print('Statistics for generation {}'.format(gen))
print('Best Individual Fitness:', np.round(np.max(self.yde_best),4) if self.mode == 'max' else -np.round(np.max(self.yde_best),4))
            print('Best Individual Position:', np.round(self.de_best,3))
print('****************************************************************************')
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
print('WOA step {}/{}, NWhales={}, Ncores={}'.format(self.STEP0-1,self.STEPS, self.NWHALES, self.NCORES))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
print('Statistics for generation {}'.format(gen))
print('Best Whale Fitness:', np.round(np.max(self.ywoa_best),4) if self.mode == 'max' else -np.round(np.max(self.ywoa_best),4))
print('Best Whale Position:', np.round(self.woa_best,3))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
if mode == 2:
print('------------------------------------------------------------')
print('PESA2 step {}/{}, Ncores={}'.format(self.STEP0-1,self.STEPS, self.ncores))
print('------------------------------------------------------------')
print('PESA statistics for generation {}'.format(gen))
print('Best Fitness:', self.pesa_best[1] if self.mode == 'max' else -self.pesa_best[1])
print('Best Individual:', np.round(self.pesa_best[0],2))
print('ALPHA:', np.round(self.ALPHA,3))
print('Memory Size:', self.memory_size)
print('------------------------------------------------------------')
print('***********************************************************************************************')
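
# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration only). The sphere fitness
# function, the bounds, and the parameter values below are assumptions and
# are not part of the original module.
# ----------------------------------------------------------------------------
if __name__ == '__main__':

    def sphere(x):
        """Simple benchmark with a global minimum of 0 at the origin."""
        return sum(xi**2 for xi in x)

    bounds = {'x1': ['float', -5.0, 5.0],
              'x2': ['float', -5.0, 5.0],
              'x3': ['float', -5.0, 5.0]}

    pesa2 = PESA2(mode='min', bounds=bounds, fit=sphere,
                  npop=50, nwolves=5, nwhales=10, ncores=1, seed=1)
    x_best, y_best, history = pesa2.evolute(ngen=20, warmup=100, verbose=0)
    print('best x:', x_best, 'best fitness:', y_best)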
|
__init__.py
|
"""Miscellaneous helper functions (not wiki-dependent)."""
#
# (C) Pywikibot team, 2008-2021
#
# Distributed under the terms of the MIT license.
#
import collections
import gzip
import hashlib
import inspect
import itertools
import os
import queue
import re
import stat
import subprocess
import sys
import threading
import time
import types
from collections.abc import Iterator, Mapping
from contextlib import suppress
from datetime import datetime
from distutils.version import LooseVersion, Version
from functools import wraps
from importlib import import_module
from inspect import getfullargspec
from ipaddress import ip_address
from itertools import zip_longest
from typing import Optional
from warnings import catch_warnings, showwarning, warn
from pywikibot.logging import debug
from pywikibot.tools._unidata import _first_upper_exception
try:
import bz2
except ImportError as bz2_import_error:
try:
import bz2file as bz2
warn('package bz2 was not found; using bz2file', ImportWarning)
except ImportError:
warn('package bz2 and bz2file were not found', ImportWarning)
bz2 = bz2_import_error
try:
import lzma
except ImportError as lzma_import_error:
lzma = lzma_import_error
PYTHON_VERSION = sys.version_info[:3]
_logger = 'tools'
class _NotImplementedWarning(RuntimeWarning):
"""Feature that is no longer implemented."""
pass
def is_IP(IP: str) -> bool: # noqa N802, N803
"""Verify the IP address provided is valid.
No logging is performed. Use ip_address instead to catch errors.
@param IP: IP address
"""
with suppress(ValueError):
ip_address(IP)
return True
return False
def has_module(module, version=None):
"""Check whether a module can be imported."""
try:
m = import_module(module)
except ImportError:
pass
else:
if version is None:
return True
try:
module_version = LooseVersion(m.__version__)
except AttributeError:
pass
else:
if module_version >= LooseVersion(version):
return True
warn('Module version {} is lower than requested version {}'
.format(module_version, version), ImportWarning)
return False
def empty_iterator():
# http://stackoverflow.com/a/13243870/473890
"""An iterator which does nothing."""
return
yield
class classproperty: # noqa: N801
"""
Descriptor class to access a class method as a property.
This class may be used as a decorator::
class Foo:
_bar = 'baz' # a class property
@classproperty
def bar(cls): # a class property method
return cls._bar
Foo.bar gives 'baz'.
"""
def __init__(self, cls_method):
"""Hold the class method."""
self.method = cls_method
self.__doc__ = self.method.__doc__
def __get__(self, instance, owner):
"""Get the attribute of the owner class by its method."""
return self.method(owner)
class suppress_warnings(catch_warnings): # noqa: N801
"""A decorator/context manager that temporarily suppresses warnings.
    Those suppressed warnings that do not match the parameters will be shown
    again upon exit.
"""
def __init__(self, message='', category=Warning, filename=''):
"""Initialize the object.
The parameter semantics are similar to those of
`warnings.filterwarnings`.
@param message: A string containing a regular expression that the start
of the warning message must match. (case-insensitive)
@type message: str
@param category: A class (a subclass of Warning) of which the warning
category must be a subclass in order to match.
@type category: type
@param filename: A string containing a regular expression that the
start of the path to the warning module must match.
(case-sensitive)
@type filename: str
"""
self.message_match = re.compile(message, re.I).match
self.category = category
self.filename_match = re.compile(filename).match
super().__init__(record=True)
def __enter__(self):
"""Catch all warnings and store them in `self.log`."""
self.log = super().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stop logging warnings and show those that do not match to params."""
super().__exit__(exc_type, exc_val, exc_tb)
for warning in self.log:
if (
not issubclass(warning.category, self.category)
or not self.message_match(str(warning.message))
or not self.filename_match(warning.filename)
):
showwarning(
warning.message, warning.category, warning.filename,
warning.lineno, warning.file, warning.line)
def __call__(self, func):
"""Decorate func to suppress warnings."""
@wraps(func)
def suppressed_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return suppressed_func
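    # Usage sketch (illustrative, not part of the original API docs): the class
    # works both as a context manager and as a decorator, e.g.
    #
    #   with suppress_warnings('deprecated', category=DeprecationWarning):
    #       call_something_noisy()          # hypothetical noisy call
    #
    #   @suppress_warnings(category=UserWarning)
    #   def quiet_call():
    #       call_something_noisy()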
# From http://python3porting.com/preparing.html
class ComparableMixin:
"""Mixin class to allow comparing to other objects which are comparable."""
def __lt__(self, other):
"""Compare if self is less than other."""
return other > self._cmpkey()
def __le__(self, other):
"""Compare if self is less equals other."""
return other >= self._cmpkey()
def __eq__(self, other):
"""Compare if self is equal to other."""
return other == self._cmpkey()
def __ge__(self, other):
"""Compare if self is greater equals other."""
return other <= self._cmpkey()
def __gt__(self, other):
"""Compare if self is greater than other."""
return other < self._cmpkey()
def __ne__(self, other):
"""Compare if self is not equal to other."""
return other != self._cmpkey()
class DotReadableDict:
"""Parent class of Revision() and FileInfo().
Provide: __getitem__() and __repr__().
"""
def __getitem__(self, key):
"""Give access to class values by key.
Revision class may also give access to its values by keys
e.g. revid parameter may be assigned by revision['revid']
as well as revision.revid. This makes formatting strings with
% operator easier.
"""
return getattr(self, key)
def __repr__(self):
"""Return a more complete string representation."""
return repr(self.__dict__)
class frozenmap(Mapping): # noqa: N801
"""Frozen mapping, preventing write after initialisation."""
def __init__(self, data=(), **kwargs):
"""Initialize data in same ways like a dict."""
self.__data = {}
if isinstance(data, Mapping):
for key in data:
self.__data[key] = data[key]
elif hasattr(data, 'keys'):
for key in data.keys():
self.__data[key] = data[key]
else:
for key, value in data:
self.__data[key] = value
for key, value in kwargs.items():
self.__data[key] = value
def __getitem__(self, key):
return self.__data[key]
def __iter__(self):
return iter(self.__data)
def __len__(self):
return len(self.__data)
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.__data)
class LazyRegex:
"""
Regex object that obtains and compiles the regex on usage.
Instances behave like the object created using L{re.compile}.
"""
def __init__(self, pattern, flags=0):
"""
Initializer.
@param pattern: L{re} regex pattern
@type pattern: str or callable
@param flags: L{re.compile} flags
@type flags: int
"""
self.raw = pattern
self.flags = flags
super().__init__()
@property
def raw(self):
"""The raw property."""
if callable(self._raw):
self._raw = self._raw()
return self._raw
@raw.setter
def raw(self, value):
self._raw = value
self._compiled = None
@property
def flags(self):
"""The flags property."""
return self._flags
@flags.setter
def flags(self, value):
self._flags = value
self._compiled = None
def __getattr__(self, attr):
"""Compile the regex and delegate all attribute to the regex."""
if self._raw:
if not self._compiled:
self._compiled = re.compile(self.raw, self.flags)
if hasattr(self._compiled, attr):
return getattr(self._compiled, attr)
raise AttributeError('%s: attr %s not recognised'
% (self.__class__.__name__, attr))
else:
raise AttributeError('%s.raw not set' % self.__class__.__name__)
class DeprecatedRegex(LazyRegex):
"""Regex object that issues a deprecation notice."""
def __init__(self, pattern, flags=0, name=None, instead=None, since=None):
"""
Initializer.
If name is None, the regex pattern will be used as part of
the deprecation warning.
@param name: name of the object that is deprecated
@type name: str or None
@param instead: if provided, will be used to specify the replacement
of the deprecated name
@type instead: str
"""
super().__init__(pattern, flags)
self._name = name or self.raw
self._instead = instead
self._since = since
def __getattr__(self, attr):
"""Issue deprecation warning."""
issue_deprecation_warning(
self._name, self._instead, warning_class=FutureWarning,
since=self._since)
return super().__getattr__(attr)
def first_lower(string: str) -> str:
"""
Return a string with the first character uncapitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].lower() + string[1:]
def first_upper(string: str) -> str:
"""
Return a string with the first character capitalized.
Empty strings are supported. The original string is not changed.
@note: MediaWiki doesn't capitalize some characters the same way as Python.
This function tries to be close to MediaWiki's capitalize function in
title.php. See T179115 and T200357.
"""
first = string[:1]
return (_first_upper_exception(first) or first.upper()) + string[1:]
def normalize_username(username) -> Optional[str]:
"""Normalize the username."""
if not username:
return None
username = re.sub('[_ ]+', ' ', username).strip()
return first_upper(username)
class MediaWikiVersion(Version):
"""
Version object to allow comparing 'wmf' versions with normal ones.
    The version mainly consists of digits separated by periods. After that is a
    suffix which may only be 'wmf<number>', 'alpha', 'beta<number>' or
    '-rc.<number>' (the - and . are optional). They are considered from old to
    new in that order, with a version number without suffix considered the
newest. This secondary difference is stored in an internal _dev_version
attribute.
Two versions are equal if their normal version and dev version are equal. A
version is greater if the normal version or dev version is greater. For
example:
1.34 < 1.34.1 < 1.35wmf1 < 1.35alpha < 1.35beta1 < 1.35beta2
    < 1.35-rc.1 < 1.35-rc.2 < 1.35
Any other suffixes are considered invalid.
"""
MEDIAWIKI_VERSION = re.compile(
r'(\d+(?:\.\d+)+)(-?wmf\.?(\d+)|alpha|beta(\d+)|-?rc\.?(\d+)|.*)?$')
@classmethod
def from_generator(cls, generator):
"""Create instance using the generator string."""
if not generator.startswith('MediaWiki '):
raise ValueError('Generator string ({!r}) must start with '
'"MediaWiki "'.format(generator))
return cls(generator[len('MediaWiki '):])
def parse(self, vstring):
"""Parse version string."""
version_match = MediaWikiVersion.MEDIAWIKI_VERSION.match(vstring)
if not version_match:
raise ValueError('Invalid version number "{}"'.format(vstring))
components = [int(n) for n in version_match.group(1).split('.')]
# The _dev_version numbering scheme might change. E.g. if a stage
# between 'alpha' and 'beta' is added, 'beta', 'rc' and stable releases
# are reassigned (beta=3, rc=4, stable=5).
if version_match.group(3): # wmf version
self._dev_version = (0, int(version_match.group(3)))
elif version_match.group(4):
self._dev_version = (2, int(version_match.group(4)))
elif version_match.group(5):
self._dev_version = (3, int(version_match.group(5)))
elif version_match.group(2) in ('alpha', '-alpha'):
self._dev_version = (1, )
else:
for handled in ('wmf', 'alpha', 'beta', 'rc'):
# if any of those pops up here our parser has failed
assert handled not in version_match.group(2), \
'Found "{}" in "{}"'.format(handled,
version_match.group(2))
if version_match.group(2):
debug('Additional unused version part '
'"{}"'.format(version_match.group(2)),
_logger)
self._dev_version = (4, )
self.suffix = version_match.group(2) or ''
self.version = tuple(components)
def __str__(self):
"""Return version number with optional suffix."""
return '.'.join(str(v) for v in self.version) + self.suffix
def _cmp(self, other):
if isinstance(other, str):
other = MediaWikiVersion(other)
if self.version > other.version:
return 1
if self.version < other.version:
return -1
if self._dev_version > other._dev_version:
return 1
if self._dev_version < other._dev_version:
return -1
return 0
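    # Example comparisons (illustrative, following the parsing rules above):
    #   MediaWikiVersion('1.35wmf1') < MediaWikiVersion('1.35alpha')   -> True
    #   MediaWikiVersion('1.35beta2') < MediaWikiVersion('1.35')       -> True
    #   MediaWikiVersion('1.34.1') < MediaWikiVersion('1.35wmf1')      -> True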
class ThreadedGenerator(threading.Thread):
"""Look-ahead generator class.
Runs a generator in a separate thread and queues the results; can
be called like a regular generator.
Subclasses should override self.generator, I{not} self.run
Important: the generator thread will stop itself if the generator's
internal queue is exhausted; but, if the calling program does not use
all the generated values, it must call the generator's stop() method to
stop the background thread. Example usage:
>>> gen = ThreadedGenerator(target=range, args=(20,))
>>> try:
... data = list(gen)
... finally:
... gen.stop()
>>> data
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
"""
def __init__(self, group=None, target=None, name='GeneratorThread',
args=(), kwargs=None, qsize=65536):
"""Initializer. Takes same keyword arguments as threading.Thread.
target must be a generator function (or other callable that returns
an iterable object).
@param qsize: The size of the lookahead queue. The larger the qsize,
the more values will be computed in advance of use (which can eat
up memory and processor time).
@type qsize: int
"""
if kwargs is None:
kwargs = {}
if target:
self.generator = target
if not hasattr(self, 'generator'):
raise RuntimeError('No generator for ThreadedGenerator to run.')
self.args, self.kwargs = args, kwargs
super().__init__(group=group, name=name)
self.queue = queue.Queue(qsize)
self.finished = threading.Event()
def __iter__(self):
"""Iterate results from the queue."""
if not self.is_alive() and not self.finished.isSet():
self.start()
# if there is an item in the queue, yield it, otherwise wait
while not self.finished.isSet():
try:
yield self.queue.get(True, 0.25)
except queue.Empty:
pass
except KeyboardInterrupt:
self.stop()
def stop(self):
"""Stop the background thread."""
self.finished.set()
def run(self):
"""Run the generator and store the results on the queue."""
iterable = any(hasattr(self.generator, key)
for key in ('__iter__', '__getitem__'))
if iterable and not self.args and not self.kwargs:
self.__gen = self.generator
else:
self.__gen = self.generator(*self.args, **self.kwargs)
for result in self.__gen:
while True:
if self.finished.isSet():
return
try:
self.queue.put_nowait(result)
except queue.Full:
time.sleep(0.25)
continue
break
# wait for queue to be emptied, then kill the thread
while not self.finished.isSet() and not self.queue.empty():
time.sleep(0.25)
self.stop()
def itergroup(iterable, size: int):
"""Make an iterator that returns lists of (up to) size items from iterable.
Example:
>>> i = itergroup(range(25), 10)
>>> print(next(i))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print(next(i))
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
>>> print(next(i))
[20, 21, 22, 23, 24]
>>> print(next(i))
Traceback (most recent call last):
...
StopIteration
"""
group = []
for item in iterable:
group.append(item)
if len(group) == size:
yield group
group = []
if group:
yield group
def islice_with_ellipsis(iterable, *args, marker='…'):
"""
Generator which yields the first n elements of the iterable.
If more elements are available and marker is True, it returns an extra
string marker as continuation mark.
    The function takes the same positional arguments as C{itertools.islice}
    and the additional keyword marker.
@param iterable: the iterable to work on
@type iterable: iterable
@param args: same args as:
- C{itertools.islice(iterable, stop)}
- C{itertools.islice(iterable, start, stop[, step])}
@param marker: element to yield if iterable still contains elements
after showing the required number. Default value: '…'
@type marker: str
"""
s = slice(*args)
_iterable = iter(iterable)
yield from itertools.islice(_iterable, *args)
if marker and s.stop is not None:
with suppress(StopIteration):
next(_iterable)
yield marker
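# Example (illustrative): only the first three elements are yielded, followed
# by the continuation marker because more elements remain:
#   list(islice_with_ellipsis(range(10), 3))  ->  [0, 1, 2, '…']
#   list(islice_with_ellipsis(range(3), 3))   ->  [0, 1, 2]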
class ThreadList(list):
"""A simple threadpool class to limit the number of simultaneous threads.
Any threading.Thread object can be added to the pool using the append()
method. If the maximum number of simultaneous threads has not been reached,
the Thread object will be started immediately; if not, the append() call
will block until the thread is able to start.
>>> pool = ThreadList(limit=10)
>>> def work():
... time.sleep(1)
...
>>> for x in range(20):
... pool.append(threading.Thread(target=work))
...
"""
_logger = 'threadlist'
def __init__(self, limit=128, wait_time=2, *args):
"""Initializer.
@param limit: the number of simultaneous threads
@type limit: int
@param wait_time: how long to wait if active threads exceeds limit
@type wait_time: int or float
"""
self.limit = limit
self.wait_time = wait_time
super().__init__(*args)
for item in self:
if not isinstance(item, threading.Thread):
raise TypeError("Cannot add '{}' to ThreadList"
.format(type(item)))
def active_count(self):
"""Return the number of alive threads and delete all non-alive ones."""
cnt = 0
for item in self[:]:
if item.is_alive():
cnt += 1
else:
self.remove(item)
return cnt
def append(self, thd):
"""Add a thread to the pool and start it."""
if not isinstance(thd, threading.Thread):
raise TypeError("Cannot append '{}' to ThreadList"
.format(type(thd)))
while self.active_count() >= self.limit:
time.sleep(self.wait_time)
super().append(thd)
thd.start()
debug("thread {} ('{}') started".format(len(self), type(thd)),
self._logger)
def stop_all(self):
"""Stop all threads the pool."""
if self:
debug('EARLY QUIT: Threads: {}'.format(len(self)), self._logger)
for thd in self:
thd.stop()
debug('EARLY QUIT: Queue size left in {}: {}'
.format(thd, thd.queue.qsize()), self._logger)
def intersect_generators(genlist, allow_duplicates=False):
"""
Intersect generators listed in genlist.
Yield items only if they are yielded by all generators in genlist.
Threads (via ThreadedGenerator) are used in order to run generators
in parallel, so that items can be yielded before generators are
exhausted.
Threads are stopped when they are either exhausted or Ctrl-C is pressed.
Quitting before all generators are finished is attempted if
there is no more chance of finding an item in all queues.
@param genlist: list of page generators
@type genlist: list
@param allow_duplicates: allow duplicates if present in all generators
@type allow_duplicates: bool
"""
# If any generator is empty, no pages are going to be returned
for source in genlist:
if not source:
debug('At least one generator ({!r}) is empty and execution was '
'skipped immediately.'.format(source), 'intersect')
return
# Item is cached to check that it is found n_gen
# times before being yielded.
from collections import Counter
cache = collections.defaultdict(Counter)
n_gen = len(genlist)
# Class to keep track of alive threads.
# Start new threads and remove completed threads.
thrlist = ThreadList()
for source in genlist:
threaded_gen = ThreadedGenerator(name=repr(source), target=source)
threaded_gen.daemon = True
thrlist.append(threaded_gen)
ones = Counter(thrlist)
seen = {}
while True:
# Get items from queues in a round-robin way.
for t in thrlist:
try:
# TODO: evaluate if True and timeout is necessary.
item = t.queue.get(True, 0.1)
if not allow_duplicates and hash(item) in seen:
continue
# Cache entry is a Counter of ThreadedGenerator objects.
cache[item].update([t])
if len(cache[item]) == n_gen:
if allow_duplicates:
yield item
# Remove item from cache if possible.
if all(el == 1 for el in cache[item].values()):
cache.pop(item)
else:
cache[item] -= ones
else:
yield item
cache.pop(item)
seen[hash(item)] = True
active = thrlist.active_count()
max_cache = n_gen
if cache.values():
max_cache = max(len(v) for v in cache.values())
# No. of active threads is not enough to reach n_gen.
# We can quit even if some thread is still active.
# There could be an item in all generators which has not yet
# appeared from any generator. Only when we have lost one
# generator, then we can bail out early based on seen items.
if active < n_gen and n_gen - max_cache > active:
thrlist.stop_all()
return
except queue.Empty:
pass
except KeyboardInterrupt:
thrlist.stop_all()
# All threads are done.
if thrlist.active_count() == 0:
return
def roundrobin_generators(*iterables):
"""Yield simultaneous from each iterable.
Sample:
>>> tuple(roundrobin_generators('ABC', range(5)))
('A', 0, 'B', 1, 'C', 2, 3, 4)
@param iterables: any iterable to combine in roundrobin way
@type iterables: iterable
@return: the combined generator of iterables
@rtype: generator
"""
return (item
for item in itertools.chain.from_iterable(zip_longest(*iterables))
if item is not None)
def filter_unique(iterable, container=None, key=None, add=None):
"""
Yield unique items from an iterable, omitting duplicates.
By default, to provide uniqueness, it puts the generated items into a
set created as a local variable. It only yields items which are not
already present in the local set.
For large collections, this is not memory efficient, as a strong reference
to every item is kept in a local set which cannot be cleared.
Also, the local set can't be re-used when chaining unique operations on
multiple generators.
To avoid these issues, it is advisable for the caller to provide their own
container and set the key parameter to be the function L{hash}, or use a
L{weakref} as the key.
The container can be any object that supports __contains__.
If the container is a set or dict, the method add or __setitem__ will be
used automatically. Any other method may be provided explicitly using the
add parameter.
Beware that key=id is only useful for cases where id() is not unique.
Note: This is not thread safe.
@param iterable: the source iterable
@type iterable: collections.abc.Iterable
@param container: storage of seen items
@type container: type
@param key: function to convert the item to a key
@type key: callable
@param add: function to add an item to the container
@type add: callable
"""
if container is None:
container = set()
if not add:
if hasattr(container, 'add'):
def container_add(x):
container.add(key(x) if key else x)
add = container_add
else:
def container_setitem(x):
container.__setitem__(key(x) if key else x,
True)
add = container_setitem
for item in iterable:
try:
if (key(item) if key else item) not in container:
add(item)
yield item
except StopIteration:
return
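# Usage sketch (illustrative): by default a local set is used as the "seen"
# container; passing a shared dict with key=hash keeps only hashes and lets
# several generators share the same "seen" state:
#
#   list(filter_unique([1, 2, 1, 3, 2]))   ->  [1, 2, 3]
#
#   seen = {}
#   gen1 = filter_unique(iterable_a, container=seen, key=hash)  # iterable_a/_b
#   gen2 = filter_unique(iterable_b, container=seen, key=hash)  # are placeholders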
class CombinedError(KeyError, IndexError):
"""An error that gets caught by both KeyError and IndexError."""
class EmptyDefault(str, Mapping):
"""
A default for a not existing siteinfo property.
    It should be chosen if there is no better default known. It acts like an
    empty collection, so it can be iterated through safely if treated as a
    list, tuple, set or dictionary. It is also basically an empty string.
Accessing a value via __getitem__ will result in a combined KeyError and
IndexError.
"""
def __init__(self):
"""Initialise the default as an empty string."""
str.__init__(self)
def __iter__(self):
"""An iterator which does nothing and drops the argument."""
return empty_iterator()
def __getitem__(self, key):
"""Raise always a L{CombinedError}."""
raise CombinedError(key)
EMPTY_DEFAULT = EmptyDefault()
class SelfCallMixin:
"""
Return self when called.
When '_own_desc' is defined it'll also issue a deprecation warning using
issue_deprecation_warning('Calling ' + _own_desc, 'it directly').
"""
def __call__(self):
"""Do nothing and just return itself."""
if hasattr(self, '_own_desc'):
issue_deprecation_warning('Calling {}'.format(self._own_desc),
'it directly',
warning_class=FutureWarning,
since='20150515')
return self
class SelfCallDict(SelfCallMixin, dict):
"""Dict with SelfCallMixin."""
pass
class SelfCallString(SelfCallMixin, str):
"""String with SelfCallMixin."""
pass
class DequeGenerator(Iterator, collections.deque):
"""A generator that allows items to be added during generating."""
def __next__(self):
"""Iterator method."""
if self:
return self.popleft()
raise StopIteration
def __repr__(self):
"""Provide an object representation without clearing the content."""
items = list(self)
result = '{}({})'.format(self.__class__.__name__, items)
self.extend(items)
return result
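# Illustrative sketch (not part of the original module): items appended while
# iterating a DequeGenerator are still yielded, which a plain generator
# could not do.
def _example_deque_generator():
    gen = DequeGenerator()
    gen.extend([1, 2])
    result = []
    for item in gen:
        result.append(item)
        if item == 1:
            gen.append(3)  # added during generation and yielded later
    assert result == [1, 2, 3]
    return result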
def open_archive(filename, mode='rb', use_extension=True):
"""
Open a file and uncompress it if needed.
This function supports bzip2, gzip, 7zip, lzma, and xz as compression
containers. It uses the packages available in the standard library for
bzip2, gzip, lzma, and xz so they are always available. 7zip is only
available when a 7za program is available and only supports reading
from it.
The compression is either selected via the magic number or file ending.
@param filename: The filename.
@type filename: str
@param use_extension: Use the file extension instead of the magic number
to determine the type of compression (default True). Must be True when
writing or appending.
@type use_extension: bool
@param mode: The mode in which the file should be opened. It may either be
'r', 'rb', 'a', 'ab', 'w' or 'wb'. All modes open the file in binary
mode. It defaults to 'rb'.
@type mode: str
@raises ValueError: When 7za is not available or the opening mode is
unknown or it tries to write a 7z archive.
@raises FileNotFoundError: When the filename doesn't exist and it tries
to read from it or it tries to determine the compression algorithm.
@raises OSError: When it's not a 7z archive but the file extension is 7z.
It is also raised by bz2 when its content is invalid. gzip does not
immediately raise that error but only on reading it.
@raises lzma.LZMAError: When error occurs during compression or
decompression or when initializing the state with lzma or xz.
@raises ImportError: When file is compressed with bz2 but neither bz2 nor
bz2file is importable, or when file is compressed with lzma or xz but
lzma is not importable.
@return: A file-like object returning the uncompressed data in binary mode.
@rtype: file-like object
"""
# extension_map maps magic_number to extension.
# Unfortunately, legacy LZMA container has no magic number
extension_map = {
b'BZh': 'bz2',
b'\x1F\x8B\x08': 'gz',
b"7z\xBC\xAF'\x1C": '7z',
b'\xFD7zXZ\x00': 'xz',
}
if mode in ('r', 'a', 'w'):
mode += 'b'
elif mode not in ('rb', 'ab', 'wb'):
raise ValueError('Invalid mode: "{}"'.format(mode))
if use_extension:
        # If '.' is not in filename, rfind returns -1, so the slice keeps only
        # the last character and [1:] yields an empty extension; otherwise the
        # slice starts with the period, which [1:] strips off.
extension = filename[filename.rfind('.'):][1:]
else:
if mode != 'rb':
raise ValueError('Magic number detection only when reading')
with open(filename, 'rb') as f:
magic_number = f.read(8)
for pattern in extension_map:
if magic_number.startswith(pattern):
extension = extension_map[pattern]
break
else:
extension = ''
if extension == 'bz2':
if isinstance(bz2, ImportError):
raise bz2
return bz2.BZ2File(filename, mode)
if extension == 'gz':
return gzip.open(filename, mode)
if extension == '7z':
if mode != 'rb':
raise NotImplementedError('It is not possible to write a 7z file.')
try:
process = subprocess.Popen(['7za', 'e', '-bd', '-so', filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=65535)
except OSError:
raise ValueError('7za is not installed or cannot '
'uncompress "{}"'.format(filename))
else:
stderr = process.stderr.read()
process.stderr.close()
if stderr != b'':
process.stdout.close()
raise OSError(
'Unexpected STDERR output from 7za {}'.format(stderr))
return process.stdout
if extension in ('lzma', 'xz'):
if isinstance(lzma, ImportError):
raise lzma
lzma_fmts = {'lzma': lzma.FORMAT_ALONE, 'xz': lzma.FORMAT_XZ}
return lzma.open(filename, mode, format=lzma_fmts[extension])
# assume it's an uncompressed file
return open(filename, 'rb')
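# Illustrative sketch (not part of the original module): round-trip a payload
# through gzip via open_archive, relying on extension detection (the '.gz'
# suffix).  The temporary path is a hypothetical location for the example only.
def _example_open_archive(tmp_path='/tmp/open_archive_example.gz'):
    with open_archive(tmp_path, mode='wb') as f:
        f.write(b'hello archive')
    with open_archive(tmp_path, mode='rb') as f:
        data = f.read()
    assert data == b'hello archive'
    return data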
def merge_unique_dicts(*args, **kwargs):
"""
    Return a merged dict and ensure that the original dicts' keys are unique.
The positional arguments are the dictionaries to be merged. It is also
possible to define an additional dict using the keyword arguments.
"""
args = list(args) + [dict(kwargs)]
conflicts = set()
result = {}
for arg in args:
conflicts |= set(arg.keys()) & set(result.keys())
result.update(arg)
if conflicts:
raise ValueError('Multiple dicts contain the same keys: {}'
.format(', '.join(sorted(str(key)
for key in conflicts))))
return result
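# Illustrative sketch (not part of the original module): merging succeeds for
# disjoint keys and raises ValueError as soon as two sources share a key.
def _example_merge_unique_dicts():
    merged = merge_unique_dicts({'a': 1}, {'b': 2}, c=3)
    assert merged == {'a': 1, 'b': 2, 'c': 3}
    try:
        merge_unique_dicts({'a': 1}, {'a': 2})
    except ValueError:
        return merged
    raise AssertionError('duplicate keys should have been rejected')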
# Decorators
#
# Decorator functions without parameters are _invoked_ differently from
# decorator functions with function syntax. For example, @deprecated causes
# a different invocation to @deprecated().
# The former is invoked with the decorated function as args[0].
# The latter is invoked with the decorator arguments as *args & **kwargs,
# and it must return a callable which will be invoked with the decorated
# function as args[0].
# The following deprecators may support both syntaxes, e.g. @deprecated and
# @deprecated() both work. In order to achieve that, the code inspects
# args[0] to see if it callable. Therefore, a decorator must not accept
# only one arg, and that arg be a callable, as it will be detected as
# a deprecator without any arguments.
def add_decorated_full_name(obj, stacklevel=1):
"""Extract full object name, including class, and store in __full_name__.
This must be done on all decorators that are chained together, otherwise
the second decorator will have the wrong full name.
    @param obj: An object being decorated
@type obj: object
@param stacklevel: level to use
@type stacklevel: int
"""
if hasattr(obj, '__full_name__'):
return
# The current frame is add_decorated_full_name
# The next frame is the decorator
# The next frame is the object being decorated
frame = sys._getframe(stacklevel + 1)
class_name = frame.f_code.co_name
if class_name and class_name != '<module>':
obj.__full_name__ = '{}.{}.{}'.format(obj.__module__,
class_name, obj.__name__)
else:
obj.__full_name__ = '{}.{}'.format(obj.__module__, obj.__name__)
def manage_wrapping(wrapper, obj):
"""Add attributes to wrapper and wrapped functions."""
wrapper.__doc__ = obj.__doc__
wrapper.__name__ = obj.__name__
wrapper.__module__ = obj.__module__
wrapper.__signature__ = inspect.signature(obj)
if not hasattr(obj, '__full_name__'):
add_decorated_full_name(obj, 2)
wrapper.__full_name__ = obj.__full_name__
# Use the previous wrappers depth, if it exists
wrapper.__depth__ = getattr(obj, '__depth__', 0) + 1
# Obtain the wrapped object from the previous wrapper
wrapped = getattr(obj, '__wrapped__', obj)
wrapper.__wrapped__ = wrapped
# Increment the number of wrappers
if hasattr(wrapped, '__wrappers__'):
wrapped.__wrappers__ += 1
else:
wrapped.__wrappers__ = 1
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function."""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be used to monkey-patch decorators in other modules.
e.g.
<xyz>.foo = add_full_name(<xyz>.foo)
@param obj: The function to decorate
@type obj: callable
@return: decorating function
@rtype: function
"""
def outer_wrapper(*outer_args, **outer_kwargs):
"""Outer wrapper.
The outer wrapper may be the replacement function if the decorated
decorator was called without arguments, or the replacement decorator
        if the decorated decorator was called with arguments.
@param outer_args: args
@param outer_kwargs: kwargs
"""
def inner_wrapper(*args, **kwargs):
"""Replacement function.
If the decorator supported arguments, they are in outer_args,
and this wrapper is used to process the args which belong to
the function that the decorated decorator was decorating.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
"""
add_decorated_full_name(args[0])
return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
inner_wrapper.__doc__ = obj.__doc__
inner_wrapper.__name__ = obj.__name__
inner_wrapper.__module__ = obj.__module__
inner_wrapper.__signature__ = inspect.signature(obj)
        # The decorator being decorated may have args, so both
        # syntaxes need to be supported.
if (len(outer_args) == 1 and not outer_kwargs
and callable(outer_args[0])):
add_decorated_full_name(outer_args[0])
return obj(outer_args[0])
return inner_wrapper
if not __debug__:
return obj
return outer_wrapper
def _build_msg_string(instead, since):
"""Build a deprecation warning message format string."""
if not since:
since = ''
elif '.' in since:
since = ' since release ' + since
else:
year_str = month_str = day_str = ''
days = (datetime.utcnow() - datetime.strptime(since, '%Y%m%d')).days
years = days // 365
days = days % 365
months = days // 30
days = days % 30
if years == 1:
years = 0
months += 12
if years:
year_str = '{} years'.format(years)
else:
day_str = '{} day{}'.format(days, 's' if days != 1 else '')
if months:
month_str = '{} month{}'.format(
months, 's' if months != 1 else '')
if year_str and month_str:
year_str += ' and '
if month_str and day_str:
month_str += ' and '
since = ' for {}{}{}'.format(year_str, month_str, day_str)
if instead:
msg = '{{0}} is deprecated{since}; use {{1}} instead.'
else:
msg = '{{0}} is deprecated{since}.'
return msg.format(since=since)
def issue_deprecation_warning(name: str, instead=None, depth=2,
warning_class=None, since=None):
"""Issue a deprecation warning.
@param name: the name of the deprecated object
@param instead: suggested replacement for the deprecated object
@type instead: str or None
@param depth: depth + 1 will be used as stacklevel for the warnings
@type depth: int
@param warning_class: a warning class (category) to be used, defaults to
DeprecationWarning
@type warning_class: type
@param since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@type since: str or None
"""
msg = _build_msg_string(instead, since)
if warning_class is None:
warning_class = (DeprecationWarning
if instead else _NotImplementedWarning)
warn(msg.format(name, instead), warning_class, depth + 1)
@add_full_name
def deprecated(*args, **kwargs):
"""Decorator to output a deprecation warning.
@kwarg instead: if provided, will be used to specify the replacement
@type instead: str
@kwarg since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@type since: str
@kwarg future_warning: if True a FutureWarning will be thrown,
otherwise it defaults to DeprecationWarning
@type future_warning: bool
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*args, **kwargs):
"""Replacement function.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
issue_deprecation_warning(
name, instead, depth, since=since,
warning_class=FutureWarning if future_warning else None)
return obj(*args, **kwargs)
def add_docstring(wrapper):
"""Add a Deprecated notice to the docstring."""
deprecation_notice = 'Deprecated'
if instead:
deprecation_notice += '; use ' + instead + ' instead'
deprecation_notice += '.\n\n'
if wrapper.__doc__: # Append old docstring after the notice
wrapper.__doc__ = deprecation_notice + wrapper.__doc__
else:
wrapper.__doc__ = deprecation_notice
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
# Regular expression to find existing deprecation notices
deprecated_notice = re.compile(r'(^|\s)DEPRECATED[.:;,]',
re.IGNORECASE)
# Add the deprecation notice to the docstring if not present
if not wrapper.__doc__:
add_docstring(wrapper)
else:
if not deprecated_notice.search(wrapper.__doc__):
add_docstring(wrapper)
else:
# Get docstring up to @params so deprecation notices for
# parameters don't disrupt it
trim_params = re.compile(r'^.*?((?=@param)|$)', re.DOTALL)
trimmed_doc = trim_params.match(wrapper.__doc__).group(0)
if not deprecated_notice.search(trimmed_doc): # No notice
add_docstring(wrapper)
return wrapper
since = kwargs.pop('since', None)
future_warning = kwargs.pop('future_warning', False)
without_parameters = (len(args) == 1 and len(kwargs) == 0
and callable(args[0]))
if 'instead' in kwargs:
instead = kwargs['instead']
elif not without_parameters and len(args) == 1:
instead = args[0]
else:
instead = False
# When called as @deprecated, return a replacement function
if without_parameters:
if not __debug__:
return args[0]
return decorator(args[0])
# Otherwise return a decorator, which returns a replacement function
return decorator
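# Illustrative sketch (not part of the original module): @deprecated supports
# both invocation styles described in the comment block above.  The helper
# names below are hypothetical.
@deprecated(instead='_new_helper', since='20200101')
def _old_helper():
    """Old helper kept only to demonstrate @deprecated(...) with arguments."""
    return 42
@deprecated
def _old_helper_bare():
    """Old helper kept only to demonstrate bare @deprecated."""
    return 24
# Calling either helper still works but emits a deprecation warning first.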
def deprecate_arg(old_arg: str, new_arg):
"""Decorator to declare old_arg deprecated and replace it with new_arg.
Usage:
@deprecate_arg('foo', 'bar')
def my_function(bar='baz'): pass
# replaces 'foo' keyword by 'bar' used by my_function
    @deprecate_arg('foo', None)
def my_function(): pass
# ignores 'foo' keyword no longer used by my_function
    The deprecated_args decorator should be used in favour of this
    deprecate_arg decorator, but it is kept for deprecating args which become
    reserved words in future Python releases and to prevent syntax errors.
@param old_arg: old keyword
@param new_arg: new keyword
@type new_arg: str or None or bool
"""
return deprecated_args(**{old_arg: new_arg})
def deprecated_args(**arg_pairs):
"""Decorator to declare multiple args deprecated.
Usage:
@deprecated_args(foo='bar', baz=None)
def my_function(bar='baz'): pass
# replaces 'foo' keyword by 'bar' and ignores 'baz' keyword
@param arg_pairs: Each entry points to the new argument name. If an
argument is to be removed, the value may be one of the following:
- None: shows a DeprecationWarning
- False: shows a PendingDeprecationWarning
- True: shows a FutureWarning (only once)
- empty string: no warning is printed
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@param __kw: kwargs passed to the decorated function
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
for old_arg, new_arg in arg_pairs.items():
output_args = {
'name': name,
'old_arg': old_arg,
'new_arg': new_arg,
}
if old_arg not in __kw:
continue
if new_arg not in [True, False, None, '']:
if new_arg in __kw:
warn('{new_arg} argument of {name} '
'replaces {old_arg}; cannot use both.'
.format_map(output_args),
RuntimeWarning, depth)
else:
# If the value is positionally given this will
# cause a TypeError, which is intentional
warn('{old_arg} argument of {name} '
'is deprecated; use {new_arg} instead.'
.format_map(output_args),
DeprecationWarning, depth)
__kw[new_arg] = __kw[old_arg]
elif new_arg == '':
pass
else:
if new_arg is False:
cls = PendingDeprecationWarning
elif new_arg is True:
cls = FutureWarning
else: # new_arg is None
cls = DeprecationWarning
warn('{old_arg} argument of {name} is deprecated.'
.format_map(output_args),
cls, depth)
del __kw[old_arg]
return obj(*__args, **__kw)
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
if wrapper.__signature__:
# Build a new signature with deprecated args added.
params = collections.OrderedDict()
for param in wrapper.__signature__.parameters.values():
params[param.name] = param.replace()
for old_arg, new_arg in arg_pairs.items():
params[old_arg] = inspect.Parameter(
old_arg, kind=inspect._POSITIONAL_OR_KEYWORD,
default='[deprecated name of {}]'.format(new_arg)
if new_arg not in [True, False, None, '']
else NotImplemented)
params = collections.OrderedDict(sorted(params.items(),
key=lambda x: x[1].kind))
wrapper.__signature__ = inspect.Signature()
wrapper.__signature__._parameters = params
return wrapper
return decorator
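# Illustrative sketch (not part of the original module): remap an old keyword
# argument to a new one and drop another, mirroring the docstring usage.
# The function name is hypothetical.
@deprecated_args(foo='bar', baz=None)
def _example_deprecated_args(bar='default'):
    return bar
# _example_deprecated_args(foo='x') warns and behaves like bar='x';
# passing baz=... warns and the value is dropped.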
def remove_last_args(arg_names):
"""
Decorator to declare all args additionally provided deprecated.
All positional arguments appearing after the normal arguments are marked
deprecated. It marks also all keyword arguments present in arg_names as
deprecated. Any arguments (positional or keyword) which are not present in
    arg_names are forwarded. For example, a call with 3 positional parameters
    where the original function requests one and arg_names contains one name
    will result in an error, because the function is then effectively called
    with 2 positional parameters.
The decorated function may not use C{*args} or C{**kwargs}.
@param arg_names: The names of all arguments.
@type arg_names: iterable; for the most explanatory message it should
retain the given order (so not a set for example).
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@param __kw: kwargs passed to the decorated function
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
args, varargs, kwargs, *_ = getfullargspec(wrapper.__wrapped__)
if varargs is not None and kwargs is not None:
raise ValueError('{} may not have * or ** args.'
.format(name))
deprecated = set(__kw) & set(arg_names)
if len(__args) > len(args):
deprecated.update(arg_names[:len(__args) - len(args)])
# remove at most |arg_names| entries from the back
new_args = tuple(__args[:max(len(args),
len(__args) - len(arg_names))])
new_kwargs = {arg: val for arg, val in __kw.items()
if arg not in arg_names}
if deprecated:
# sort them according to arg_names
deprecated = [arg for arg in arg_names if arg in deprecated]
warn("The trailing arguments ('{}') of {} are deprecated. "
"The value(s) provided for '{}' have been dropped."
.format("', '".join(arg_names), name,
"', '".join(deprecated)),
DeprecationWarning, depth)
return obj(*new_args, **new_kwargs)
manage_wrapping(wrapper, obj)
return wrapper
return decorator
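# Illustrative sketch (not part of the original module): the trailing 'extra'
# parameter is declared deprecated; passing it warns and the value is dropped.
# The function name is hypothetical.
@remove_last_args(['extra'])
def _example_remove_last_args(keep):
    return keep
# _example_remove_last_args('kept', 'dropped') warns and returns 'kept'.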
def redirect_func(target, source_module: Optional[str] = None,
target_module: Optional[str] = None,
old_name: Optional[str] = None,
class_name: Optional[str] = None,
since: Optional[str] = None,
future_warning=False):
"""
Return a function which can be used to redirect to 'target'.
It also acts like marking that function deprecated and copies all
parameters.
@param target: The targeted function which is to be executed.
@type target: callable
@param source_module: The module of the old function. If '.' defaults
to target_module. If 'None' (default) it tries to guess it from the
executing function.
@param target_module: The module of the target function. If
'None' (default) it tries to get it from the target. Might not work
with nested classes.
@param old_name: The old function name. If None it uses the name of the
new function.
@param class_name: The name of the class. It's added to the target and
source module (separated by a '.').
@param since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@param future_warning: if True a FutureWarning will be thrown,
otherwise it defaults to DeprecationWarning
@type future_warning: bool
@return: A new function which adds a warning prior to each execution.
@rtype: callable
"""
def call(*a, **kw):
issue_deprecation_warning(
old_name, new_name, since=since,
warning_class=FutureWarning if future_warning else None)
return target(*a, **kw)
if target_module is None:
target_module = target.__module__
if target_module and target_module[-1] != '.':
target_module += '.'
if source_module == '.':
source_module = target_module
elif source_module and source_module[-1] != '.':
source_module += '.'
else:
source_module = sys._getframe(1).f_globals['__name__'] + '.'
if class_name:
target_module += class_name + '.'
source_module += class_name + '.'
old_name = source_module + (old_name or target.__name__)
new_name = target_module + target.__name__
if not __debug__:
return target
return call
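# Illustrative sketch (not part of the original module): create a deprecated
# alias that forwards to a new function and warns on use.  The names _new_sum
# and _old_sum are hypothetical.
def _new_sum(a, b):
    return a + b
_old_sum = redirect_func(_new_sum, old_name='_old_sum', since='20200101')
# _old_sum(1, 2) warns that it is deprecated and returns 3.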
class ModuleDeprecationWrapper(types.ModuleType):
"""A wrapper for a module to deprecate classes or variables of it."""
def __init__(self, module):
"""
Initialise the wrapper.
It will automatically overwrite the module with this instance in
C{sys.modules}.
@param module: The module name or instance
@type module: str or module
"""
if isinstance(module, (str, bytes)):
module = sys.modules[module]
super().__setattr__('_deprecated', {})
super().__setattr__('_module', module)
self.__dict__.update(module.__dict__)
if __debug__:
sys.modules[module.__name__] = self
def _add_deprecated_attr(self, name: str, replacement=None,
replacement_name: Optional[str] = None,
warning_message: Optional[str] = None,
since: Optional[str] = None,
future_warning: bool = False):
"""
Add the name to the local deprecated names dict.
@param name: The name of the deprecated class or variable. It may not
be already deprecated.
@param replacement: The replacement value which should be returned
instead. If the name is already an attribute of that module this
must be None. If None it'll return the attribute of the module.
@type replacement: any
@param replacement_name: The name of the new replaced value. Required
if C{replacement} is not None and it has no __name__ attribute.
If it contains a '.', it will be interpreted as a Python dotted
object name, and evaluated when the deprecated object is needed.
@param warning_message: The warning to display, with positional
variables: {0} = module, {1} = attribute name, {2} = replacement.
@param since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@param future_warning: if True a FutureWarning will be thrown,
otherwise it defaults to DeprecationWarning
"""
if '.' in name:
raise ValueError('Deprecated name "{}" may not contain '
'".".'.format(name))
if name in self._deprecated:
raise ValueError('Name "{}" is already deprecated.'.format(name))
if replacement is not None and hasattr(self._module, name):
raise ValueError('Module has already an attribute named '
'"{}".'.format(name))
if replacement_name is None:
if hasattr(replacement, '__name__'):
replacement_name = replacement.__module__
if hasattr(replacement, '__self__'):
replacement_name += '.'
replacement_name += replacement.__self__.__class__.__name__
replacement_name += '.' + replacement.__name__
else:
raise TypeError('Replacement must have a __name__ attribute '
'or a replacement name must be set '
'specifically.')
if not warning_message:
warning_message = _build_msg_string(
replacement_name, since).format('{0}.{1}', '{2}')
if hasattr(self, name):
# __getattr__ will only be invoked if self.<name> does not exist.
delattr(self, name)
self._deprecated[name] = (
replacement_name, replacement, warning_message, future_warning)
def __setattr__(self, attr, value):
"""Set the value of the wrapped module."""
self.__dict__[attr] = value
setattr(self._module, attr, value)
def __getattr__(self, attr):
"""Return the attribute with a deprecation warning if required."""
if attr in self._deprecated:
name, repl, message, future = self._deprecated[attr]
warning_message = message
warn(warning_message.format(self._module.__name__, attr, name),
FutureWarning if future else DeprecationWarning, 2)
if repl:
return repl
if '.' in name:
with suppress(Exception):
package_name = name.split('.', 1)[0]
module = import_module(package_name)
context = {package_name: module}
replacement = eval(name, context)
self._deprecated[attr] = (
name, replacement, message, future)
return replacement
return getattr(self._module, attr)
def file_mode_checker(filename: str, mode=0o600, quiet=False, create=False):
"""Check file mode and update it, if needed.
@param filename: filename path
@param mode: requested file mode
@type mode: int
@param quiet: warn about file mode change if False.
@type quiet: bool
@param create: create the file if it does not exist already
@type create: bool
@raise IOError: The file does not exist and `create` is False.
"""
try:
st_mode = os.stat(filename).st_mode
except OSError: # file does not exist
if not create:
raise
os.close(os.open(filename, os.O_CREAT | os.O_EXCL, mode))
return
warn_str = 'File {0} had {1:o} mode; converted to {2:o} mode.'
if stat.S_ISREG(st_mode) and (st_mode - stat.S_IFREG != mode):
os.chmod(filename, mode)
# re-read and check changes
if os.stat(filename).st_mode != st_mode and not quiet:
warn(warn_str.format(filename, st_mode - stat.S_IFREG, mode))
def compute_file_hash(filename: str, sha='sha1', bytes_to_read=None):
"""Compute file hash.
Result is expressed as hexdigest().
@param filename: filename path
@param sha: hashing function among the following in hashlib:
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
function name shall be passed as string, e.g. 'sha1'.
@type sha: str
@param bytes_to_read: only the first bytes_to_read will be considered;
if file size is smaller, the whole file will be considered.
@type bytes_to_read: None or int
"""
size = os.path.getsize(filename)
if bytes_to_read is None:
bytes_to_read = size
else:
bytes_to_read = min(bytes_to_read, size)
step = 1 << 20
shas = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
assert sha in shas
sha = getattr(hashlib, sha)() # sha instance
with open(filename, 'rb') as f:
while bytes_to_read > 0:
read_bytes = f.read(min(bytes_to_read, step))
assert read_bytes # make sure we actually read bytes
bytes_to_read -= len(read_bytes)
sha.update(read_bytes)
return sha.hexdigest()
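# Illustrative sketch (not part of the original module): hash a small temporary
# file and cross-check against hashlib directly.  The path is hypothetical.
def _example_compute_file_hash(tmp_path='/tmp/compute_file_hash_example.txt'):
    payload = b'some file content'
    with open(tmp_path, 'wb') as f:
        f.write(payload)
    digest = compute_file_hash(tmp_path, sha='sha1')
    assert digest == hashlib.sha1(payload).hexdigest()
    return digest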
# deprecated parts ############################################################
@deprecated('bot_choice.Option and its subclasses', since='20181217')
def concat_options(message, line_length, options):
"""DEPRECATED. Concatenate options."""
indent = len(message) + 2
line_length -= indent
option_msg = ''
option_line = ''
for option in options:
if option_line:
option_line += ', '
# +1 for ','
if len(option_line) + len(option) + 1 > line_length:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line[:-1] # remove space
option_line = ''
option_line += option
if option_line:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line
return '{} ({}):'.format(message, option_msg)
|
gpu_helper.py
|
import config
import xml.etree.ElementTree as ET
import pwd
import logging
from datetime import datetime, timedelta
import threading
import pickle
import os
import traceback
from multiprocessing.dummy import Pool as ThreadPool
import paramiko
import socket
logger = logging.getLogger()
# init cache
cache = {
'users': {},
'since': datetime.now()
}
# init template
user_template = {'name': '', 'inc': 0, 'time': timedelta(seconds=0.), 'cum_energy': 0., 'cum_util': 0.}
def get_nvidiasmi(ssh):
# function to get nvidia smi xmls
_, ssh_stdout, _ = ssh.exec_command('nvidia-smi -q -x')
try:
ret = ET.fromstring(''.join(ssh_stdout.readlines()))
return ret
except ET.ParseError:
return False
def get_ps(ssh, pids):
# function to identify processes running
pid_cmd = 'ps -o pid= -o ruser= -p {}'.format(','.join(pids))
_, ssh_stdout, _ = ssh.exec_command(pid_cmd)
res = ''.join(ssh_stdout.readlines())
return res
def get_users_by_pid(ps_output):
# function to identify user of a process
users_by_pid = {}
if ps_output is None:
return users_by_pid
for line in ps_output.strip().split('\n'):
pid, user = line.split()
users_by_pid[pid] = user
return users_by_pid
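# Illustrative sketch (not part of the original module): get_users_by_pid expects
# `ps -o pid= -o ruser=` style output, i.e. one "<pid> <user>" pair per line.
def _example_get_users_by_pid():
    sample = '12345 alice\n23456 bob\n'
    assert get_users_by_pid(sample) == {'12345': 'alice', '23456': 'bob'}
    return get_users_by_pid(sample)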
def update_users(info):
# updates cache of user usage statistics
for user, real_user in zip(info['users'], info['real_users']):
if user not in cache['users']:
cache['users'][user] = {}
cache['users'][user].update(user_template)
cache['users'][user]['name'] = real_user
cache['users'][user]['inc'] += 1
cache['users'][user]['time'] += config.update_interval
pwr_float = float(info['power_draw'][:-2])
cache['users'][user]['cum_energy'] += pwr_float * config.update_interval.total_seconds() / 3600
gpu_util = float(info['gpu_util'][:-2])
cache['users'][user]['cum_util'] += gpu_util
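# Note (added for clarity, not in the original module): power_draw is reported
# by nvidia-smi like "150.00 W", so float(power_draw[:-2]) yields watts, and
# watts * update_interval.total_seconds() / 3600 accumulates watt-hours per
# sample; cum_util sums the raw utilization percentages over all samples.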
def get_gpu_infos(ssh):
# collects gpu usage information for a ssh connection
nvidiasmi_output = get_nvidiasmi(ssh)
if not nvidiasmi_output:
return False
gpus = nvidiasmi_output.findall('gpu')
gpu_infos = []
for idx, gpu in enumerate(gpus):
model = gpu.find('product_name').text
power_draw = gpu.find('power_readings').find('power_draw').text
processes = gpu.findall('processes')[0]
pids = [process.find('pid').text for process in processes if config.process_filter.search(process.find('process_name').text)]
mem = gpu.find('fb_memory_usage').find('total').text
gpu_util = gpu.find('utilization').find('gpu_util').text
used_mem = gpu.find('fb_memory_usage').find('used').text
free = (len(pids) == 0)
info = {
'idx': idx,
'model': model,
'pids': pids,
'power_draw': power_draw,
'free': free,
'mem': mem,
'gpu_util': gpu_util,
'used_mem': used_mem
}
if free:
users = []
real_users = []
else:
ps_output = get_ps(ssh, pids)
users_by_pid = get_users_by_pid(ps_output)
users = set((users_by_pid[pid] for pid in pids))
real_users = [pwd.getpwnam(user).pw_gecos.split(',')[0] for user in users]
info['users'] = users
info['real_users'] = real_users
update_users(info)
gpu_infos.append(info)
return gpu_infos
def get_remote_info(server):
# returns gpu information from cache
tstring = cache['servers'][server]['time'].strftime('%d.%m.%Y %H:%M:%S')
logger.info(f'Using cache for {server} from {tstring}')
return cache['servers'][server]
def get_new_server_info(server):
server_info = {}
try:
ssh = paramiko.SSHClient()
        # be careful: change this if you do not want host keys to be added automatically
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
logging.info(f'loading server {server}')
ssh.connect(server, username=config.user, password=config.password, key_filename=config.key)
try:
gpu_infos = get_gpu_infos(ssh)
if not gpu_infos:
server_info['smi_error'] = True
else:
server_info['info'] = gpu_infos
server_info['smi_error'] = False
server_info['time'] = datetime.now()
logging.info(f'finished loading server {server}')
finally:
ssh.close()
del ssh
except Exception:
logging.error(f'Had an issue while updating cache for {server}: {traceback.format_exc()}')
return server, server_info
def update_cache(server, interval):
    # asynchronously updates the cache once the interval has passed
logging.info('updating cache')
server, result = get_new_server_info(server)
if result:
cache['servers'][server].update(result)
    logging.info('restarting timer to update cache')
threading.Timer(interval.total_seconds(), update_cache, (server, interval, )).start()
def write_cache(interval):
with open(config.cache_file, 'wb') as f:
pickle.dump(cache, f)
threading.Timer(interval.total_seconds(), write_cache, (interval, )).start()
def start_async(interval):
    # asynchronously updates the cache
for server in config.servers:
threading.Thread(target=update_cache, args=(server, interval, )).start()
threading.Thread(target=write_cache, args=(interval, )).start()
def setup():
cache['servers'] = {server: {'time': datetime.fromtimestamp(0.), 'info': []} for server in config.servers}
# start async updates of cache
if os.path.isfile(config.cache_file):
with open(config.cache_file, 'rb') as f:
cache.update(pickle.load(f))
start_async(config.update_interval)
|
executor.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################
# Cyclic grid strategy based on martingale
##################################################################
__author__ = "Jerry Fedorenko"
__copyright__ = "Copyright © 2021 Jerry Fedorenko aka VM"
__license__ = "MIT"
__version__ = "1.0rc6"
__maintainer__ = "Jerry Fedorenko"
__contact__ = 'https://github.com/DogsTailFarmer'
##################################################################
try:
from margin_wrapper import * # lgtm [py/polluting-import]
from margin_wrapper import __version__ as msb_ver
except ImportError:
from margin_strategy_sdk import * # lgtm [py/polluting-import] skipcq: PY-W2000
from typing import Dict, List
import time
import math
import os
import simplejson as json
import charset_normalizer # lgtm [py/unused-import] skipcq: PY-W2000
msb_ver = ''
STANDALONE = False
else:
STANDALONE = True
#
import gc
import psutil
import sqlite3
import statistics
from datetime import datetime
from decimal import Decimal, ROUND_FLOOR, ROUND_CEILING
from threading import Thread
import queue
import requests
# region SetParameters
SYMBOL = str()
EXCHANGE = ()
# Exchange setup
ID_EXCHANGE = int()
FEE_IN_PAIR = bool()
FEE_MAKER = Decimal()
FEE_TAKER = Decimal()
FEE_SECOND = bool()
FEE_BNB_IN_PAIR = bool()
GRID_MAX_COUNT = int()
EXTRA_CHECK_ORDER_STATE = bool()
# Trade parameter
START_ON_BUY = bool()
AMOUNT_FIRST = Decimal()
USE_ALL_FIRST_FUND = bool()
AMOUNT_SECOND = Decimal()
PRICE_SHIFT = float()
# Round pattern
ROUND_BASE = str()
ROUND_QUOTE = str()
#
PROFIT = Decimal()
PROFIT_MAX = Decimal()
PROFIT_REVERSE = Decimal()
OVER_PRICE = Decimal()
ORDER_Q = int()
MARTIN = Decimal()
SHIFT_GRID_DELAY = int()
# Other
STATUS_DELAY = int()
GRID_ONLY = bool()
LOG_LEVEL_NO_PRINT = []
#
ADAPTIVE_TRADE_CONDITION = bool()
BB_CANDLE_SIZE_IN_MINUTES = int()
BB_NUMBER_OF_CANDLES = int()
KBB = float()
PROFIT_K = float()
#
LINEAR_GRID_K = int()
#
ADX_CANDLE_SIZE_IN_MINUTES = int()
ADX_NUMBER_OF_CANDLES = int()
ADX_PERIOD = int()
ADX_THRESHOLD = int()
ADX_PRICE_THRESHOLD = float()
# Reverse cycle
REVERSE = bool()
REVERSE_TARGET_AMOUNT = float()
REVERSE_INIT_AMOUNT = Decimal()
REVERSE_STOP = bool()
# Config variables
HEAD_VERSION = str()
LOAD_LAST_STATE = int()
# Path and files name
LOG_PATH = str()
WORK_PATH = str()
LAST_STATE_PATH = str()
FILE_LAST_STATE = str()
VPS_NAME = str()
# Telegram
TELEGRAM_URL = str()
TOKEN = str()
CHANNEL_ID = str()
# endregion
class Style:
BLACK: str = '\033[30m'
RED: str = '\033[31m'
B_RED: str = '\033[1;31m'
GREEN: str = '\033[32m'
YELLOW: str = '\033[33m'
B_YELLOW: str = "\033[33;1m"
BLUE: str = '\033[34m'
MAGENTA: str = '\033[35m'
CYAN: str = '\033[36m'
GRAY: str = '\033[37m'
WHITE: str = '\033[0;37m'
B_WHITE: str = '\033[1;37m'
UNDERLINE: str = '\033[4m'
RESET: str = '\033[0m'
@classmethod
def __add__(cls, b):
return Style() + b
def telegram(queue_to_tlg, _bot_id) -> None:
url = TELEGRAM_URL
token = TOKEN
channel_id = CHANNEL_ID
url += token
method = url + '/sendMessage'
def telegram_get(offset=None) -> []:
command_list = []
_method = url + '/getUpdates'
res = None
try:
res = requests.post(_method, data={'chat_id': channel_id, 'offset': offset})
except Exception as _exp:
print(f"telegram_get: {_exp}")
if res and res.status_code == 200:
result = res.json().get('result')
# print(f"telegram_get.result: {result}")
message_id = None
text_in = None
reply_to_message = None
for i in result:
update_id = i.get('update_id')
message = i.get('message')
if message:
from_id = message.get('from').get('id')
if from_id == int(channel_id):
message_id = message.get('message_id')
text_in = i.get('message').get('text')
try:
reply_to_message = i.get('message').get('reply_to_message').get('text')
except AttributeError:
reply_to_message = None
command_list.append({'update_id': update_id, 'message_id': message_id,
'text_in': text_in, 'reply_to_message': reply_to_message})
return command_list
connection_control = sqlite3.connect(WORK_PATH + 'funds_rate.db')
cursor_control = connection_control.cursor()
offset_id = None
while True:
try:
text = queue_to_tlg.get(block=True, timeout=10)
except KeyboardInterrupt:
break
except queue.Empty:
# Get external command from Telegram bot
x = telegram_get(offset_id)
if x:
offset_id = x[-1].get('update_id')
offset_id += 1
for n in x:
a = n.get('reply_to_message')
if a:
bot_id = a.split('.')[0]
if bot_id == _bot_id:
cursor_control.execute('insert into t_control values(?,?,?,?)',
(n['message_id'], n['text_in'], bot_id, None))
# Send receipt
text = f"Received {n['text_in']} command, OK"
try:
requests.post(method, data={'chat_id': channel_id, 'text': text})
except Exception as _ex:
print(f"telegram: {_ex}")
connection_control.commit()
else:
if text and 'stop_signal_QWE#@!' in text:
break
try:
requests.post(method, data={'chat_id': channel_id, 'text': text})
except Exception as _ex:
print(f"telegram: {_ex}")
def save_to_db(queue_to_db) -> None:
connection_analytic = sqlite3.connect(WORK_PATH + 'funds_rate.db', check_same_thread=False)
cursor_analytic = connection_analytic.cursor()
    # Consistency check between the t_exchange table and the EXCHANGE tuple from ms_cfg.toml
cursor_analytic.execute("SELECT id_exchange, name FROM t_exchange")
row = cursor_analytic.fetchall()
row_n = len(row)
for i, exch in enumerate(EXCHANGE):
if i >= row_n:
print(f"save_to_db: Add exchange {i}, {exch}")
cursor_analytic.execute("INSERT into t_exchange values(?,?)", (i, exch))
connection_analytic.commit()
# Save data to .db
data = None
while True:
try:
data = queue_to_db.get()
except KeyboardInterrupt:
pass
if data is None or data.get('stop_signal'):
break
print("save_to_db: Record row into .db")
cursor_analytic.execute("insert into t_funds values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
(ID_EXCHANGE,
None,
data.get('f_currency'),
data.get('s_currency'),
float(data.get('f_funds')),
float(data.get('s_funds')),
float(data.get('avg_rate')),
data.get('cycle_buy'),
float(data.get('f_depo')),
float(data.get('s_depo')),
float(data.get('f_profit')),
float(data.get('s_profit')),
datetime.utcnow(),
PRICE_SHIFT,
float(PROFIT),
float(data.get('over_price')),
data.get('order_q'),
float(MARTIN),
LINEAR_GRID_K,
ADAPTIVE_TRADE_CONDITION,
KBB,
PROFIT_K,
data.get('cycle_time')))
connection_analytic.commit()
connection_analytic.commit()
def float2decimal(_f: float) -> Decimal:
return Decimal(str(_f))
class Orders:
def __init__(self):
self.orders_list = []
def __iter__(self):
for i in self.orders_list:
yield i
def __len__(self):
return len(self.orders_list)
def append(self, _id: int, buy: bool, amount: Decimal, price: Decimal):
self.orders_list.append({'id': _id, 'buy': buy, 'amount': amount, 'price': price})
def remove(self, _id: int):
self.orders_list[:] = [i for i in self.orders_list if i['id'] != _id]
def find_order(self, in_orders: [], place_order_id: int):
"""
Find equal order in_orders[] and self.orders_list[] where in_orders[].id == place_order_id
If exist return order: Order
"""
order = None
for i in self.orders_list:
if i['id'] == place_order_id:
for k, o in enumerate(in_orders):
if o.buy == i['buy'] and o.amount == i['amount'] and o.price == i['price']:
order = in_orders[k]
break
if order:
break
return order
def get_by_id(self, _id: int) -> ():
for i in self.orders_list:
if i['id'] == _id:
return i['buy'], i['amount'], i['price']
return ()
def exist(self, _id: int) -> bool:
return any(i['id'] == _id for i in self.orders_list)
def get(self) -> []:
"""
Get List of Dict for orders
:return: []
"""
orders = []
for i in self.orders_list:
orders.append({'id': i['id'], 'buy': i['buy'], 'amount': i['amount'], 'price': i['price']})
return orders
def get_id_list(self) -> []:
"""
Get List of orders id
:return: []
"""
orders = []
for i in self.orders_list:
orders.append(i['id'])
return orders
def get_first(self) -> ():
"""
Get [0] Dict for order
:return: (buy, amount, price)
"""
return self.orders_list[0]['buy'], self.orders_list[0]['amount'], self.orders_list[0]['price']
def restore(self, order_list: []):
self.orders_list.clear()
for i in order_list:
i_dec = {'id': i.get('id'),
'buy': i.get('buy'),
'amount': float2decimal(i.get('amount')),
'price': float2decimal(i.get('price'))}
self.orders_list.append(i_dec)
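# Illustrative sketch (not part of the original strategy): basic bookkeeping
# with the Orders container used throughout the strategy below.
def _example_orders_usage():
    orders = Orders()
    orders.append(1, buy=True, amount=Decimal('0.5'), price=Decimal('100'))
    orders.append(2, buy=False, amount=Decimal('0.5'), price=Decimal('110'))
    assert orders.exist(1) and len(orders) == 2
    assert orders.get_by_id(2) == (False, Decimal('0.5'), Decimal('110'))
    orders.remove(1)
    return orders.get_id_list()  # -> [2]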
class Strategy(StrategyBase):
##############################################################
# strategy logic methods
##############################################################
def __init__(self):
super().__init__()
print(f"Init Strategy, ver: {HEAD_VERSION} + {__version__} + {msb_ver}")
self.cycle_buy = not START_ON_BUY if REVERSE else START_ON_BUY # + Direction (Buy/Sell) for current cycle
self.orders_grid = Orders() # + List of grid orders
self.orders_init = Orders() # - List of initial grid orders
self.orders_hold = Orders() # + List of grid orders for later place
# Take profit variables
self.tp_order_id = None # + Take profit order id
self.tp_wait_id = None # -
self.tp_order = () # - (id, buy, amount, price) Placed take profit order
self.tp_error = False # Flag when can't place tp
self.tp_order_hold = {} # - Save unreleased take profit order
self.tp_hold = False # - Flag for replace take profit order
        self.tp_cancel = False  # - Cancel tp order wanted after successful place and Start()
self.tp_cancel_from_grid_handler = False # -
self.tp_hold_additional = False # - Need place TP after placed additional grid orders
self.tp_target = Decimal('0') # + Target amount for TP that will be placed
self.tp_amount = Decimal('0') # + Initial depo for active TP
self.part_profit_first = Decimal('0') # +
self.part_profit_second = Decimal('0') # +
self.tp_was_filled = () # - Exist incomplete processing filled TP
self.tp_part_amount_first = Decimal('0') # + Sum partially filled TP
self.tp_part_amount_second = Decimal('0') # + Sum partially filled TP
self.tp_init = (Decimal('0'), Decimal('0')) # + (sum_amount_first, sum_amount_second) initial amount for TP
#
self.sum_amount_first = Decimal('0') # Sum buy/sell in first currency for current cycle
self.sum_amount_second = Decimal('0') # Sum buy/sell in second currency for current cycle
#
self.correction_amount_first = Decimal('0') # +
self.correction_amount_second = Decimal('0') # +
#
self.deposit_first = AMOUNT_FIRST # + Calculated operational deposit
self.deposit_second = AMOUNT_SECOND # + Calculated operational deposit
self.sum_profit_first = Decimal('0') # + Sum profit from start to now()
self.sum_profit_second = Decimal('0') # + Sum profit from start to now()
self.cycle_buy_count = 0 # + Count for buy cycle
self.cycle_sell_count = 0 # + Count for sale cycle
self.shift_grid_threshold = None # - Price level of shift grid threshold for current cycle
self.f_currency = '' # - First currency name
self.s_currency = '' # - Second currency name
self.connection_analytic = None # - Connection to .db
self.tlg_header = '' # - Header for Telegram message
self.last_shift_time = None # +
self.avg_rate = Decimal('0') # - Flow average rate for trading pair
#
self.grid_hold = {} # - Save for later create grid orders
self.start_hold = False # - Hold start if exist not accepted grid order(s)
self.initial_first = Decimal('0') # + Use if balance replenishment delay
self.initial_second = Decimal('0') # + Use if balance replenishment delay
self.initial_reverse_first = Decimal('0') # + Use if balance replenishment delay
self.initial_reverse_second = Decimal('0') # + Use if balance replenishment delay
self.wait_refunding_for_start = False # -
#
self.cancel_order_id = None # - Exist canceled not confirmed order
self.over_price = None # + Adaptive over price
self.grid_place_flag = False # - Flag when placed next part of grid orders
self.part_amount_first = Decimal('0') # + Amount of partially filled order
self.part_amount_second = Decimal('0') # + Amount of partially filled order
self.command = None # + External input command from Telegram
self.start_after_shift = False # - Flag set before shift, clear into Start()
self.queue_to_db = queue.Queue() # - Queue for save data to .db
self.pr_db = None # - Process for save data to .db
self.queue_to_tlg = queue.Queue() # - Queue for sending message to Telegram
self.pr_tlg = None # - Process for sending message to Telegram
self.pr_tlg_control = None # - Process for get command from Telegram
self.restart = None # - Set after execute take profit order and restart cycle
self.profit_first = Decimal('0') # + Cycle profit
self.profit_second = Decimal('0') # + Cycle profit
self.status_time = None # + Last time sending status message
self.cycle_time = None # + Cycle start time
self.cycle_time_reverse = None # + Reverse cycle start time
self.reverse = REVERSE # + Current cycle is Reverse
self.reverse_target_amount = REVERSE_TARGET_AMOUNT if REVERSE else None # + Return amount for reverse cycle
self.reverse_init_amount = REVERSE_INIT_AMOUNT if REVERSE else Decimal('0') # + Actual amount of initial cycle
self.reverse_hold = False # + Exist unreleased reverse state
self.reverse_price = None # + Price when execute last grid order and hold reverse cycle
self.round_base = '1.0123456789' # - Round pattern for 0.00000 = 0.00
self.round_quote = '1.0123456789' # - Round pattern for 0.00000 = 0.00
self.order_q = None # + Adaptive order quantity
self.order_q_placed = False # - Flag initial number of orders placed
self.martin = Decimal(0) # + Operational increment volume of orders in the grid
self.orders_save = Orders() # + Save for cancel time
self.first_run = True # -
self.grid_remove = None # - Flag when starting cancel grid orders
self.heartbeat_counter = 0 # -
self.grid_order_canceled = None # -
# noinspection PyProtectedMember
def init(self, check_funds: bool = True) -> None: # skipcq: PYL-W0221
self.message_log('Start Init section')
tcm = self.get_trading_capability_manager()
self.f_currency = self.get_first_currency()
self.s_currency = self.get_second_currency()
self.tlg_header = '{}, {}/{}. '.format(EXCHANGE[ID_EXCHANGE], self.f_currency, self.s_currency)
self.message_log(f"{self.tlg_header}", color=Style.B_WHITE)
self.status_time = time.time()
self.cycle_time = datetime.utcnow()
self.start_after_shift = True
self.over_price = OVER_PRICE
self.order_q = ORDER_Q
self.martin = (MARTIN + 100) / 100
if not check_funds:
self.first_run = False
if GRID_ONLY:
self.message_log('Mode for buy/sell asset by grid orders placement ON', color=Style.B_WHITE)
        if not Decimal('0.0') <= PROFIT_REVERSE <= Decimal('0.75'):
self.message_log("Incorrect value for PROFIT_REVERSE", log_level=LogLevel.ERROR)
if STANDALONE:
os._exit(1)
# Calculate round float multiplier
self.round_base = ROUND_BASE or str(tcm.round_amount(1.0123456789, RoundingType.FLOOR))
self.round_quote = ROUND_QUOTE or str(tcm.round_price(1.0123456789, RoundingType.FLOOR))
print(f"Round pattern, for base: {self.round_base}, quote: {self.round_quote}")
last_price = float2decimal(self.get_buffered_ticker().last_price)
if last_price:
print('Last ticker price: ', last_price)
self.avg_rate = last_price
df = self.get_buffered_funds().get(self.f_currency, 0)
df = float2decimal(df.available) if df else Decimal('0.0')
if USE_ALL_FIRST_FUND and df and self.cycle_buy:
self.message_log('Check USE_ALL_FIRST_FUND parameter. You may have loss on Reverse cycle',
color=Style.B_WHITE)
if self.cycle_buy:
ds = self.get_buffered_funds().get(self.s_currency, 0)
ds = float2decimal(ds.available) if ds else Decimal('0.0')
if check_funds and self.deposit_second > ds:
self.message_log('Not enough second coin for Buy cycle!', color=Style.B_RED)
if STANDALONE:
os._exit(1)
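            # The first grid order volume follows from the geometric series
            # deposit = v1 * (1 + martin + ... + martin**(ORDER_Q - 1)),
            # i.e. v1 = deposit * (1 - martin) / (1 - martin**ORDER_Q);
            # dividing by the last price converts the quote deposit to base units.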
first_order_vlm = self.deposit_second * 1 * (1 - self.martin) / (1 - self.martin**ORDER_Q)
first_order_vlm /= last_price
else:
if USE_ALL_FIRST_FUND:
self.deposit_first = df
else:
if check_funds and self.deposit_first > df:
self.message_log('Not enough first coin for Sell cycle!', color=Style.B_RED)
if STANDALONE:
os._exit(1)
first_order_vlm = self.deposit_first * 1 * (1 - self.martin) / (1 - pow(self.martin, ORDER_Q))
if self.cycle_buy and first_order_vlm < float2decimal(tcm.get_min_buy_amount(float(last_price))):
self.message_log(f"Total deposit {AMOUNT_SECOND}{self.s_currency}"
f" not enough for min amount for {ORDER_Q} orders.", color=Style.B_RED)
elif not self.cycle_buy and first_order_vlm < float2decimal(tcm.get_min_sell_amount(float(last_price))):
self.message_log(f"Total deposit {self.deposit_first}{self.f_currency}"
f" not enough for min amount for {ORDER_Q} orders.", color=Style.B_RED)
buy_amount = tcm.get_min_buy_amount(float(last_price))
sell_amount = tcm.get_min_sell_amount(float(last_price))
print(f"buy_amount: {buy_amount}, sell_amount: {sell_amount}")
else:
print('Actual price not received, initialization checks skipped')
# self.message_log('End Init section')
@staticmethod
def get_strategy_config() -> StrategyConfig:
print('Get config')
s = StrategyConfig()
s.required_data_updates = {StrategyConfig.ORDER_BOOK,
StrategyConfig.FUNDS,
StrategyConfig.TICKER}
s.normalize_exchange_buy_amounts = True
return s
def save_strategy_state(self) -> Dict[str, str]:
# region ReportStatus
# Get command from Telegram
command = None
if self.connection_analytic:
cursor_analytic = self.connection_analytic.cursor()
bot_id = self.tlg_header.split('.')[0]
cursor_analytic.execute('SELECT max(message_id), text_in, bot_id\
FROM t_control WHERE bot_id=:bot_id', {'bot_id': bot_id})
row = cursor_analytic.fetchone()
if row[0]:
# Analyse and execute received command
command = row[1]
if command != 'status':
self.command = command
command = None
# Remove applied command from .db
cursor_analytic.execute('UPDATE t_control SET apply = 1 WHERE message_id=:message_id',
{'message_id': row[0]})
self.connection_analytic.commit()
# self.message_log(f"save_strategy_state.command: {self.command}", log_level=LogLevel.DEBUG)
if command or (time.time() - self.status_time) / 60 > STATUS_DELAY:
# Report current status
last_price = self.get_buffered_ticker().last_price
if self.cycle_time:
ct = str(datetime.utcnow() - self.cycle_time).rsplit('.')[0]
else:
self.message_log("save_strategy_state: cycle_time is None!", log_level=LogLevel.DEBUG)
ct = str(datetime.utcnow()).rsplit('.')[0]
if self.command == 'stopped':
self.message_log("Strategy stopped. Need manual action", tlg=True)
elif self.grid_hold:
funds = self.get_buffered_funds()
if self.cycle_buy:
fund = funds.get(self.s_currency, 0)
fund = fund.available if fund else 0
currency = self.s_currency
else:
fund = funds.get(self.f_currency, 0)
fund = fund.available if fund else 0
currency = self.f_currency
time_diff = None
if self.grid_hold.get('timestamp'):
time_diff = int(time.time() - self.grid_hold['timestamp'])
self.message_log(f"Exist unreleased grid orders for\n"
f"{'Buy' if self.cycle_buy else 'Sell'} cycle with"
f" {self.grid_hold['depo']}{currency} depo.\n"
f"Available funds is {fund} {currency}\n"
f"Last ticker price: {last_price}\n"
f"From start {ct}\n"
f"Time difference: {time_diff} sec", tlg=True)
else:
orders = self.get_buffered_open_orders()
order_buy = len([i for i in orders if i.buy is True])
order_sell = len([i for i in orders if i.buy is False])
order_hold = len(self.orders_hold)
sum_profit = self.round_truncate(self.sum_profit_first * self.avg_rate + self.sum_profit_second,
base=False)
command = bool(self.command in ('end', 'stop'))
self.message_log(f"Complete {self.cycle_buy_count} buy cycle and {self.cycle_sell_count} sell cycle\n"
f"For all cycles profit:\n"
f"First: {self.sum_profit_first}\n"
f"Second: {self.sum_profit_second}\n"
f"Summary: {sum_profit}\n"
f"* *** *** *** *\n"
f"{'Buy' if self.cycle_buy else 'Sell'}{' Reverse' if self.reverse else ''}"
f"{' Hold reverse' if self.reverse_hold else ''}"
f" {'grid only' if GRID_ONLY else 'cycle'} with {order_buy} buy"
f" and {order_sell} sell active orders.\n"
f"{order_hold if order_hold else 'No'} hold grid orders\n"
f"Over price: {self.over_price:.2f}%\n"
f"Last ticker price: {last_price}\n"
f"ver: {HEAD_VERSION}+{__version__}+{msb_ver}\n"
f"From start {ct}\n"
f"{'- *** *** *** -' if self.command == 'stop' else ''}\n"
f"{'Waiting for end of cycle for manual action' if command else ''}",
tlg=True)
# endregion
# region ProcessingEvent
if not STANDALONE and EXTRA_CHECK_ORDER_STATE:
self.heartbeat_counter += 1
if self.heartbeat_counter >= 5:
self.heartbeat_counter = 0
self.check_order_status()
if self.wait_refunding_for_start or self.tp_order_hold or self.grid_hold:
self.get_buffered_funds()
if self.tp_error:
self.tp_error = False
self.place_profit_order()
if self.reverse_hold:
last_price = self.get_buffered_ticker().last_price
if self.cycle_buy:
price_diff = 100 * (self.reverse_price - last_price) / self.reverse_price
else:
price_diff = 100 * (last_price - self.reverse_price) / self.reverse_price
if price_diff > ADX_PRICE_THRESHOLD:
# Reverse
self.cycle_buy = not self.cycle_buy
self.command = 'stop' if REVERSE_STOP else None
self.reverse = True
self.reverse_hold = False
self.sum_amount_first = self.tp_part_amount_first
self.sum_amount_second = self.tp_part_amount_second
self.part_amount_first = Decimal('0')
self.part_amount_second = Decimal('0')
self.tp_part_amount_first = Decimal('0')
self.tp_part_amount_second = Decimal('0')
self.message_log('Release Hold reverse cycle', color=Style.B_WHITE)
self.start()
grid_hold_timestamp = self.grid_hold.get('timestamp') if self.grid_hold.get('timestamp') else time.time()
if time.time() - grid_hold_timestamp > SHIFT_GRID_DELAY:
self.grid_hold['timestamp'] = None
self.message_log("Try release hold grid", tlg=True)
buy_side = self.grid_hold['buy_side']
depo = self.grid_hold['depo']
#
funds = self.get_buffered_funds()
ff = funds.get(self.f_currency, 0)
ff = self.round_truncate(float2decimal(ff.available) if ff else Decimal('0.0'), base=True)
fs = funds.get(self.s_currency, 0)
fs = self.round_truncate(float2decimal(fs.available) if fs else Decimal('0.0'), base=False)
#
if buy_side:
diff_s = self.deposit_second - fs
diff_f = diff_s / self.avg_rate
go_trade = bool(ff >= diff_f)
else:
diff_f = self.deposit_first - ff
diff_s = diff_f * self.avg_rate
go_trade = bool(fs >= diff_s)
#
self.message_log(f"go_trade: {go_trade}, ff: {ff:f}, fs: {fs:f}, avg_rate: {self.avg_rate},"
f" diff_f: {diff_f:f}, diff_s: {diff_s:f}", log_level=LogLevel.DEBUG)
if go_trade:
self.message_log(f"Release hold grid: necessary {depo}, exist {fs if buy_side else ff}\n"
f"Difference first: {diff_f}, second: {diff_s}")
self.sum_amount_first += diff_f
self.sum_amount_second += diff_s
self.message_log(f"Sum_amount_first: {self.sum_amount_first},"
f" Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
depo = fs if buy_side else ff
self.message_log(f"New depo is {depo}")
# Check min amount for placing TP
if self.check_min_amount_for_tp():
self.tp_hold_additional = True
self.place_grid(buy_side, depo, self.reverse_target_amount)
# endregion
return {'command': json.dumps(self.command),
'cycle_buy': json.dumps(self.cycle_buy),
'cycle_buy_count': json.dumps(self.cycle_buy_count),
'cycle_sell_count': json.dumps(self.cycle_sell_count),
'cycle_time': json.dumps(self.cycle_time, default=str),
'cycle_time_reverse': json.dumps(self.cycle_time_reverse, default=str),
'deposit_first': json.dumps(self.deposit_first),
'deposit_second': json.dumps(self.deposit_second),
'last_shift_time': json.dumps(self.last_shift_time),
'martin': json.dumps(self.martin),
'order_q': json.dumps(self.order_q),
'orders': json.dumps(self.orders_grid.get()),
'orders_hold': json.dumps(self.orders_hold.get()),
'orders_save': json.dumps(self.orders_save.get()),
'over_price': json.dumps(self.over_price),
'part_amount_first': json.dumps(self.part_amount_first),
'part_amount_second': json.dumps(self.part_amount_second),
'initial_first': json.dumps(self.initial_first),
'initial_second': json.dumps(self.initial_second),
'initial_reverse_first': json.dumps(self.initial_reverse_first),
'initial_reverse_second': json.dumps(self.initial_reverse_second),
'profit_first': json.dumps(self.profit_first),
'profit_second': json.dumps(self.profit_second),
'reverse': json.dumps(self.reverse),
'reverse_hold': json.dumps(self.reverse_hold),
'reverse_init_amount': json.dumps(self.reverse_init_amount),
'reverse_price': json.dumps(self.reverse_price),
'reverse_target_amount': json.dumps(self.reverse_target_amount),
'shift_grid_threshold': json.dumps(self.shift_grid_threshold),
'status_time': json.dumps(self.status_time),
'sum_amount_first': json.dumps(self.sum_amount_first),
'sum_amount_second': json.dumps(self.sum_amount_second),
'sum_profit_first': json.dumps(self.sum_profit_first),
'sum_profit_second': json.dumps(self.sum_profit_second),
'tp_amount': json.dumps(self.tp_amount),
'tp_init': json.dumps(str(self.tp_init)),
'tp_order_id': json.dumps(self.tp_order_id),
'tp_part_amount_first': json.dumps(self.tp_part_amount_first),
'tp_part_amount_second': json.dumps(self.tp_part_amount_second),
'tp_target': json.dumps(self.tp_target),
'tp_order': json.dumps(str(self.tp_order)),
'tp_wait_id': json.dumps(self.tp_wait_id)}
def restore_strategy_state(self, strategy_state: Dict[str, str] = None) -> None:
if strategy_state:
# Restore from file if lose state only
self.message_log("restore_strategy_state from saved state:", log_level=LogLevel.DEBUG)
self.message_log("\n".join(f"{k}\t{v}" for k, v in strategy_state.items()), log_level=LogLevel.DEBUG)
#
self.command = json.loads(strategy_state.get('command'))
self.cycle_buy = json.loads(strategy_state.get('cycle_buy'))
self.cycle_buy_count = json.loads(strategy_state.get('cycle_buy_count'))
self.cycle_sell_count = json.loads(strategy_state.get('cycle_sell_count'))
self.cycle_time = datetime.strptime(json.loads(strategy_state.get('cycle_time')), '%Y-%m-%d %H:%M:%S.%f')
self.cycle_time_reverse = json.loads(strategy_state.get('cycle_time_reverse'))
if self.cycle_time_reverse:
self.cycle_time_reverse = datetime.strptime(self.cycle_time_reverse, '%Y-%m-%d %H:%M:%S.%f')
else:
self.cycle_time_reverse = None
self.deposit_first = float2decimal(json.loads(strategy_state.get('deposit_first')))
self.deposit_second = float2decimal(json.loads(strategy_state.get('deposit_second')))
self.last_shift_time = json.loads(strategy_state.get('last_shift_time'))
self.martin = float2decimal(json.loads(strategy_state.get('martin')))
self.order_q = json.loads(strategy_state.get('order_q'))
self.orders_grid.restore(json.loads(strategy_state.get('orders')))
self.orders_hold.restore(json.loads(strategy_state.get('orders_hold')))
self.orders_save.restore(json.loads(strategy_state.get('orders_save')))
self.over_price = json.loads(strategy_state.get('over_price'))
self.part_amount_first = float2decimal(json.loads(strategy_state.get('part_amount_first')))
self.part_amount_second = float2decimal(json.loads(strategy_state.get('part_amount_second')))
self.initial_first = float2decimal(json.loads(strategy_state.get('initial_first')))
self.initial_second = float2decimal(json.loads(strategy_state.get('initial_second')))
self.initial_reverse_first = float2decimal(json.loads(strategy_state.get('initial_reverse_first')))
self.initial_reverse_second = float2decimal(json.loads(strategy_state.get('initial_reverse_second')))
self.profit_first = float2decimal(json.loads(strategy_state.get('profit_first')))
self.profit_second = float2decimal(json.loads(strategy_state.get('profit_second')))
self.reverse = json.loads(strategy_state.get('reverse'))
self.reverse_hold = json.loads(strategy_state.get('reverse_hold'))
self.reverse_init_amount = float2decimal(json.loads(strategy_state.get('reverse_init_amount')))
self.reverse_price = json.loads(strategy_state.get('reverse_price'))
self.reverse_target_amount = json.loads(strategy_state.get('reverse_target_amount'))
self.shift_grid_threshold = json.loads(strategy_state.get('shift_grid_threshold'))
self.status_time = json.loads(strategy_state.get('status_time'))
self.sum_amount_first = float2decimal(json.loads(strategy_state.get('sum_amount_first')))
self.sum_amount_second = float2decimal(json.loads(strategy_state.get('sum_amount_second')))
self.sum_profit_first = float2decimal(json.loads(strategy_state.get('sum_profit_first')))
self.sum_profit_second = float2decimal(json.loads(strategy_state.get('sum_profit_second')))
self.tp_amount = float2decimal(json.loads(strategy_state.get('tp_amount')))
self.tp_init = eval(json.loads(strategy_state.get('tp_init')))
self.tp_order_id = json.loads(strategy_state.get('tp_order_id'))
self.tp_part_amount_first = float2decimal(json.loads(strategy_state.get('tp_part_amount_first')))
self.tp_part_amount_second = float2decimal(json.loads(strategy_state.get('tp_part_amount_second')))
self.tp_target = float2decimal(json.loads(strategy_state.get('tp_target')))
self.tp_order = eval(json.loads(strategy_state.get('tp_order'))) if strategy_state.get('tp_order') else ()
self.tp_wait_id = json.loads(strategy_state.get('tp_wait_id')) if strategy_state.get('tp_wait_id') else None
# The handled cases assume the actual open orders are equal to or fewer than the saved ones
# Exotic cases (e.g. a drop while the grid was being placed, or an unconfirmed TP) are left for later
self.start_process()
open_orders = self.get_buffered_open_orders(True) # lgtm [py/call/wrong-arguments]
tp_order = None
# Separate TP order
if self.tp_order_id:
for i, o in enumerate(open_orders):
if o.id == self.tp_order_id:
tp_order = open_orders[i]
del open_orders[i] # skipcq: PYL-E1138
break
# Possible strategy states compared with the saved one
grid_orders_len = len(self.orders_grid)
open_orders_len = len(open_orders)
grid_no_change = grid_orders_len == open_orders_len # Grid No change
grid_less = grid_orders_len > open_orders_len > 0
grid_hold = open_orders_len == 0 and self.orders_hold
grid_more = grid_orders_len < open_orders_len # Grid more, some order(s) was placed
grid_filled = grid_orders_len > 0 and open_orders_len == 0 and not self.orders_hold # Grid was complete
tp_no_change = (tp_order and self.tp_order_id) or (not tp_order and not self.tp_order_id)
tp_placed = tp_order and not self.tp_order_id
tp_filled = not tp_order and self.tp_order_id
no_orders = grid_orders_len == 0 and not self.tp_order_id
#
self.avg_rate = float2decimal(self.get_buffered_ticker().last_price)
#
if self.command == 'stopped':
self.message_log("Restore, strategy stopped. Need manual action", tlg=True)
elif grid_no_change and tp_no_change:
if grid_hold:
self.message_log("Restore, no grid orders, place from hold now", tlg=True)
self.place_grid_part()
elif no_orders:
self.restart = True
self.message_log("Restore, no orders, restart", tlg=True)
self.start()
elif not self.tp_order_id and self.check_min_amount_for_tp():
self.message_log("Restore, replace TP", tlg=True)
self.place_profit_order()
else:
self.message_log("Restore, No difference, go work", tlg=True)
elif grid_filled and tp_filled:
self.message_log("Restore, all grid orders and TP was filled", tlg=True)
# Get actual parameter of filled tp order
market_order = self.get_buffered_completed_trades(True)
amount_first = Decimal('0')
amount_second = Decimal('0')
for o in market_order:
if o.order_id == self.tp_order_id:
amount_first += float2decimal(o.amount)
amount_second += float2decimal(o.amount) * float2decimal(o.price)
if amount_first == 0:
# If execution event was missed
_buy, _amount, _price = self.tp_order
amount_first = self.round_truncate(float2decimal(_amount), base=True)
amount_second = self.round_truncate(float2decimal(_amount * _price), base=False)
self.tp_was_filled = (amount_first, amount_second, True)
self.tp_order_id = None
self.tp_order = ()
self.message_log(f"restore_strategy_state.was_filled_tp: {self.tp_was_filled}", log_level=LogLevel.DEBUG)
# Calculate the total traded amount for both currencies
amount_first = Decimal('0')
amount_second = Decimal('0')
for i in self.orders_grid:
amount_first += i['amount']
amount_second += i['amount'] * i['price']
print(f"id={i['id']}, first: {i['amount']}, price: {i['price']}")
print(f"Total grid amount first: {amount_first}, second: {amount_second}")
# Clear list of grid order
self.orders_grid.orders_list.clear()
self.grid_handler(_amount_first=amount_first, _amount_second=amount_second, after_full_fill=True)
elif grid_filled and tp_no_change:
self.message_log('Restore, No grid orders -> Reverse', tlg=True)
# Assume the missing orders were executed on conditions no worse than those saved
# Calculate the total traded amount for both currencies
amount_first = Decimal('0')
amount_second = Decimal('0')
for i in self.orders_grid:
amount_first += i['amount']
amount_second += i['amount'] * i['price']
print(f"id={i['id']}, first: {i['amount']}, price: {i['price']}")
print(f"Total amount first: {amount_first}, second: {amount_second}")
# Clear list of grid order
self.orders_grid.orders_list.clear()
self.grid_handler(_amount_first=amount_first, _amount_second=amount_second, after_full_fill=True)
elif grid_less and tp_filled:
self.message_log("Restore, some grid orders and TP was filled", tlg=True)
# Get actual parameter of filled tp order
market_order = self.get_buffered_completed_trades(True)
amount_first = Decimal('0')
amount_second = Decimal('0')
for o in market_order:
if o.order_id == self.tp_order_id:
amount_first += float2decimal(o.amount)
amount_second += float2decimal(o.amount) * float2decimal(o.price)
print(f"order_id={o.order_id}, first: {o.amount}, price: {o.price}")
if amount_first == 0:
# If execution event was missed
_buy, _amount, _price = self.tp_order
amount_first = self.round_truncate(float2decimal(_amount), base=True)
amount_second = self.round_truncate(float2decimal(_amount * _price), base=False)
self.tp_was_filled = (amount_first, amount_second, True)
self.tp_order_id = None
self.tp_order = ()
self.message_log(f"restore_strategy_state.was_filled_tp: {self.tp_was_filled}", log_level=LogLevel.DEBUG)
# Find the grid orders executed while the state was stale: saved ids no longer present on the exchange
exch_orders_id = []
save_orders_id = []
for i in open_orders:
exch_orders_id.append(i.id)
for i in self.orders_grid:
save_orders_id.append(i.get('id'))
print(f"restore_strategy_state.exch_orders_id: {exch_orders_id}")
print(f"restore_strategy_state.save_orders_id: {save_orders_id}")
diff_id = list(set(save_orders_id).difference(exch_orders_id))
print(f"Executed order id is: {diff_id}")
# Calculate the total traded amount for both currencies
amount_first = Decimal('0')
amount_second = Decimal('0')
for i in self.orders_grid:
if i['id'] in diff_id:
amount_first += i['amount']
amount_second += i['amount'] * i['price']
print(f"id={i['id']}, first: {i['amount']}, price: {i['price']}")
self.message_log(f"Total amount first: {amount_first:f}, second: {amount_second:f}", color=Style.B_WHITE)
# Remove from list of grid order
for i in diff_id:
self.orders_grid.remove(i)
# Calculate trade amount with Fee
amount_first_fee, amount_second_fee = self.fee_for_grid(amount_first, amount_second)
# Update the cycle trading totals for both currencies
self.sum_amount_first += amount_first_fee
self.sum_amount_second += amount_second_fee
if open_orders_len == 0 and self.orders_hold:
self.place_grid_part()
self.after_filled_tp(one_else_grid=True)
elif grid_less:
self.message_log("Restore, Less grid orders -> replace tp order", tlg=True)
exch_orders_id = []
save_orders_id = []
for i in open_orders:
exch_orders_id.append(i.id)
for i in self.orders_grid:
save_orders_id.append(i.get('id'))
print(f"restore_strategy_state.exch_orders_id: {exch_orders_id}")
print(f"restore_strategy_state.save_orders_id: {save_orders_id}")
diff_id = list(set(save_orders_id).difference(exch_orders_id))
print(f"Executed order id is: {diff_id}")
# Calculate the total traded amount for both currencies
amount_first = Decimal('0')
amount_second = Decimal('0')
for i in self.orders_grid:
if i['id'] in diff_id:
amount_first += i['amount']
amount_second += i['amount'] * i['price']
print(f"id={i['id']}, first: {i['amount']}, price: {i['price']}")
self.message_log(f"Total amount first: {amount_first:f}, second: {amount_second:f}", color=Style.B_WHITE)
# Remove from list of grid order
for i in diff_id:
self.orders_grid.remove(i)
self.grid_handler(_amount_first=amount_first, _amount_second=amount_second, after_full_fill=True)
elif tp_filled:
self.message_log('Restore, TP order was filled -> Restart', tlg=True)
# Get actual parameters of the filled TP order
market_order = self.get_buffered_completed_trades(True)
amount_first = Decimal('0')
amount_second = Decimal('0')
for o in market_order:
if o.order_id == self.tp_order_id:
amount_first += float2decimal(o.amount)
amount_second += float2decimal(o.amount) * float2decimal(o.price)
print(f"order_id={o.order_id}, first: {o.amount}, price: {o.price}")
if amount_first == 0:
# If execution event was missed
_buy, _amount, _price = self.tp_order
amount_first = self.round_truncate(float2decimal(_amount), base=True)
amount_second = self.round_truncate(float2decimal(_amount * _price), base=False)
self.tp_was_filled = (amount_first, amount_second, True)
self.tp_order_id = None
self.tp_order = ()
self.grid_remove = True
self.cancel_grid()
elif grid_more and self.orders_init:
self.message_log('Restore, some grid order(s) were placed', tlg=True)
elif tp_placed:
self.message_log('Restore, the take profit order was placed', tlg=True)
else:
self.message_log('Restore, unexpected state. Needs investigation', tlg=True)
# self.unsuspend()
def start(self) -> None:
self.message_log('Start')
# Cancel take profit order in all state
self.tp_order_hold.clear()
self.tp_hold = False
self.tp_was_filled = ()
self.order_q_placed = False
self.grid_place_flag = False
if self.tp_order_id:
self.tp_cancel = True
if not self.cancel_order_id:
self.cancel_order_id = self.tp_order_id
self.cancel_order(self.tp_order_id)
return
if self.tp_wait_id:
# TP order placement is pending: wait for it, then cancel it (handled in on_cancel_order_success) and restart
self.tp_cancel = True
return
funds = self.get_buffered_funds()
ff = funds.get(self.f_currency, 0)
ff = float2decimal(ff.total_for_currency) if ff else Decimal('0.0')
fs = funds.get(self.s_currency, 0)
fs = float2decimal(fs.total_for_currency) if fs else Decimal('0.0')
# Save initial funds and cycle statistics to .db for external analytics
if self.first_run:
self.start_process()
if self.reverse:
self.initial_reverse_first = self.round_truncate(ff, base=True)
self.initial_reverse_second = self.round_truncate(fs, base=False)
else:
self.initial_first = self.round_truncate(ff, base=True)
self.initial_second = self.round_truncate(fs, base=False)
elif self.restart and not GRID_ONLY:
if self.reverse:
delta_f = ff - self.initial_reverse_first
delta_s = fs - self.initial_reverse_second
else:
delta_f = ff - self.initial_first
delta_s = fs - self.initial_second
delta = delta_f * self.avg_rate + delta_s
self.message_log(f"Operational difference from initial funds: {delta}")
go_trade = True
if delta < 0:
tcm = self.get_trading_capability_manager()
# Tolerate a loss of up to 10% of the minimum lot size to absorb rounding errors
min_delta = float2decimal(0.1 * tcm.get_min_buy_amount(float(self.avg_rate))) * self.avg_rate
if delta.copy_abs() > min_delta:
go_trade = False
if self.wait_refunding_for_start or go_trade:
self.wait_refunding_for_start = False
if self.reverse:
self.initial_reverse_first = self.round_truncate(ff, base=True, _rounding=ROUND_FLOOR)
self.initial_reverse_second = self.round_truncate(fs, base=False, _rounding=ROUND_FLOOR)
ff = self.initial_first
fs = self.initial_second
pf = PROFIT_REVERSE * self.profit_first / (1 - PROFIT_REVERSE)
ps = PROFIT_REVERSE * self.profit_second / (1 - PROFIT_REVERSE)
else:
self.initial_first = self.round_truncate(ff, base=True, _rounding=ROUND_FLOOR)
self.initial_second = self.round_truncate(fs, base=False, _rounding=ROUND_FLOOR)
pf = self.profit_first
ps = self.profit_second
if self.cycle_buy:
df = Decimal('0')
ds = self.deposit_second - self.profit_second
else:
df = self.deposit_first - self.profit_first
ds = Decimal('0')
ct = datetime.utcnow() - self.cycle_time
ct = ct.total_seconds()
data_to_db = {'f_currency': self.f_currency,
's_currency': self.s_currency,
'f_funds': ff,
's_funds': fs,
'avg_rate': self.avg_rate,
'cycle_buy': self.cycle_buy,
'f_depo': df,
's_depo': ds,
'f_profit': pf,
's_profit': ps,
'order_q': self.order_q,
'over_price': self.over_price,
'cycle_time': ct}
if self.queue_to_db:
print('Save data to .db')
self.queue_to_db.put(data_to_db)
else:
self.wait_refunding_for_start = True
self.message_log(f"Wait refunding for start, having now: first: {ff}, second: {fs}")
return
self.avg_rate = float2decimal(self.get_buffered_ticker().last_price)
if not self.first_run and not self.start_after_shift and not self.reverse and not GRID_ONLY:
self.message_log(f"Complete {self.cycle_buy_count} buy cycle and {self.cycle_sell_count} sell cycle\n"
f"For all cycles profit:\n"
f"First: {self.sum_profit_first}\n"
f"Second: {self.sum_profit_second}\n"
f"Summary: {self.sum_profit_first * self.avg_rate + self.sum_profit_second:f}\n")
mem = psutil.virtual_memory().percent
if mem > 80:
self.message_log(f"For {VPS_NAME} critical memory availability, end", tlg=True)
self.command = 'end'
elif mem > 70:
self.message_log(f"For {VPS_NAME} low memory availability, stop after end of cycle", tlg=True)
self.command = 'stop'
if self.command == 'end' or (self.command == 'stop' and
(not self.reverse or (self.reverse and REVERSE_STOP))):
self.command = 'stopped'
self.message_log('Stopped, waiting for manual action', tlg=True)
else:
n = gc.collect(generation=2)
print('Number of unreachable objects collected by GC:', n)
self.message_log(f"Initial first: {self.initial_reverse_first if self.reverse else self.initial_first},"
f" second: {self.initial_reverse_second if self.reverse else self.initial_second}",
color=Style.B_WHITE)
self.restart = None
# Init variable
self.profit_first = Decimal('0')
self.profit_second = Decimal('0')
self.cycle_time = datetime.utcnow()
self.over_price = OVER_PRICE
self.order_q = ORDER_Q
if self.cycle_buy:
amount = self.deposit_second
if not self.start_after_shift or self.first_run:
self.message_log(f"Start Buy{' Reverse' if self.reverse else ''}"
f" {'asset' if GRID_ONLY else 'cycle'} with "
f"{amount:f} {self.s_currency} depo", tlg=True)
else:
if USE_ALL_FIRST_FUND and (self.reverse or GRID_ONLY):
ff = funds.get(self.f_currency, 0)
fund = float2decimal(ff.available) if ff else Decimal('0.0')
if fund > self.deposit_first:
self.deposit_first = fund
self.message_log('Using all available funds for the first currency')
self.deposit_first = self.round_truncate(self.deposit_first, base=True)
amount = self.deposit_first
if not self.start_after_shift or self.first_run:
self.message_log(f"Start Sell{' Reverse' if self.reverse else ''}"
f" {'asset' if GRID_ONLY else 'cycle'} with "
f"{amount:f} {self.f_currency} depo", tlg=True)
if self.reverse:
self.message_log(f"For Reverse cycle target return amount: {self.reverse_target_amount}",
color=Style.B_WHITE)
self.start_after_shift = False
self.first_run = False
self.debug_output()
self.place_grid(self.cycle_buy, amount, self.reverse_target_amount)
def stop(self) -> None:
self.message_log('Stop')
self.queue_to_db.put({'stop_signal': True})
self.queue_to_tlg.put('stop_signal_QWE#@!')
self.connection_analytic.commit()
self.connection_analytic.close()
self.connection_analytic = None
def suspend(self) -> None:
print('Suspend')
self.queue_to_db.put({'stop_signal': True})
self.queue_to_tlg.put('stop_signal_QWE#@!')
self.connection_analytic.commit()
self.connection_analytic.close()
self.connection_analytic = None
def unsuspend(self) -> None:
print('Unsuspend')
self.start_process()
##############################################################
# strategy function
##############################################################
def place_grid(self,
buy_side: bool,
depo: Decimal,
reverse_target_amount: float,
allow_grid_shift: bool = True) -> None:
self.message_log(f"place_grid: buy_side:{buy_side}, depo: {depo},"
f" reverse_target_amount: {reverse_target_amount},"
f" allow_grid_shift: {allow_grid_shift}", log_level=LogLevel.DEBUG)
self.grid_hold.clear()
self.last_shift_time = None
tcm = self.get_trading_capability_manager()
if buy_side:
max_bid_price = self.get_buffered_order_book().bids[0].price
base_price = max_bid_price - PRICE_SHIFT * max_bid_price / 100
min_amount = tcm.get_min_buy_amount(base_price)
else:
min_ask_price = self.get_buffered_order_book().asks[0].price
base_price = min_ask_price + PRICE_SHIFT * min_ask_price / 100
min_amount = tcm.get_min_sell_amount(base_price)
# print(f"place_grid.base_price: {base_price}")
min_delta = tcm.get_minimal_price_change(base_price)
if ADAPTIVE_TRADE_CONDITION or self.reverse:
try:
self.set_trade_conditions(buy_side, float(depo), base_price, reverse_target_amount,
min_amount, min_delta)
except Exception as ex:
self.message_log(f"Do not set trade conditions: {ex}", log_level=LogLevel.ERROR, color=Style.B_RED)
self.over_price = OVER_PRICE
self.order_q = ORDER_Q
self.message_log(f"For{' Reverse' if self.reverse else ''} {'Buy' if buy_side else 'Sell'}"
f" cycle set {self.order_q} orders for {self.over_price:.2f}% over price", tlg=False)
# Decimal zone
base_price_dec = float2decimal(base_price)
min_delta_dec = float2decimal(min_delta)
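# Price step between grid levels: the over_price span (percent of base price) is spread over order_q - 1 gaps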
delta_price = self.over_price * base_price_dec / (100 * (self.order_q - 1))
funds = self.get_buffered_funds()
price_prev = base_price_dec
if buy_side:
fund = funds.get(self.s_currency, 0)
fund = float2decimal(fund.available) if fund else Decimal('0.0')
currency = self.s_currency
else:
fund = funds.get(self.f_currency, 0)
fund = float2decimal(fund.available) if fund else Decimal('0.0')
currency = self.f_currency
if depo <= fund:
for i in range(self.order_q):
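# price_k = 1 - log(order_q - i, order_q + LINEAR_GRID_K) grows from near 0 for the first order to 1 for the last,
# so orders are packed near the base price and spread out further away; a larger LINEAR_GRID_K gives a more even
# spacing, and a negative LINEAR_GRID_K disables the coefficient entirely (uniform grid)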
if LINEAR_GRID_K >= 0:
price_k = float2decimal(1 - math.log(self.order_q - i, self.order_q + LINEAR_GRID_K))
else:
price_k = 1
if buy_side:
price = base_price_dec - i * delta_price * price_k
else:
price = base_price_dec + i * delta_price * price_k
price = float2decimal(tcm.round_price(float(price), RoundingType.ROUND))
if buy_side:
if i and price_prev - price < min_delta_dec:
price = price_prev - min_delta_dec
else:
if i and price - price_prev < min_delta_dec:
price = price_prev + min_delta_dec
price_prev = price
# print(f"place_grid.round_price: {price}")
amount = depo * self.martin**i * (1 - self.martin) / (1 - self.martin**self.order_q)
if buy_side:
amount /= price
amount = self.round_truncate(amount, base=True)
# create order for grid
if i < GRID_MAX_COUNT:
waiting_order_id = self.place_limit_order(buy_side, float(amount), float(price))
self.orders_init.append(waiting_order_id, buy_side, amount, price)
else:
self.orders_hold.append(i, buy_side, amount, price)
if not GRID_ONLY and allow_grid_shift:
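# Threshold at which the price is considered to have run away from the grid
# (2 * PRICE_SHIFT percent beyond the base price); it is checked elsewhere in the strategy to shift the grid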
if buy_side:
self.shift_grid_threshold = base_price + 2 * PRICE_SHIFT * base_price / 100
else:
self.shift_grid_threshold = base_price - 2 * PRICE_SHIFT * base_price / 100
self.message_log(f"Shift grid threshold: {self.shift_grid_threshold:.2f}")
else:
self.grid_hold = {'buy_side': buy_side,
'depo': depo,
'timestamp': time.time()}
self.message_log(f"Hold grid for {'Buy' if buy_side else 'Sell'} cycle with {depo} {currency} depo."
f" Available funds is {fund} {currency}", tlg=False)
if self.tp_hold_additional:
self.message_log("Replace take profit order after place additional grid orders", tlg=True)
self.tp_hold_additional = False
self.place_profit_order()
def place_profit_order(self, by_market: bool = False) -> None:
# TODO Check for min amount
if not GRID_ONLY:
self.message_log(f"place_profit_order: by_market: {by_market}", log_level=LogLevel.DEBUG)
self.tp_order_hold.clear()
if self.tp_wait_id or self.cancel_order_id or self.tp_was_filled:
# Waiting for confirmation, for the old order to be cancelled, or for processing to finish; then replace it
self.tp_hold = True
self.message_log('Waiting for the TP order to finish before replacing it', color=Style.B_WHITE)
elif self.tp_order_id:
# Cancel take profit order, place new
self.tp_hold = True
self.cancel_order_id = self.tp_order_id
self.cancel_order(self.tp_order_id)
self.message_log('Hold take profit order, replace existing', color=Style.B_WHITE)
else:
buy_side = not self.cycle_buy
# Calculate take profit order
tp = self.calc_profit_order(buy_side, by_market=by_market)
price = tp.get('price')
amount = tp.get('amount')
profit = tp.get('profit')
target = tp.get('target')
# Check funds available
funds = self.get_buffered_funds()
if buy_side:
fund = funds.get(self.s_currency, 0)
fund = float2decimal(fund.available) if fund else Decimal('0.0')
else:
fund = funds.get(self.f_currency, 0)
fund = float2decimal(fund.available) if fund else Decimal('0.0')
if buy_side and amount * price > fund:
# Save take profit order and wait for balance update
self.tp_order_hold = {'buy_side': buy_side,
'amount': amount * price}
self.message_log(f"Hold take profit order for Buy {amount} {self.f_currency} by {price},"
f" wait {amount * price} {self.s_currency}, exist: {fund}")
elif not buy_side and amount > fund:
# Save take profit order and wait for balance update
self.tp_order_hold = {'buy_side': buy_side, 'amount': amount}
self.message_log(f"Hold take profit order for Sell {amount} {self.f_currency}"
f" by {price}, exist {fund}")
else:
# Create take profit order
self.message_log(f"Create {'Buy' if buy_side else 'Sell'} take profit order,"
f" vlm: {amount}, price: {price}, profit: {profit}%")
self.tp_target = target
_amount = float(amount)
_price = float(price)
if not STANDALONE:
tcm = self.get_trading_capability_manager()
if not tcm.is_limit_order_valid(buy_side, _amount, _price):
_amount = tcm.round_amount(_amount, RoundingType.FLOOR)
if buy_side:
_price = tcm.round_price(_price, RoundingType.FLOOR)
else:
_price = tcm.round_price(_price, RoundingType.CEIL)
self.message_log(f"Rounded amount: {_amount}, price: {_price}")
self.tp_order = (buy_side, _amount, _price)
self.tp_wait_id = self.place_limit_order(buy_side, _amount, _price)
def message_log(self, msg: str, log_level=LogLevel.INFO, tlg: bool = False, color=Style.WHITE) -> None:
if tlg and color == Style.WHITE:
color = Style.B_WHITE
if log_level in (LogLevel.ERROR, LogLevel.CRITICAL):
tlg = True
color = Style.B_RED
color = color if STANDALONE else 0
color_msg = color+msg+Style.RESET if color else msg
if log_level not in LOG_LEVEL_NO_PRINT:
print(f"{datetime.now().strftime('%d/%m %H:%M:%S')} {color_msg}")
write_log(log_level, msg)
msg = self.tlg_header + msg
if tlg and self.queue_to_tlg:
self.status_time = time.time()
self.queue_to_tlg.put(msg)
def bollinger_band(self, candle_size_in_minutes: int, number_of_candles: int) -> Dict[str, float]:
# Bottom BB = sma - KBB * stdev
# Top BB = sma + KBB * stdev
# For a Buy cycle, over_price = 100 * (Ticker.last_price - bbb) / Ticker.last_price
# For a Sell cycle, over_price = 100 * (tbb - Ticker.last_price) / Ticker.last_price
candle_close = []
candle = self.get_buffered_recent_candles(candle_size_in_minutes=candle_size_in_minutes,
number_of_candles=number_of_candles,
include_current_building_candle=True)
for i in candle:
candle_close.append(i.close)
# print(f"bollinger_band.candle_close: {candle_close}")
sma = statistics.mean(candle_close)
st_dev = statistics.stdev(candle_close)
# print('sma={}, st_dev={}'.format(sma, st_dev))
tbb = sma + KBB * st_dev
bbb = sma - KBB * st_dev
self.message_log(f"bollinger_band: tbb={tbb}, bbb={bbb}")
return {'tbb': tbb, 'bbb': bbb}
def set_trade_conditions(self,
buy_side: bool,
depo: float,
base_price: float,
reverse_target_amount: float,
amount_min: float,
delta_min: float) -> None:
self.message_log(f"set_trade_conditions: buy_side: {buy_side}, depo: {depo}, base_price: {base_price},"
f" reverse_target_amount: {reverse_target_amount}, amount_min: {amount_min},"
f" delta_min: {delta_min}", LogLevel.DEBUG)
if buy_side:
depo /= base_price
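# Minimum grid span: at least one minimal price step per order, and never below 0.01% per order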
over_price_min = max(100 * delta_min * ORDER_Q / base_price, 0.01 * ORDER_Q)
# print('over_price_min: {}'.format(over_price_min))
if self.reverse:
over_price = self.calc_over_price(buy_side, depo, base_price, reverse_target_amount)
else:
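# Non-reverse cycle: size the grid span from the distance between the base price and the opposite Bollinger band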
bb = self.bollinger_band(BB_CANDLE_SIZE_IN_MINUTES, BB_NUMBER_OF_CANDLES)
if buy_side:
bbb = bb.get('bbb')
over_price = 100*(base_price - bbb) / base_price
else:
tbb = bb.get('tbb')
over_price = 100 * (tbb - base_price) / base_price
self.over_price = float2decimal(over_price if over_price >= over_price_min else
OVER_PRICE if over_price <= 0 else over_price_min)
# Adapt grid orders quantity for current over price
order_q = int(float(self.over_price) * ORDER_Q / float(OVER_PRICE))
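# q_max: the largest order count for which the smallest (first) order of the geometric split is still
# at least 1.5 * amount_min, obtained by solving depo * (1 - martin) / (1 - martin**q) >= 1.5 * amount_min for q;
# the loop below then shrinks q_max until the smallest price gap exceeds delta_min (or q_max reaches ORDER_Q)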
q_max = int(math.log(1 - depo * (1 - float(self.martin)) / (amount_min * 1.5), float(self.martin)))
while True:
delta_price = float(self.over_price) * base_price / (100 * (q_max - 1))
if LINEAR_GRID_K >= 0:
price_k = 1 - math.log(q_max - 1, q_max + LINEAR_GRID_K)
else:
price_k = 1
delta = delta_price * price_k
if delta > delta_min or q_max <= ORDER_Q:
break
q_max -= 1
self.order_q = q_max if order_q > q_max else order_q if order_q >= ORDER_Q else ORDER_Q
# Correction over_price after change quantity of order
if self.reverse:
over_price = self.calc_over_price(buy_side, depo, base_price, reverse_target_amount, exactly=True)
self.over_price = float2decimal(over_price if over_price >= over_price_min else over_price_min)
def set_profit(self) -> Decimal:
self.message_log("set_profit", LogLevel.DEBUG)
last_price = None
tbb = None
bbb = None
try:
bb = self.bollinger_band(15, 20)
except statistics.StatisticsError:
self.message_log("Set profit Exception, can't calculate BollingerBand, set profit by default",
log_level=LogLevel.WARNING)
else:
tbb = bb.get('tbb')
bbb = bb.get('bbb')
last_price = self.get_buffered_ticker().last_price
if last_price and tbb and bbb:
if self.cycle_buy:
profit = PROFIT_K * 100 * (tbb - last_price) / last_price
else:
profit = PROFIT_K * 100 * (last_price - bbb) / last_price
profit = min(max(profit, PROFIT), PROFIT_MAX)
else:
profit = PROFIT
return Decimal(profit).quantize(Decimal("1.00"), rounding=ROUND_CEILING)
def calc_profit_order(self, buy_side: bool, by_market: bool = False) -> Dict[str, Decimal]:
"""
Calculation based on amount value
:param buy_side: for take profit order, inverse to current cycle
:param by_market:
:return:
"""
self.message_log(f"calc_profit_order: buy_side: {buy_side}, by_market: {by_market}", LogLevel.DEBUG)
tcm = self.get_trading_capability_manager()
# Calculate take profit order
n = len(self.orders_grid) + len(self.orders_init) + len(self.orders_hold) + len(self.orders_save)
if PROFIT_MAX and (n > 1 or self.reverse):
profit = self.set_profit()
else:
profit = PROFIT
if by_market:
fee = FEE_TAKER
else:
fee = FEE_MAKER
fee = fee if FEE_IN_PAIR else fee + FEE_MAKER
if buy_side:
# Calculate target amount for first
self.tp_amount = self.sum_amount_first
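# The TP must buy back the base amount sold by the grid plus fee and profit; its price is chosen so that
# the grid's quote proceeds (sum_amount_second) cover exactly that target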
target_amount_first = self.sum_amount_first + (fee + profit) * self.sum_amount_first / 100
target_amount_first = self.round_truncate(target_amount_first, base=True, _rounding=ROUND_CEILING)
target = target_amount_first
self.message_log(f"calc_profit_order.target_amount_first: {target_amount_first}",
log_level=LogLevel.DEBUG)
# Calculate depo amount in second
amount_s = self.round_truncate(self.sum_amount_second, base=False, _rounding=ROUND_FLOOR)
price = amount_s / target_amount_first
price = float2decimal(tcm.round_price(float(price), RoundingType.FLOOR))
amount = amount_s / price
amount = self.round_truncate(amount, base=True, _rounding=ROUND_FLOOR)
else:
# Calculate target amount for second
self.tp_amount = self.sum_amount_second
target_amount_second = self.sum_amount_second + (fee + profit) * self.sum_amount_second / 100
target_amount_second = self.round_truncate(target_amount_second, base=False, _rounding=ROUND_CEILING)
target = target_amount_second
self.message_log(f"calc_profit_order.target_amount_second: {target_amount_second}",
log_level=LogLevel.DEBUG)
# Calculate depo amount in second
amount = self.round_truncate(self.sum_amount_first, base=True, _rounding=ROUND_FLOOR)
price = target_amount_second / amount
price = float2decimal(tcm.round_price(float(price), RoundingType.CEIL))
self.tp_init = (self.sum_amount_first, self.sum_amount_second)
self.message_log(f"calc_profit_order: Initial depo for TP: {self.tp_amount}", log_level=LogLevel.DEBUG)
return {'price': price, 'amount': amount, 'profit': profit, 'target': target}
def calc_over_price(self,
buy_side: bool,
depo: float,
base_price: float,
reverse_target_amount: float,
exactly: bool = False) -> float:
self.message_log(f"calc_over_price: buy_side: {buy_side}, depo: {depo}, base_price: {base_price},"
f" reverse_target_amount: {reverse_target_amount},"
f" exactly: {exactly}", log_level=LogLevel.DEBUG)
# Calculate over price for depo refund after Reverse cycle
# Coarse search: fit y = k*x + b through two points and estimate over_price for the target return amount
over_price = 0.0
b = self.calc_grid_avg(buy_side, depo, base_price, over_price)
# print('calc_grid_avg(0): {}'.format(b))
over_price = 50.0
grid_amount_50 = self.calc_grid_avg(buy_side, depo, base_price, over_price)
# print('calc_grid_avg(50): {}'.format(grid_amount_50))
k = (grid_amount_50 - b) / over_price
over_price = (reverse_target_amount - b) / k
# print('over_price coarse: {}'.format(over_price))
# Fine-tune over_price for the target return amount
if exactly and over_price > 0.0:
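# Secant-style refinement: step over_price along the fitted slope k until the grid's return
# reaches the target (dy <= 0); note there is no iteration cap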
i = 0
while True:
dy = reverse_target_amount - self.calc_grid_avg(buy_side, depo, base_price, over_price)
# print('dy: {}'.format(dy))
if dy <= 0:
return over_price
dx = dy / k
over_price += dx
i += 1
return over_price
def adx(self, adx_candle_size_in_minutes: int, adx_number_of_candles: int, adx_period: int) -> Dict[str, float]:
"""
Average Directional Index
Math from https://blog.quantinsti.com/adx-indicator-python/
Test data
high = [90, 95, 105, 120, 140, 165, 195, 230, 270, 315, 365]
low = [82, 85, 93, 106, 124, 147, 175, 208, 246, 289, 337]
close = [87, 87, 97, 114, 133, 157, 186, 223, 264, 311, 350]
##############################################################
"""
high = []
low = []
close = []
candle = self.get_buffered_recent_candles(candle_size_in_minutes=adx_candle_size_in_minutes,
number_of_candles=adx_number_of_candles,
include_current_building_candle=True)
for i in candle:
high.append(i.high)
low.append(i.low)
close.append(i.close)
dm_pos = []
dm_neg = []
tr_arr = []
dm_pos_smooth = []
dm_neg_smooth = []
tr_smooth = []
di_pos = []
di_neg = []
dx = []
n = 1
n_max = len(high) - 1
while n <= n_max:
m_pos = high[n] - high[n - 1]
m_neg = low[n - 1] - low[n]
_m_pos = 0
_m_neg = 0
if m_pos and m_pos > m_neg:
_m_pos = m_pos
if m_neg and m_neg > m_pos:
_m_neg = m_neg
dm_pos.append(_m_pos)
dm_neg.append(_m_neg)
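# True range of the current bar, including the gap from the previous close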
tr = max(high[n], close[n - 1]) - min(low[n], close[n - 1])
tr_arr.append(tr)
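# Wilder smoothing: seed with the plain sum over the first adx_period bars, then smooth as prev - prev / period + current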
if n == adx_period:
dm_pos_smooth.append(sum(dm_pos))
dm_neg_smooth.append(sum(dm_neg))
tr_smooth.append(sum(tr_arr))
if n > adx_period:
dm_pos_smooth.append((dm_pos_smooth[-1] - dm_pos_smooth[-1] / adx_period) + _m_pos)
dm_neg_smooth.append((dm_neg_smooth[-1] - dm_neg_smooth[-1] / adx_period) + _m_neg)
tr_smooth.append((tr_smooth[-1] - tr_smooth[-1] / adx_period) + tr)
if n >= adx_period:
# Calculate +DI, -DI and DX
di_pos.append(100 * dm_pos_smooth[-1] / tr_smooth[-1])
di_neg.append(100 * dm_neg_smooth[-1] / tr_smooth[-1])
dx.append(100 * abs(di_pos[-1] - di_neg[-1]) / abs(di_pos[-1] + di_neg[-1]))
n += 1
_adx = statistics.mean(dx[len(dx) - adx_period::])
return {'adx': _adx, '+DI': di_pos[-1], '-DI': di_neg[-1]}
def calc_grid_avg(self, buy_side: bool, depo: float, base_price: float, over_price: float) -> float:
# self.message_log(f"calc_grid_avg: buy_side: {buy_side}, depo: {depo}, base_price: {base_price},"
# f" over_price: {over_price}", log_level=LogLevel.DEBUG)
# Calculate the amount returned if all grid orders fill, for fixed initial parameters
# (in the first coin for a buy grid, in the second coin for a sell grid)
martin_float = float(self.martin)
if buy_side:
depo = depo * base_price
tcm = self.get_trading_capability_manager()
delta_price = (over_price * base_price) / (100 * (self.order_q - 1))
avg_amount = 0.0
for i in range(self.order_q):
price_k = 1 - math.log(self.order_q - i, self.order_q)
if buy_side:
price = base_price - i * delta_price * price_k
else:
price = base_price + i * delta_price * price_k
price = tcm.round_price(price, RoundingType.ROUND)
amount = depo * pow(martin_float, i) * (1 - martin_float) / (1 - pow(martin_float, self.order_q))
amount = tcm.round_amount(amount, RoundingType.FLOOR)
# print(f"calc_grid_avg.amount: {amount}")
if buy_side:
amount /= price
avg_amount += amount
else:
avg_amount += amount * price
return avg_amount
def start_process(self):
# Init analytic
self.connection_analytic = self.connection_analytic or sqlite3.connect(WORK_PATH + 'funds_rate.db',
check_same_thread=False)
# Create processes for save to .db and send Telegram message
self.pr_db = Thread(target=save_to_db, args=(self.queue_to_db,))
self.pr_tlg = Thread(target=telegram, args=(self.queue_to_tlg, self.tlg_header.split('.')[0],))
if not self.pr_db.is_alive():
print('Start process for .db save')
try:
self.pr_db.start()
except AssertionError as error:
self.message_log(str(error), log_level=LogLevel.ERROR, color=Style.B_RED)
if not self.pr_tlg.is_alive():
print('Start process for Telegram')
try:
self.pr_tlg.start()
except AssertionError as error:
self.message_log(str(error), log_level=LogLevel.ERROR, color=Style.B_RED)
def fee_for_grid(self, amount_first: Decimal, amount_second: Decimal,
by_market: bool = False) -> (Decimal, Decimal):
"""
Calculate the trade amounts with fee for a grid order, for both currencies
"""
if FEE_IN_PAIR:
if by_market:
fee = FEE_TAKER
else:
fee = FEE_MAKER
if FEE_BNB_IN_PAIR:
if self.cycle_buy:
amount_first -= self.round_truncate(fee * amount_first / 100, base=True, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"For grid order First - fee: {amount_first}", log_level=LogLevel.DEBUG)
else:
amount_first += self.round_truncate(fee * amount_first / 100, base=True, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"For grid order First + fee: {amount_first}", log_level=LogLevel.DEBUG)
else:
if self.cycle_buy:
if FEE_SECOND:
amount_second += self.round_truncate(fee * amount_second / 100, base=False, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"For grid order Second + fee: {amount_second}", log_level=LogLevel.DEBUG)
else:
amount_first -= self.round_truncate(fee * amount_first / 100, base=True, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"For grid order First - fee: {amount_first}", log_level=LogLevel.DEBUG)
else:
amount_second -= self.round_truncate(fee * amount_second / 100, base=False, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"For grid order Second - fee: {amount_second}", log_level=LogLevel.DEBUG)
return amount_first, amount_second
def fee_for_tp(self, amount_first: Decimal, amount_second: Decimal, by_market: bool = False) -> (Decimal, Decimal):
"""
Calculate the trade amounts with fee for the take profit order, for both currencies
"""
if by_market:
fee = FEE_TAKER
else:
fee = FEE_MAKER
if FEE_IN_PAIR:
if FEE_BNB_IN_PAIR:
if self.cycle_buy:
amount_first += self.round_truncate(fee * amount_first / 100, base=True, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"Take profit order First + fee: {amount_first}", log_level=LogLevel.DEBUG)
else:
amount_first -= self.round_truncate(fee * amount_first / 100, base=True, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"Take profit order First - fee: {amount_first}", log_level=LogLevel.DEBUG)
else:
if self.cycle_buy:
amount_second -= self.round_truncate(fee * amount_second / 100, base=False, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"Take profit order Second - fee: {amount_second}", log_level=LogLevel.DEBUG)
else:
if FEE_SECOND:
amount_second += self.round_truncate(fee * amount_second / 100, base=False, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"Take profit order Second + fee: {amount_second}", log_level=LogLevel.DEBUG)
else:
amount_first -= self.round_truncate(fee * amount_first / 100, base=True, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"Take profit order First - fee: {amount_first}", log_level=LogLevel.DEBUG)
elif self.reverse:
if self.cycle_buy:
amount_second -= self.round_truncate(2 * fee * amount_second / 100, base=False, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"Take profit order Second - fee: {amount_second}", log_level=LogLevel.DEBUG)
else:
amount_first -= self.round_truncate(2 * fee * amount_first / 100, base=True, fee=True,
_rounding=ROUND_CEILING)
self.message_log(f"Take profit order First - fee: {amount_first}", log_level=LogLevel.DEBUG)
return self.round_truncate(amount_first, base=True), self.round_truncate(amount_second, base=False)
def after_filled_tp(self, one_else_grid: bool = False):
"""
After the take profit order is filled: calculate profit, refresh the deposit, and restart or place an additional TP
"""
# noinspection PyTupleAssignmentBalance
amount_first, amount_second, by_market = self.tp_was_filled # skipcq: PYL-W0632
self.message_log(f"after_filled_tp: amount_first: {amount_first}, amount_second: {amount_second},"
f" by_market: {by_market}, tp_amount: {self.tp_amount}, tp_target: {self.tp_target}"
f" one_else_grid: {one_else_grid}", log_level=LogLevel.DEBUG)
self.debug_output()
amount_first_fee, amount_second_fee = self.fee_for_tp(amount_first, amount_second, by_market)
# Calculate cycle and total profit, refresh depo
if self.cycle_buy:
profit_second = self.round_truncate(amount_second_fee - self.tp_amount, base=False)
profit_reverse = self.round_truncate(PROFIT_REVERSE * profit_second if self.reverse else Decimal('0'),
base=False)
profit_second -= profit_reverse
self.profit_second += profit_second
self.part_profit_second = Decimal('0')
self.message_log(f"Cycle profit second {self.profit_second} + {profit_reverse}")
else:
profit_first = self.round_truncate(amount_first_fee - self.tp_amount, base=True)
profit_reverse = self.round_truncate(PROFIT_REVERSE * profit_first if self.reverse else Decimal('0'),
base=True)
profit_first -= profit_reverse
self.profit_first += profit_first
self.part_profit_first = Decimal('0')
self.message_log(f"Cycle profit first {self.profit_first} + {profit_reverse}")
transfer_sum_amount_first = Decimal('0')
transfer_sum_amount_second = Decimal('0')
if one_else_grid:
self.message_log("Some grid orders was execute after TP was filled", tlg=True)
# Correction sum_amount
self.message_log(f"Before Correction: Sum_amount_first: {self.sum_amount_first},"
f" Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
self.sum_amount_first -= self.tp_init[0]
self.sum_amount_second -= self.tp_init[1]
self.message_log(f"Sum_amount_first: {self.sum_amount_first}, Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
self.tp_was_filled = ()
# Return the deposit to turnover without loss
tcm = self.get_trading_capability_manager()
if self.cycle_buy:
min_trade_amount = tcm.get_min_buy_amount(float(self.avg_rate))
amount = self.tp_init[1]
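# Re-place the deposit returned by the TP as an additional grid; in a Reverse cycle the return target
# is scaled to the share of the initial amount being re-placed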
if self.reverse:
reverse_target_amount = (float2decimal(self.reverse_target_amount) *
self.tp_init[0] / self.reverse_init_amount)
else:
reverse_target_amount = self.tp_init[0] + (FEE_MAKER * 2 + PROFIT) * self.tp_init[0] / 100
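# first_order_vlm is the i = 0 term of the geometric split (the '* 1' is martin**0), i.e. the smallest order
# of the would-be additional grid, converted to base units for the minimum-amount check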
first_order_vlm = amount * 1 * (1 - self.martin) / (1 - self.martin ** ORDER_Q)
first_order_vlm /= self.avg_rate
else:
min_trade_amount = tcm.get_min_sell_amount(float(self.avg_rate))
amount = self.tp_init[0]
if self.reverse:
reverse_target_amount = (float2decimal(self.reverse_target_amount) *
self.tp_init[1] / self.reverse_init_amount)
else:
reverse_target_amount = self.tp_init[1] + (FEE_MAKER * 2 + PROFIT) * self.tp_init[1] / 100
first_order_vlm = amount * 1 * (1 - self.martin) / (1 - self.martin ** ORDER_Q)
self.message_log(f"Min trade amount is: {min_trade_amount}")
self.debug_output()
self.message_log(f"For additional grid amount: {amount}, reverse_target_amount: {reverse_target_amount}",
tlg=True)
if float(first_order_vlm) > min_trade_amount:
self.message_log("Place additional grid orders and replace TP", tlg=True)
self.tp_hold_additional = True
self.place_grid(self.cycle_buy, amount, float(reverse_target_amount), allow_grid_shift=False)
return
if float(amount) > min_trade_amount: # skipcq: PYL-R1705
self.message_log("Too small amount for place additional grid, correct grid and replace TP", tlg=True)
if self.orders_hold:
order = self.orders_hold.orders_list.pop()
order['amount'] += self.tp_init[1] if self.cycle_buy else self.tp_init[0]
self.message_log(f"Corrected amount of last hold grid order: {order['amount']}")
self.orders_hold.orders_list.append(order)
elif self.orders_grid:
order = self.orders_grid.orders_list[-1]
order['amount'] = self.tp_init[1] if self.cycle_buy else self.tp_init[0]
self.message_log(f"Additional grid order for buy: {order['buy']},"
f" {order['amount']} by {order['price']}")
waiting_order_id = self.place_limit_order(order['buy'], float(order['amount']),
float(order['price']))
self.orders_init.append(waiting_order_id, order['buy'], order['amount'], order['price'])
else:
self.message_log(f"Additional grid order for buy: {self.cycle_buy},"
f" {amount} by {reverse_target_amount / amount}")
waiting_order_id = self.place_limit_order(self.cycle_buy, float(amount),
float(reverse_target_amount / amount))
self.orders_init.append(waiting_order_id, self.cycle_buy, amount, reverse_target_amount / amount)
self.place_profit_order(by_market=by_market)
return
else:
self.message_log("Too small for trade, transfer filled amount to the next cycle", tlg=True)
transfer_sum_amount_first = self.sum_amount_first
transfer_sum_amount_second = self.sum_amount_second
if self.cycle_buy:
self.deposit_second += self.profit_second - transfer_sum_amount_second
if self.reverse:
self.sum_profit_second += profit_reverse
self.initial_reverse_first += transfer_sum_amount_first
self.initial_reverse_second += profit_reverse - transfer_sum_amount_second
else:
# Take full profit only for non-reverse cycle
self.sum_profit_second += self.profit_second
self.initial_first += transfer_sum_amount_first
self.initial_second += self.profit_second - transfer_sum_amount_second
self.message_log(f"after_filled_tp: new initial_funding:"
f" {self.initial_reverse_second if self.reverse else self.initial_second}",
log_level=LogLevel.DEBUG)
self.cycle_buy_count += 1
else:
self.deposit_first += self.profit_first - transfer_sum_amount_first
if self.reverse:
self.sum_profit_first += profit_reverse
self.initial_reverse_first += profit_reverse - transfer_sum_amount_first
self.initial_reverse_second += transfer_sum_amount_second
else:
# Take full profit only for non-reverse cycle
self.sum_profit_first += self.profit_first
self.initial_first += self.profit_first - transfer_sum_amount_first
self.initial_second += transfer_sum_amount_second
self.message_log(f"after_filled_tp: new initial_funding:"
f" {self.initial_reverse_first if self.reverse else self.initial_first}",
log_level=LogLevel.DEBUG)
self.cycle_sell_count += 1
if (not self.cycle_buy and self.profit_first < 0) or (self.cycle_buy and self.profit_second < 0):
self.message_log("Strategy have a negative cycle result, STOP", log_level=LogLevel.CRITICAL)
self.command = 'end'
self.cancel_grid()
else:
self.message_log("Restart after filling take profit order", tlg=False)
self.debug_output()
self.restart = True
self.sum_amount_first = transfer_sum_amount_first
self.sum_amount_second = transfer_sum_amount_second
self.part_amount_first = Decimal('0')
self.part_amount_second = Decimal('0')
self.correction_amount_first = Decimal('0')
self.correction_amount_second = Decimal('0')
self.tp_part_amount_first = Decimal('0')
self.tp_part_amount_second = Decimal('0')
self.start()
def reverse_after_grid_ending(self):
self.message_log("reverse_after_grid_ending:", log_level=LogLevel.DEBUG)
self.debug_output()
if self.reverse:
self.message_log('End reverse cycle', tlg=True)
self.reverse = False
self.restart = True
# Calculate profit and time for Reverse cycle
self.cycle_time = self.cycle_time_reverse
if self.cycle_buy:
self.profit_first += self.round_truncate(self.sum_amount_first - self.reverse_init_amount +
self.tp_part_amount_first, base=True)
self.deposit_first += self.round_truncate(self.profit_first - self.tp_part_amount_first, base=True)
self.initial_first += self.round_truncate(self.profit_first - self.tp_part_amount_first, base=True)
self.message_log(f"Reverse cycle profit first {self.profit_first}")
self.sum_profit_first += self.profit_first
self.cycle_sell_count += 1
else:
self.profit_second += self.round_truncate(self.sum_amount_second - self.reverse_init_amount +
self.tp_part_amount_second, base=False)
self.deposit_second += self.round_truncate(self.profit_second - self.tp_part_amount_second, base=False)
self.initial_second += self.round_truncate(self.profit_second - self.tp_part_amount_second, base=False)
self.message_log(f"Reverse cycle profit second {self.profit_second}")
self.sum_profit_second += self.profit_second
self.cycle_buy_count += 1
self.cycle_time_reverse = None
self.reverse_target_amount = None
self.reverse_init_amount = Decimal('0')
self.initial_reverse_first = Decimal('0')
self.initial_reverse_second = Decimal('0')
self.command = 'stop' if REVERSE_STOP and REVERSE else self.command
if (self.cycle_buy and self.profit_first <= 0) or (not self.cycle_buy and self.profit_second <= 0):
self.message_log("Strategy have a negative cycle result, STOP", log_level=LogLevel.CRITICAL)
self.command = 'end'
else:
try:
adx = self.adx(ADX_CANDLE_SIZE_IN_MINUTES, ADX_NUMBER_OF_CANDLES, ADX_PERIOD)
except ZeroDivisionError:
trend_up = True
trend_down = True
else:
trend_up = adx.get('adx') > ADX_THRESHOLD and adx.get('+DI') > adx.get('-DI')
trend_down = adx.get('adx') > ADX_THRESHOLD and adx.get('-DI') > adx.get('+DI')
# print('adx: {}, +DI: {}, -DI: {}'.format(adx.get('adx'), adx.get('+DI'), adx.get('-DI')))
self.cycle_time_reverse = self.cycle_time or datetime.utcnow()
# Calculate target return amount
tp = self.calc_profit_order(not self.cycle_buy)
if self.cycle_buy:
self.deposit_first = self.round_truncate(self.sum_amount_first, base=True) - self.tp_part_amount_first
self.reverse_target_amount = float(tp.get('amount') * tp.get('price') - self.tp_part_amount_second)
self.reverse_init_amount = self.sum_amount_second - self.tp_part_amount_second
self.initial_reverse_first = self.initial_first + self.sum_amount_first
self.initial_reverse_second = self.initial_second - self.sum_amount_second
self.message_log(f"Depo for Reverse cycle first: {self.deposit_first}", log_level=LogLevel.DEBUG,
color=Style.B_WHITE)
else:
self.deposit_second = (self.round_truncate(self.sum_amount_second, base=False)
- self.tp_part_amount_second)
self.reverse_target_amount = float(tp.get('amount') - self.tp_part_amount_first)
self.reverse_init_amount = self.sum_amount_first - self.tp_part_amount_first
self.initial_reverse_first = self.initial_first - self.sum_amount_first
self.initial_reverse_second = self.initial_second + self.sum_amount_second
self.message_log(f"Depo for Reverse cycle second: {self.deposit_second}", log_level=LogLevel.DEBUG,
color=Style.B_WHITE)
self.message_log(f"Actual depo for initial cycle was: {self.reverse_init_amount}", log_level=LogLevel.DEBUG)
self.message_log(f"For Reverse cycle set target return amount: {self.reverse_target_amount}"
f" with profit: {tp.get('profit')}%", color=Style.B_WHITE)
self.debug_output()
if (self.cycle_buy and trend_down) or (not self.cycle_buy and trend_up):
self.message_log('Start reverse cycle', tlg=True)
self.reverse = True
self.command = 'stop' if REVERSE_STOP else None
else:
self.message_log('Hold reverse cycle', color=Style.B_WHITE)
self.reverse_price = self.get_buffered_ticker().last_price
self.reverse_hold = True
self.place_profit_order()
if not self.reverse_hold:
# Reverse
self.cycle_buy = not self.cycle_buy
self.sum_amount_first = self.tp_part_amount_first
self.sum_amount_second = self.tp_part_amount_second
self.debug_output()
self.part_amount_first = Decimal('0')
self.part_amount_second = Decimal('0')
self.tp_part_amount_first = Decimal('0')
self.tp_part_amount_second = Decimal('0')
self.start()
def place_grid_part(self) -> None:
self.grid_place_flag = True
k = 0
n = len(self.orders_grid) + len(self.orders_init)
for i in self.orders_hold:
if k == GRID_MAX_COUNT or k + n >= ORDER_Q:
if k + n >= ORDER_Q:
self.order_q_placed = True
break
waiting_order_id = self.place_limit_order(i['buy'], float(i['amount']), float(i['price']))
self.orders_init.append(waiting_order_id, i['buy'], i['amount'], i['price'])
k += 1
del self.orders_hold.orders_list[:k]
def grid_only_stop(self) -> None:
tcm = self.get_trading_capability_manager()
self.avg_rate = self.sum_amount_second / self.sum_amount_first
self.avg_rate = float2decimal(tcm.round_price(float(self.avg_rate), RoundingType.FLOOR))
if self.cycle_buy:
self.message_log('Stop after buy asset\n'
'Sell {} {}\n'
'Buy {} {}\n'
'Average rate is {}'
.format(self.sum_amount_second, self.s_currency,
self.sum_amount_first, self.f_currency, self.avg_rate), tlg=True)
else:
self.message_log('Stop after sell asset\n'
'Buy {} {}\n'
'Sell {} {}\n'
'Average rate is {}'
.format(self.sum_amount_second, self.s_currency,
self.sum_amount_first, self.f_currency, self.avg_rate), tlg=True)
self.command = 'stop'
self.restart = True
self.start()
def grid_handler(self, _amount_first=None, _amount_second=None, by_market: bool = False,
after_full_fill: bool = True) -> None:
"""
Handler after filling grid order
"""
if after_full_fill and _amount_first:
# Calculate trade amount with Fee
amount_first_fee, amount_second_fee = self.fee_for_grid(_amount_first, _amount_second, by_market)
# Update the cycle trading totals for both currencies
self.sum_amount_first += amount_first_fee + self.part_amount_first
self.sum_amount_second += amount_second_fee + self.part_amount_second
self.part_amount_first = Decimal('0')
self.part_amount_second = Decimal('0')
self.message_log(f"Sum_amount_first: {self.sum_amount_first},"
f" Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
# State
no_grid = not self.orders_grid and not self.orders_hold and not self.orders_init
if no_grid and not self.orders_save:
if self.tp_order_id:
self.tp_hold = False
self.tp_cancel_from_grid_handler = True
if not self.cancel_order_id:
self.cancel_order_id = self.tp_order_id
self.cancel_order(self.tp_order_id)
return
if self.tp_wait_id:
# Wait tp order and cancel in on_cancel_order_success and restart
self.tp_cancel_from_grid_handler = True
return
if GRID_ONLY:
self.grid_only_stop()
elif (self.tp_part_amount_first or self.tp_part_amount_second
or self.correction_amount_first or self.correction_amount_second):
self.message_log("grid_handler: No grid orders after part filled TP, converse TP to grid", tlg=True)
# Correction sum_amount
self.message_log(f"Before Correction: Sum_amount_first: {self.sum_amount_first},"
f" Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
self.sum_amount_first -= self.tp_part_amount_first
self.sum_amount_second -= self.tp_part_amount_second
self.message_log(f"Sum_amount_first: {self.sum_amount_first},"
f" Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
self.tp_part_amount_first += self.correction_amount_first
self.tp_part_amount_second += self.correction_amount_second
self.correction_amount_first = Decimal('0')
self.correction_amount_second = Decimal('0')
tcm = self.get_trading_capability_manager()
# Return the deposit to turnover without loss
self.message_log(f"Saved TP part amount was: first: {self.tp_part_amount_first},"
f" second: {self.tp_part_amount_second}", log_level=LogLevel.DEBUG)
_amount_f = self.tp_part_amount_first
_amount_s = self.tp_part_amount_second
self.tp_part_amount_first = Decimal('0')
self.tp_part_amount_second = Decimal('0')
if self.cycle_buy:
min_trade_amount = tcm.get_min_buy_amount(float(self.avg_rate))
amount = _amount_s
if self.reverse:
reverse_target_amount = (float2decimal(self.reverse_target_amount) *
_amount_f / self.reverse_init_amount)
else:
reverse_target_amount = _amount_f + (FEE_MAKER * 2 + PROFIT) * _amount_f / 100
first_order_vlm = amount * 1 * (1 - self.martin) / (1 - self.martin**ORDER_Q)
first_order_vlm /= self.avg_rate
else:
min_trade_amount = tcm.get_min_sell_amount(float(self.avg_rate))
amount = _amount_f
if self.reverse:
reverse_target_amount = (float2decimal(self.reverse_target_amount) *
_amount_s / self.reverse_init_amount)
else:
reverse_target_amount = _amount_s + (FEE_MAKER * 2 + PROFIT) * _amount_s / 100
first_order_vlm = amount * 1 * (1 - self.martin) / (1 - self.martin**ORDER_Q)
self.message_log(f"Min trade amount is: {min_trade_amount}")
self.debug_output()
self.message_log(f"For additional grid amount: {amount},"
f" reverse_target_amount: {reverse_target_amount}", tlg=True)
if float(first_order_vlm) > min_trade_amount:
self.message_log("Place additional grid orders", tlg=True)
self.place_grid(self.cycle_buy, amount, float(reverse_target_amount), allow_grid_shift=False)
return
if float(amount) > min_trade_amount:
self.message_log("Too small amount for place additional grid, correct grid", tlg=True)
if self.orders_hold:
order = self.orders_hold.orders_list.pop()
order['amount'] += self.tp_init[1] if self.cycle_buy else self.tp_init[0]
self.message_log(f"Corrected amount of last hold grid order: {order['amount']}")
self.orders_hold.orders_list.append(order)
elif self.orders_grid:
order = self.orders_grid.orders_list[-1]
order['amount'] = self.tp_init[1] if self.cycle_buy else self.tp_init[0]
self.message_log(f"Additional grid order for buy: {order['buy']},"
f" {order['amount']} by {order['price']}")
waiting_order_id = self.place_limit_order(order['buy'], float(order['amount']),
float(order['price']))
self.orders_init.append(waiting_order_id, order['buy'], order['amount'], order['price'])
else:
self.message_log(f"Additional grid order for buy: {self.cycle_buy},"
f" {amount} by {reverse_target_amount / amount}")
waiting_order_id = self.place_limit_order(self.cycle_buy, float(amount),
float(reverse_target_amount / amount))
self.orders_init.append(waiting_order_id, self.cycle_buy, amount,
reverse_target_amount / amount)
return
elif self.tp_was_filled:
self.message_log("grid_handler: Was filled TP and all grid orders, converse TP to grid", tlg=True)
self.after_filled_tp(one_else_grid=True)
else:
# Ended grid order, calculate depo and Reverse
self.reverse_after_grid_ending()
else:
if self.orders_save:
self.grid_remove = False
self.start_hold = False
self.message_log("grid_handler: Restore deleted and unplaced grid orders")
self.orders_hold.orders_list.extend(self.orders_save)
# Sort restored hold orders
if self.cycle_buy:
self.orders_hold.orders_list.sort(key=lambda x: x['price'], reverse=True)
else:
self.orders_hold.orders_list.sort(key=lambda x: x['price'], reverse=False)
self.orders_save.orders_list.clear()
self.order_q_placed = False
if after_full_fill and self.orders_hold and self.order_q_placed:
# Place one held grid order and remove it from the hold list
_buy, _amount, _price = self.orders_hold.get_first()
waiting_order_id = self.place_limit_order(_buy, float(_amount), float(_price))
self.orders_init.append(waiting_order_id, _buy, _amount, _price)
del self.orders_hold.orders_list[0]
# A filled but not yet processed TP exists
if self.tp_was_filled:
self.after_filled_tp(one_else_grid=True)
else:
self.place_profit_order(by_market)
def cancel_grid(self):
"""
Atomically cancel grid orders. Before start(), all grid orders must be confirmed as canceled.
"""
if self.grid_remove is None:
self.message_log("cancel_grid: Started", log_level=LogLevel.DEBUG)
self.grid_remove = True
if self.grid_remove:
self.message_log("cancel_grid:", log_level=LogLevel.DEBUG)
# Temporarily save and clear hold orders to avoid placing them
if self.orders_hold:
self.orders_save.orders_list.extend(self.orders_hold)
self.orders_hold.orders_list.clear()
if self.orders_init:
# Unaccepted grid order(s) exist; wait for a message from the exchange
self.start_hold = True
elif self.orders_grid:
# Sequentially remove orders from the grid and keep the removal 'atomic':
# - on_cancel_order_success: save the canceled order to orders_save
_id = self.orders_grid.orders_list[0]['id']
self.message_log(f"cancel_grid order: {_id}", log_level=LogLevel.DEBUG)
self.grid_order_canceled = _id
self.cancel_order(_id)
elif self.grid_remove:
self.message_log("cancel_grid: Ended", log_level=LogLevel.DEBUG)
self.grid_remove = None
self.orders_save.orders_list.clear()
if self.tp_was_filled:
self.after_filled_tp(one_else_grid=False)
else:
self.start()
else:
self.grid_remove = None
else:
self.grid_remove = None
def round_truncate(self, _x: Decimal, base: bool, fee: bool = False, _rounding=ROUND_FLOOR) -> Decimal:
round_pattern = "1.01234567" if fee else self.round_base if base else self.round_quote
xr = _x.quantize(Decimal(round_pattern), rounding=_rounding)
return xr
def debug_output(self):
self.message_log(f"\n"
f"! =======================================\n"
f"! debug output:\n"
f"! sum_amount_first: {self.sum_amount_first}, sum_amount_second: {self.sum_amount_second}\n"
f"! part_amount_first: {self.part_amount_first},"
f" part_amount_second: {self.part_amount_second}\n"
f"! initial_first: {self.initial_first}, initial_second: {self.initial_second}\n"
f"! initial_reverse_first: {self.initial_reverse_first},"
f" initial_reverse_second: {self.initial_reverse_second}\n"
f"! reverse_init_amount: {self.reverse_init_amount}\n"
f"! reverse_target_amount: {self.reverse_target_amount}\n"
f"! correction_amount_first: {self.correction_amount_first},"
f" correction_amount_second: {self.correction_amount_second}\n"
f"! tp_order: {self.tp_order}\n"
f"! tp_part_amount_first: {self.tp_part_amount_first},"
f" tp_part_amount_second: {self.tp_part_amount_second}\n"
f"! profit_first: {self.profit_first}, profit_second: {self.profit_second}\n"
f"! part_profit_first: {self.part_profit_first},"
f" part_profit_second: {self.part_profit_second}\n"
f"! deposit_first: {self.deposit_first}, deposit_second: {self.deposit_second}\n"
f"! command: {self.command}\n"
f"! reverse: {self.reverse}\n"
f"! ======================================")
def check_order_status(self):
market_orders = self.get_buffered_open_orders()
market_orders_id = []
for order in market_orders:
market_orders_id.append(order.id)
strategy_orders_id = self.orders_grid.get_id_list()
if self.tp_order_id and not self.cancel_order_id:
strategy_orders_id.append(self.tp_order_id)
if self.grid_order_canceled:
try:
strategy_orders_id.remove(self.grid_order_canceled)
except ValueError:
pass
diff_id = list(set(strategy_orders_id).difference(market_orders_id))
if diff_id:
self.message_log(f"Orders not present on exchange: {diff_id}", tlg=True)
if diff_id.count(self.tp_order_id):
diff_id.remove(self.tp_order_id)
amount_first = float2decimal(self.tp_order[1])
amount_second = float2decimal(self.tp_order[1]) * float2decimal(self.tp_order[2])
self.tp_was_filled = (amount_first, amount_second, True)
self.tp_order_id = None
self.tp_order = ()
self.message_log(f"Was filled TP: {self.tp_was_filled}", log_level=LogLevel.DEBUG)
if diff_id:
self.shift_grid_threshold = None
amount_first = Decimal('0')
amount_second = Decimal('0')
for _id in diff_id:
_buy, _amount, _price = self.orders_grid.get_by_id(_id)
self.orders_grid.remove(_id)
amount_first += float2decimal(_amount)
amount_second += float2decimal(_amount) * float2decimal(_price)
self.avg_rate = amount_second / amount_first
self.message_log(f"Grid amount: First: {amount_first}, Second: {amount_second},"
f" price: {self.avg_rate}")
self.grid_handler(_amount_first=amount_first, _amount_second=amount_second,
after_full_fill=True)
elif self.tp_was_filled:
self.cancel_grid()
def check_min_amount_for_tp(self) -> bool:
res = False
if self.avg_rate:
tcm = self.get_trading_capability_manager()
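# For a buy cycle the take profit will be a sell order of the accumulated base
# amount, so it is compared against the minimum sell amount; for a sell cycle
# the take profit is a buy order, so the accumulated quote amount is converted
# to base via avg_rate and compared against the minimum buy amount.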
if self.cycle_buy:
min_trade_amount = float2decimal(tcm.get_min_sell_amount(float(self.avg_rate)))
amount = self.sum_amount_first
amount = self.round_truncate(amount, base=True)
if amount:
self.message_log(f"Sell amount: {amount}, min sell amount: {min_trade_amount}",
log_level=LogLevel.DEBUG)
else:
min_trade_amount = float2decimal(tcm.get_min_buy_amount(float(self.avg_rate)))
amount = self.sum_amount_second / self.avg_rate
amount = self.round_truncate(amount, base=True)
if amount:
self.message_log(f"Buy amount: {amount}, min buy amount: {min_trade_amount}",
log_level=LogLevel.DEBUG)
if amount >= min_trade_amount:
res = True
return res
##############################################################
# public data update methods
##############################################################
def on_new_ticker(self, ticker: Ticker) -> None:
# print(f"on_new_ticker:ticker.last_price: {ticker.last_price}")
if (self.shift_grid_threshold and self.last_shift_time and time.time() - self.last_shift_time > SHIFT_GRID_DELAY
and ((self.cycle_buy and ticker.last_price >= self.shift_grid_threshold)
or
(not self.cycle_buy and ticker.last_price <= self.shift_grid_threshold))):
if not STANDALONE and EXTRA_CHECK_ORDER_STATE:
self.check_order_status()
if self.shift_grid_threshold:
self.message_log('Shift grid', color=Style.B_WHITE)
self.shift_grid_threshold = None
self.start_after_shift = True
if self.part_amount_first != 0 or self.part_amount_second != 0:
self.message_log("Grid order was small partially filled, correct depo")
if self.cycle_buy:
self.deposit_second += self.round_truncate(self.part_amount_second, base=False)
self.message_log(f"New second depo: {self.deposit_second}")
else:
self.deposit_first += self.round_truncate(self.part_amount_first, base=True)
self.message_log(f"New first depo: {self.deposit_first}")
self.part_amount_first = Decimal('0')
self.part_amount_second = Decimal('0')
self.grid_remove = None
self.cancel_grid()
def on_new_order_book(self, order_book: OrderBook) -> None:
# print(f"on_new_order_book: max_bids: {order_book.bids[0].price}, min_asks: {order_book.asks[0].price}")
pass
def on_new_funds(self, funds: Dict[str, FundsEntry]) -> None:
# print(f"on_new_funds.funds: {funds}")
ff = funds.get(self.f_currency, 0)
fs = funds.get(self.s_currency, 0)
if self.wait_refunding_for_start:
ff = float2decimal(ff.total_for_currency) if ff else Decimal('0.0')
fs = float2decimal(fs.total_for_currency) if fs else Decimal('0.0')
if self.reverse:
delta_f = ff - self.initial_reverse_first
delta_s = fs - self.initial_reverse_second
else:
delta_f = ff - self.initial_first
delta_s = fs - self.initial_second
delta = delta_f * self.avg_rate + delta_s
tcm = self.get_trading_capability_manager()
min_delta = float2decimal(0.1 * tcm.get_min_buy_amount(float(self.avg_rate))) * self.avg_rate
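# delta is the total balance change expressed in the second (quote) currency;
# refunding is treated as complete when the balance has not decreased or the
# shortfall is negligible (less than 10% of the minimum buy amount converted
# at avg_rate), and the cycle is restarted.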
if delta > 0 or delta.copy_abs() < min_delta:
self.start()
return
if self.tp_order_hold:
if self.tp_order_hold['buy_side']:
available_fund = float2decimal(fs.available) if fs else Decimal('0.0')
else:
available_fund = float2decimal(ff.available) if ff else Decimal('0.0')
if available_fund >= self.tp_order_hold['amount']:
self.place_profit_order()
return
if self.grid_hold:
if self.grid_hold['buy_side']:
available_fund = float2decimal(fs.available) if fs else Decimal('0.0')
else:
available_fund = float2decimal(ff.available) if ff else Decimal('0.0')
if available_fund >= self.grid_hold['depo']:
self.place_grid(self.grid_hold['buy_side'],
self.grid_hold['depo'], self.reverse_target_amount)
##############################################################
# private update methods
##############################################################
def on_order_update(self, update: OrderUpdate) -> None:
# self.message_log(f"Order {update.original_order.id}: {update.status}", log_level=LogLevel.DEBUG)
if update.status in [OrderUpdate.ADAPTED,
OrderUpdate.NO_CHANGE,
OrderUpdate.REAPPEARED,
OrderUpdate.DISAPPEARED,
OrderUpdate.CANCELED,
OrderUpdate.OTHER_CHANGE]:
pass
else:
self.message_log(f"Order {update.original_order.id}: {update.status}", color=Style.B_WHITE)
result_trades = update.resulting_trades
amount_first = Decimal('0')
amount_second = Decimal('0')
if update.status == OrderUpdate.PARTIALLY_FILLED:
# Get last trade row
if result_trades:
i = result_trades[-1]
amount_first = float2decimal(i.amount)
amount_second = float2decimal(i.amount) * float2decimal(i.price)
self.message_log(f"trade id={i.id}, first: {i.amount}, price: {i.price}", log_level=LogLevel.DEBUG)
else:
self.message_log(f"No records for {update.original_order.id}", log_level=LogLevel.WARNING)
else:
for i in result_trades:
# Calculate the total trade amount for both currencies
amount_first += float2decimal(i.amount)
amount_second += float2decimal(i.amount) * float2decimal(i.price)
self.message_log(f"trade id={i.id}, first: {i.amount}, price: {i.price}", log_level=LogLevel.DEBUG)
# Recalculate the average rate (guard against a zero filled amount)
if amount_first == 0:
self.message_log(f"No amount for {update.original_order.id}", log_level=LogLevel.WARNING)
return
self.avg_rate = amount_second / amount_first
self.message_log(f"Executed amount: First: {amount_first}, Second: {amount_second}, price: {self.avg_rate}")
if update.status in (OrderUpdate.FILLED, OrderUpdate.ADAPTED_AND_FILLED):
self.shift_grid_threshold = None
if self.orders_grid.exist(update.original_order.id):
# Remove the grid order with this id from the order list
self.orders_grid.remove(update.original_order.id)
self.grid_handler(_amount_first=amount_first, _amount_second=amount_second, after_full_fill=True)
elif self.tp_order_id == update.original_order.id:
# Filled take profit order, restart
self.tp_order_id = None
self.cancel_order_id = None
self.tp_order = ()
if self.reverse_hold:
self.reverse_hold = False
self.cycle_time_reverse = None
self.initial_reverse_first = Decimal('0')
self.initial_reverse_second = Decimal('0')
self.message_log("Cancel hold reverse cycle", color=Style.B_WHITE, tlg=True)
self.tp_part_amount_first = Decimal('0')
self.tp_part_amount_second = Decimal('0')
self.tp_was_filled = (amount_first, amount_second, False)
# print(f"on_order_update.was_filled_tp: {self.tp_was_filled}")
if self.tp_hold:
# Some grid orders were filled after the TP was placed but before it executed
self.tp_hold = False
self.after_filled_tp(one_else_grid=True)
elif self.tp_cancel_from_grid_handler:
self.tp_cancel_from_grid_handler = False
self.grid_handler()
else:
self.grid_remove = True
self.cancel_grid()
else:
self.message_log('Unknown order, ignoring it', tlg=True)
elif update.status == OrderUpdate.PARTIALLY_FILLED:
if self.tp_order_id == update.original_order.id:
self.message_log("Take profit partially filled", color=Style.B_WHITE)
amount_first_fee, amount_second_fee = self.fee_for_tp(amount_first, amount_second)
# Calculate profit for the filled part of the TP
_profit_first = Decimal('0')
_profit_second = Decimal('0')
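# The filled part of the TP carries a proportional share of the planned
# profit: profit_share = (tp_target - tp_amount) / tp_target. The profit
# booked here is that share of the filled amount, and the remainder
# (amount - profit) is accumulated in tp_part_amount_* for later correction.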
if self.cycle_buy:
_profit_second = self.round_truncate(((self.tp_target - self.tp_amount) * amount_second_fee /
self.tp_target), base=False)
self.part_profit_second += _profit_second
self.message_log(f"Part profit second {self.part_profit_second}", log_level=LogLevel.DEBUG)
else:
_profit_first = self.round_truncate(((self.tp_target - self.tp_amount) * amount_first_fee /
self.tp_target), base=True)
self.part_profit_first += _profit_first
self.message_log(f"Part profit first {self.part_profit_first}", log_level=LogLevel.DEBUG)
self.tp_part_amount_first += amount_first_fee - _profit_first
self.tp_part_amount_second += amount_second_fee - _profit_second
if self.reverse_hold:
self.message_log("Correct hold reverse cycle", color=Style.B_WHITE, tlg=False)
if self.cycle_buy:
self.message_log(f"Old: reverse_target_amount: {self.reverse_target_amount},"
f" deposit_first: {self.deposit_first},"
f" reverse_init_amount: {self.reverse_init_amount}",
log_level=LogLevel.DEBUG)
self.reverse_target_amount -= float(amount_second_fee)
self.deposit_first -= amount_first_fee
self.reverse_init_amount -= amount_second_fee
self.message_log(f"New: reverse_target_amount: {self.reverse_target_amount},"
f" deposit_first: {self.deposit_first},"
f" reverse_init_amount: {self.reverse_init_amount}",
log_level=LogLevel.DEBUG)
else:
self.message_log(f"Old: reverse_target_amount: {self.reverse_target_amount},"
f" deposit_second: {self.deposit_second},"
f" reverse_init_amount: {self.reverse_init_amount}",
log_level=LogLevel.DEBUG)
self.reverse_target_amount -= float(amount_first_fee)
self.deposit_second -= amount_second_fee
self.reverse_init_amount -= amount_first_fee
self.message_log(f"New: reverse_target_amount: {self.reverse_target_amount},"
f" deposit_second: {self.deposit_second},"
f" reverse_init_amount: {self.reverse_init_amount}",
log_level=LogLevel.DEBUG)
else:
self.message_log("Grid order partially filled", color=Style.B_WHITE)
amount_first_fee, amount_second_fee = self.fee_for_grid(amount_first, amount_second)
# Increase the trade result; a subsequent grid order fill will decrease it
self.sum_amount_first += amount_first_fee
self.sum_amount_second += amount_second_fee
self.message_log(f"Sum_amount_first: {self.sum_amount_first},"
f" Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
self.part_amount_first -= amount_first_fee
self.part_amount_second -= amount_second_fee
self.message_log(f"Part_amount_first: {self.part_amount_first},"
f" Part_amount_second: {self.part_amount_second}", log_level=LogLevel.DEBUG)
# Check whether the accumulated amount reaches the minimum needed for a TP
if self.check_min_amount_for_tp():
self.shift_grid_threshold = None
self.grid_handler(after_full_fill=False)
else:
self.message_log("Partially trade too small, ignore", color=Style.B_WHITE)
def on_place_order_success(self, place_order_id: int, order: Order) -> None:
# print(f"on_place_order_success.place_order_id: {place_order_id}")
if order.remaining_amount == 0.0:
self.shift_grid_threshold = None
# Get the actual parameters of the last trade order
market_order = self.get_buffered_completed_trades()
amount_first = Decimal('0')
amount_second = Decimal('0')
self.message_log(f"Order {place_order_id} executed by market", color=Style.B_WHITE)
for o in market_order:
if o.order_id == order.id:
amount_first += float2decimal(o.amount)
amount_second += float2decimal(o.amount) * float2decimal(o.price)
if not amount_first:
amount_first += float2decimal(order.amount)
amount_second += float2decimal(order.amount) * float2decimal(order.price)
self.avg_rate = amount_second / amount_first
self.message_log(f"For {order.id} first: {amount_first}, second: {amount_second}, price: {self.avg_rate}")
if self.orders_init.exist(place_order_id):
self.message_log(f"Grid order {order.id} execute by market")
self.orders_init.remove(place_order_id)
self.message_log(f"Waiting order count is: {len(self.orders_init)}, hold: {len(self.orders_hold)}")
# Place take profit order
self.grid_handler(_amount_first=amount_first, _amount_second=amount_second,
by_market=True, after_full_fill=True)
elif place_order_id == self.tp_wait_id:
# Take profit order executed by market, restart
self.tp_wait_id = None
if self.reverse_hold:
self.reverse_hold = False
self.cycle_time_reverse = None
self.initial_reverse_first = Decimal('0')
self.initial_reverse_second = Decimal('0')
self.message_log("Cancel hold reverse cycle", color=Style.B_WHITE, tlg=True)
self.message_log(f"Take profit order {order.id} execute by market")
self.tp_was_filled = (amount_first, amount_second, True)
if self.tp_hold or self.tp_cancel_from_grid_handler:
self.tp_cancel_from_grid_handler = False
self.tp_hold = False
# Some grid orders were filled after the TP was placed but before it was accepted
self.after_filled_tp(one_else_grid=True)
else:
self.grid_remove = True
self.cancel_grid()
else:
self.message_log('Did not have waiting order id for {}'.format(place_order_id),
LogLevel.ERROR, color=Style.B_RED)
else:
if self.orders_init.exist(place_order_id):
self.orders_grid.append(order.id, order.buy, float2decimal(order.amount), float2decimal(order.price))
if self.cycle_buy:
self.orders_grid.orders_list.sort(key=lambda x: x['price'], reverse=True)
else:
self.orders_grid.orders_list.sort(key=lambda x: x['price'], reverse=False)
# self.message_log(f"on_place_order_success.orders_grid", log_level=LogLevel.DEBUG)
# for i in self.orders_grid.orders_list: self.message_log(f"orders_grid: {i}", log_level=LogLevel.DEBUG)
self.orders_init.remove(place_order_id)
# self.message_log(f"Waiting order count is: {len(self.orders_init)}, hold: {len(self.orders_hold)}")
if not self.orders_init:
if GRID_ONLY and self.orders_hold:
# Place next part of grid orders
self.place_grid_part()
else:
if self.grid_place_flag or not self.orders_hold:
if self.orders_hold:
self.message_log(f"Part of grid orders are placed. Left: {len(self.orders_hold)}",
color=Style.B_WHITE)
else:
self.order_q_placed = True
self.message_log('All grid orders placed successfully', color=Style.B_WHITE)
else:
self.last_shift_time = time.time()
if self.start_hold:
self.message_log('Release held start, continue removing grid orders', color=Style.B_WHITE)
self.start_hold = False
self.grid_remove = True
self.cancel_grid()
elif place_order_id == self.tp_wait_id:
self.tp_wait_id = None
self.tp_order_id = order.id
if self.tp_hold or self.tp_cancel or self.tp_cancel_from_grid_handler:
self.cancel_order_id = self.tp_order_id
self.cancel_order(self.tp_order_id)
else:
# Place next part of grid orders
if self.orders_hold and not self.order_q_placed and not self.orders_init:
self.message_log(f"Place next part of grid orders, hold {len(self.orders_hold)}")
self.place_grid_part()
else:
self.message_log(F"Did not have waiting order id for {place_order_id}", LogLevel.ERROR)
def on_place_order_error_string(self, place_order_id: int, error: str) -> None:
# Check whether the required order exists among the open orders on the exchange
open_orders = self.get_buffered_open_orders(True) # lgtm [py/call/wrong-arguments]
order = None
if self.orders_init.exist(place_order_id):
order = self.orders_init.find_order(open_orders, place_order_id)
elif place_order_id == self.tp_wait_id:
for k, o in enumerate(open_orders):
if o.buy == self.tp_order[0] and o.amount == self.tp_order[1] and o.price == self.tp_order[2]:
order = open_orders[k]
if order:
self.on_place_order_success(place_order_id, order)
else:
self.message_log(f"On place order {place_order_id} error: {error}", LogLevel.ERROR, tlg=True)
if self.orders_init.exist(place_order_id):
_buy, _amount, _price = self.orders_init.get_by_id(place_order_id)
self.orders_hold.append(place_order_id, _buy, _amount, _price)
# Sort restored hold orders
if self.cycle_buy:
self.orders_hold.orders_list.sort(key=lambda x: x['price'], reverse=True)
else:
self.orders_hold.orders_list.sort(key=lambda x: x['price'], reverse=False)
self.orders_init.remove(place_order_id)
elif place_order_id == self.tp_wait_id:
self.tp_wait_id = None
self.tp_error = True
def on_cancel_order_success(self, order_id: int, canceled_order: Order) -> None:
if self.orders_grid.exist(order_id):
self.part_amount_first = Decimal('0')
self.part_amount_second = Decimal('0')
self.grid_order_canceled = None
self.orders_grid.remove(order_id)
save = True
for o in self.get_buffered_completed_trades():
if o.order_id == order_id:
save = False
break
if save:
self.orders_save.append(canceled_order.id, canceled_order.buy, float2decimal(canceled_order.amount),
float2decimal(canceled_order.price))
self.cancel_grid()
elif order_id == self.cancel_order_id:
self.cancel_order_id = None
self.tp_order_id = None
self.tp_order = ()
if self.tp_cancel_from_grid_handler:
self.tp_cancel_from_grid_handler = False
self.grid_handler()
return
if self.tp_part_amount_first or self.tp_part_amount_second:
# Correct sum_amount
self.sum_amount_first -= self.tp_part_amount_first
self.sum_amount_second -= self.tp_part_amount_second
self.message_log(f"Canceled TP part amount: first: {self.tp_part_amount_first},"
f" second: {self.tp_part_amount_second}",
log_level=LogLevel.DEBUG)
self.message_log(f"Corrected Sum_amount_first: {self.sum_amount_first},"
f" Sum_amount_second: {self.sum_amount_second}",
log_level=LogLevel.DEBUG, color=Style.MAGENTA)
# Save the correction amount
self.correction_amount_first += self.tp_part_amount_first
self.correction_amount_second += self.tp_part_amount_second
self.tp_part_amount_first = Decimal('0')
self.tp_part_amount_second = Decimal('0')
# Save part profit
self.profit_first += self.part_profit_first
self.part_profit_first = Decimal('0')
self.profit_second += self.part_profit_second
self.part_profit_second = Decimal('0')
if self.tp_hold:
self.tp_hold = False
self.place_profit_order()
return
if self.tp_cancel:
# Restart
self.tp_cancel = False
self.start()
def on_cancel_order_error_string(self, order_id: int, error: str) -> None:
# Check whether the required order still exists on the exchange
open_orders = self.get_buffered_open_orders(True) # lgtm [py/call/wrong-arguments]
if any(i.id == order_id for i in open_orders):
self.message_log(f"On cancel order {order_id} {error}, try one else", LogLevel.ERROR)
self.cancel_order(order_id)
else:
self.message_log(f"On cancel order {order_id} {error}", LogLevel.ERROR)
if self.orders_grid.exist(order_id):
self.message_log("It's was grid order, probably filled", LogLevel.WARNING)
self.grid_order_canceled = None
_buy, _amount, _price = self.orders_grid.get_by_id(order_id)
amount_first = float2decimal(_amount)
amount_second = float2decimal(_amount) * float2decimal(_price)
self.avg_rate = amount_second / amount_first
self.message_log(f"Executed amount: First: {amount_first}, Second: {amount_second},"
f" price: {self.avg_rate}")
# Remove the grid order with this id from the order list
self.orders_grid.remove(order_id)
self.grid_handler(_amount_first=amount_first, _amount_second=amount_second, after_full_fill=True)
elif order_id == self.cancel_order_id:
self.message_log("It's was take profit", LogLevel.ERROR)
amount_first = float2decimal(self.tp_order[1])
amount_second = float2decimal(self.tp_order[1]) * float2decimal(self.tp_order[2])
self.tp_was_filled = (amount_first, amount_second, True)
self.tp_order_id = None
self.tp_order = ()
self.message_log(f"Was filled TP: {self.tp_was_filled}", log_level=LogLevel.DEBUG)
self.cancel_grid()
else:
self.message_log("It's unknown", LogLevel.ERROR)
test_insert.py
import time
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
collection_id = "test_add"
ADD_TIMEOUT = 60
tag = "1970-01-01"
add_interval_time = 5
nb = 6000
class TestAddBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
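# For illustration only (the exact index types and parameters returned by
# utils.gen_simple_index are an assumption): each param is a dict shaped like
#   {"index_type": IndexType.IVFLAT, "index_param": {"nlist": 1024}}
# which is how the tests below read index_type / index_param.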
def test_add_vector_create_collection(self, connect, collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, collection):
'''
target: test add vector, then check collection existence
method: add vector and call has_collection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert assert_has_collection(connect, collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.insert(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, collection):
'''
target: test delete collection_1 after adding a vector to collection_2
method: add vector and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert status.OK()
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert status.OK()
connect.flush([collection])
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, collection):
'''
target: test delete collection_1 after adding a vector to collection_2 for a while
method: add vector , sleep, and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
connect.flush([collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, collection, get_simple_index):
'''
target: test build index after adding a vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(index_param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, collection, get_simple_index):
'''
target: test build index after adding a vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
connect.flush([collection])
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
connect.flush([collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search(collection, 1, vector)
status, ids = connect.insert(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search(collection, 1, vector)
status, ids = connect.insert(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert status.OK()
connect.flush([collection])
status, result = connect.search(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, collection):
'''
target: test search collection_2 after adding a vector to collection_1
method: add vector, then search another collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
status, result = connect.search(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
connect.flush([collection])
status, result = connect.search(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, collection):
'''
target: test search collection_2 after adding a vector to collection_1 for a while
method: add vector, sleep, and search another collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
connect.flush([collection])
status, result = connect.search(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids(self, connect, collection):
'''
target: test add vectors in collection, using customized ids
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids equals the number of inserted vectors and the returned ids match the supplied ones
'''
nq = 5
top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.insert(collection, vectors, ids)
connect.flush([collection])
assert status.OK()
assert len(ids) == nq
status, result = connect.search(collection, top_k, query_records=vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_ids_no_ids(self, connect, collection):
'''
target: check the result of insert, with params ids and no ids
method: test add vectors twice, with customized ids first, and then without ids
expected: status not OK
'''
nq = 5
top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.insert(collection, vectors, ids)
assert status.OK()
status, ids = connect.insert(collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_not_ids_ids(self, connect, collection):
'''
target: check the result of insert, with params ids and no ids
method: test add vectors twice, without ids first, and then with customized ids
expected: status not OK
'''
nq = 5
top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.insert(collection, vectors)
assert status.OK()
status, ids = connect.insert(collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match(self, connect, collection):
'''
target: test add vectors in collection, using customized ids where len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.insert(collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_insert_ids_invalid(self, connect, collection, get_vector_id):
'''
target: test add vectors in collection, using customized ids which are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for _ in range(nq)]
with pytest.raises(Exception):
connect.insert(collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids equals the number of inserted vectors
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.insert(collection, vectors)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with the partition_tag param
expected: the collection row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.insert(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition and add vectors in it
expected: the collection row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.insert(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_not_existed(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with a non-existent partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.insert(collection, vectors, partition_tag=tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_not_existed_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition, add vectors with a non-existent partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
new_tag = "new_tag"
status = connect.create_partition(collection, tag)
status, ids = connect.insert(collection, vectors, partition_tag=new_tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_existed(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it repeatedly, with the partition_tag param
expected: the collection row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.insert(collection, vectors, partition_tag=tag)
for i in range(5):
status, ids = connect.insert(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.level(2)
def test_insert_without_connect(self, dis_connect, collection):
'''
target: test add vectors without connection
method: create collection and add vectors in it, check if added successfully
expected: raise exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
with pytest.raises(Exception) as e:
status, ids = dis_connect.insert(collection, vectors)
def test_add_collection_not_existed(self, connect):
'''
target: test add vectors to a collection which did not exist before
method: add vectors to a non-existent collection, check the status
expected: status not ok
'''
nq = 5
vector = gen_single_vector(dim)
status, ids = connect.insert(gen_unique_str("not_exist_collection"), vector)
assert not status.OK()
assert not ids
def test_add_vector_dim_not_matched(self, connect, collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.insert(collection, vector)
assert not status.OK()
def test_insert_dim_not_matched(self, connect, collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.insert(collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, collection):
'''
target: test add vectors, and search it after sleep
method: use vectors[0] as the query vector
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.insert(collection, vectors)
connect.flush([collection])
status, result = connect.search(collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_add_rows_count_multi_threading(self, args, collection):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and add vectors in it(idmap),
assert the value returned by count_entities method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
vectors = gen_vectors(nb, dim)
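# Each of the thread_num threads inserts the same nb vectors with
# auto-generated ids, so duplicates are allowed and the expected row count
# checked at the end is thread_num * nb.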
def add(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
# milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
# assert milvus
status, result = milvus.insert(collection, records=vectors)
assert status.OK()
status = milvus.flush([collection])
assert status.OK()
for i in range(thread_num):
x = threading.Thread(target=add, args=(i, ))
threads.append(x)
x.start()
for th in threads:
th.join()
status, res = milvus.count_entities(collection)
assert res == thread_num * nb
def test_add_vector_multi_collections(self, connect):
'''
target: test add vectors is correct or not with multiple collections of L2
method: create 20 collections and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
for j in range(5):
for i in range(20):
status, ids = connect.insert(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, status, result):
logging.getLogger().info("In callback check status")
assert status.OK()
def check_status_not_ok(self, status, result):
logging.getLogger().info("In callback check status")
assert not status.OK()
def test_insert_async(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
future = connect.insert(collection, insert_vec_list, _async=True)
status, ids = future.result()
connect.flush([collection])
assert len(ids) == nb
assert status.OK()
@pytest.mark.level(2)
def test_insert_async_false(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.insert(collection, insert_vec_list, _async=False)
connect.flush([collection])
assert len(ids) == nb
assert status.OK()
def test_insert_async_callback(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
future = connect.insert(collection, insert_vec_list, _async=True, _callback=self.check_status)
future.done()
@pytest.mark.level(2)
def test_insert_async_long(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = 50000
insert_vec_list = gen_vectors(nb, dim)
future = connect.insert(collection, insert_vec_list, _async=True, _callback=self.check_status)
status, result = future.result()
assert status.OK()
assert len(result) == nb
connect.flush([collection])
status, count = connect.count_entities(collection)
assert status.OK()
logging.getLogger().info(status)
logging.getLogger().info(count)
assert count == nb
def test_insert_async_callback_timeout(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = 100000
insert_vec_list = gen_vectors(nb, dim)
future = connect.insert(collection, insert_vec_list, _async=True, _callback=self.check_status, timeout=1)
future.done()
def test_insert_async_invalid_params(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
insert_vec_list = gen_vectors(nb, dim)
collection_new = gen_unique_str()
future = connect.insert(collection_new, insert_vec_list, _async=True)
status, result = future.result()
assert not status.OK()
# TODO: add assertion
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
insert_vec_list = []
collection_new = gen_unique_str()
with pytest.raises(Exception) as e:
future = connect.insert(collection_new, insert_vec_list, _async=True)
class TestAddIP:
"""
******************************************************************
The following cases are used to test `insert / index / search / delete` mixed function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_collection(self, connect, ip_collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
param = {'collection_name': ip_collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, ip_collection):
'''
target: test add vector, then check collection existence
method: add vector and call has_collection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
assert assert_has_collection(connect, ip_collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, ip_collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.insert(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, ip_collection):
'''
target: test delete collection_1 after adding a vector to collection_2
method: add vector and delete collection
expected: status ok
'''
param = {'collection_name': 'test_add_vector_delete_another_collection',
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, ip_collection):
'''
target: test delete collection_1 after adding a vector to collection_2 for a while
method: add vector , sleep, and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, ip_collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index after adding a vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
status, mode = connect._cmd("mode")
assert status.OK()
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index after adding a vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
if index_type == IndexType.IVF_PQ:
pytest.skip("Skip some PQ cases")
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
connect.flush([ip_collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, ip_collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search(ip_collection, 1, vector)
status, ids = connect.insert(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search(ip_collection, 1, vector)
status, ids = connect.insert(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
assert status.OK()
connect.flush([ip_collection])
status, result = connect.search(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, ip_collection):
'''
        target: test searching one collection after adding a vector to another collection
        method: add a vector to one collection, flush it, then search the other collection
        expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
connect.flush([ip_collection])
status, result = connect.search(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
time.sleep(add_interval_time)
status, result = connect.search(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, ip_collection):
'''
        target: test searching one collection a while after adding a vector to another collection
        method: add a vector to one collection, sleep, then search the other collection
        expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.insert(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status, result = connect.search(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids(self, connect, ip_collection):
'''
        target: test adding vectors to a collection with customized ids
        method: add vectors with caller-provided ids, flush, then search each vector
        expected: the number of returned ids equals nq and each vector is found under its own id
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.insert(ip_collection, vectors, ids)
assert status.OK()
connect.flush([ip_collection])
assert len(ids) == nq
# check search result
status, result = connect.search(ip_collection, top_k, vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_ids_no_ids(self, connect, ip_collection):
'''
target: check the result of insert, with params ids and no ids
        method: add vectors twice, with customized ids first and then without ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.insert(ip_collection, vectors, ids)
assert status.OK()
status, ids = connect.insert(ip_collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_not_ids_ids(self, connect, ip_collection):
'''
target: check the result of insert, with params ids and no ids
        method: add vectors twice, without ids first and then with customized ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.insert(ip_collection, vectors)
assert status.OK()
status, ids = connect.insert(ip_collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match(self, connect, ip_collection):
'''
        target: test adding vectors with customized ids where len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.insert(ip_collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_insert_ids_invalid(self, connect, ip_collection, get_vector_id):
'''
        target: test adding vectors with customized ids that are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for i in range(nq)]
with pytest.raises(Exception) as e:
status, ids = connect.insert(ip_collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert(self, connect, ip_collection):
'''
target: test add vectors in collection created before
        method: add vectors to an existing collection and check the ids returned
        expected: the number of returned ids equals the number of vectors added
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.insert(ip_collection, vectors)
assert status.OK()
assert len(ids) == nq
# @pytest.mark.level(2)
# def test_insert_without_connect(self, dis_connect, ip_collection):
# '''
# target: test add vectors without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nq = 5
# vectors = gen_vectors(nq, dim)
# with pytest.raises(Exception) as e:
# status, ids = dis_connect.insert(ip_collection, vectors)
def test_add_vector_dim_not_matched(self, connect, ip_collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.insert(ip_collection, vector)
assert not status.OK()
def test_insert_dim_not_matched(self, connect, ip_collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.insert(ip_collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, ip_collection):
'''
target: test add vectors, and search it after sleep
method: set vector[0][1] as query vectors
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.insert(ip_collection, vectors)
time.sleep(add_interval_time)
status, result = connect.search(ip_collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
def test_add_vector_multi_collections(self, connect):
'''
        target: test that adding vectors works correctly across multiple IP collections
        method: create 20 collections and add vectors into each of them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
for j in range(10):
for i in range(20):
status, ids = connect.insert(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddAdvance:
@pytest.fixture(
scope="function",
params=[
1,
1000,
6000
],
)
def insert_count(self, request):
yield request.param
def test_insert_much(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.insert(collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_ip(self, connect, ip_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.insert(ip_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_jaccard(self, connect, jac_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.insert(jac_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_hamming(self, connect, ham_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.insert(ham_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_tanimoto(self, connect, tanimoto_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.insert(tanimoto_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
class TestNameInvalid(object):
"""
Test adding vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
vectors = gen_vectors(1, dim)
status, result = connect.insert(collection_name, vectors)
assert not status.OK()
@pytest.mark.level(2)
def test_insert_with_invalid_tag_name(self, connect, get_collection_name, get_tag_name):
collection_name = get_collection_name
tag_name = get_tag_name
vectors = gen_vectors(1, dim)
status, result = connect.insert(collection_name, vectors, partition_tag=tag_name)
assert not status.OK()
class TestAddCollectionVectorsInvalid(object):
single_vector = gen_single_vector(dim)
vectors = gen_vectors(2, dim)
"""
Test adding vectors with invalid vectors
"""
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def gen_vector(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vector_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_single_vector = copy.deepcopy(self.single_vector)
tmp_single_vector[0][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.insert(collection, tmp_single_vector)
@pytest.mark.level(2)
def test_insert_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.insert(collection, tmp_vectors)
@pytest.mark.level(2)
def test_insert_with_invalid_vectors_jaccard(self, connect, jac_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.insert(jac_collection, tmp_vectors)
@pytest.mark.level(2)
def test_insert_with_invalid_vectors_hamming(self, connect, ham_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.insert(ham_collection, tmp_vectors)
|
ssd_model.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SSD300 Model Configuration.
References:
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
SSD: Single Shot MultiBox Detector
arXiv:1512.02325
Ported from MLPerf reference implementation:
https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import re
import threading
import tensorflow.compat.v1 as tf
from cnn_quantization.tf_cnn_benchmarks import constants
from cnn_quantization.tf_cnn_benchmarks import mlperf
from cnn_quantization.tf_cnn_benchmarks import ssd_constants
from cnn_quantization.tf_cnn_benchmarks.cnn_util import log_fn
from cnn_quantization.tf_cnn_benchmarks.models import model as model_lib
from cnn_quantization.tf_cnn_benchmarks.models import resnet_model
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
BACKBONE_MODEL_SCOPE_NAME = 'resnet34_backbone'
class SSD300Model(model_lib.CNNModel):
"""Single Shot Multibox Detection (SSD) model for 300x300 image datasets."""
def __init__(self, label_num=ssd_constants.NUM_CLASSES, batch_size=32,
learning_rate=1e-3, backbone='resnet34', params=None):
super(SSD300Model, self).__init__('ssd300', 300, batch_size, learning_rate,
params=params)
# For COCO dataset, 80 categories + 1 background = 81 labels
self.label_num = label_num
# Currently only support ResNet-34 as backbone model
if backbone != 'resnet34':
raise ValueError('Invalid backbone model %s for SSD.' % backbone)
mlperf.logger.log(key=mlperf.tags.BACKBONE, value=backbone)
# Number of channels and default boxes associated with the following layers:
# ResNet34 layer, Conv7, Conv8_2, Conv9_2, Conv10_2, Conv11_2
self.out_chan = [256, 512, 512, 256, 256, 256]
mlperf.logger.log(key=mlperf.tags.LOC_CONF_OUT_CHANNELS,
value=self.out_chan)
# Number of default boxes from layers of different scales
# 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
self.num_dboxes = [4, 6, 6, 6, 4, 4]
mlperf.logger.log(key=mlperf.tags.NUM_DEFAULTS_PER_CELL,
value=self.num_dboxes)
# TODO(haoyuzhang): in order to correctly restore in replicated mode, need
# to create a saver for each tower before graph is finalized. Use variable
# manager for better efficiency.
self.backbone_savers = []
# Collected predictions for eval stage. It maps each image id in eval
# dataset to a dict containing the following information:
# source_id: raw ID of image
# raw_shape: raw shape of image
# pred_box: encoded box coordinates of prediction
# pred_scores: scores of classes in prediction
self.predictions = {}
# Global step when predictions are collected.
self.eval_global_step = 0
# Average precision. In asynchronous eval mode, this is the latest AP we
# get so far and may not be the results at current eval step.
self.eval_coco_ap = 0
    # Process, queues, and thread for asynchronous evaluation. When enabled,
    # create a separate process (async_eval_process) that continuously pulls
    # intermediate results from the predictions queue (a multiprocessing queue),
    # processes them, and pushes final results into the results queue (another
    # multiprocessing queue). The main thread is responsible for pushing
    # messages into the predictions queue, and starts a separate thread to
    # continuously pull messages from the results queue to update final results.
# Message in predictions queue should be a tuple of two elements:
# (evaluation step, predictions)
# Message in results queue should be a tuple of two elements:
# (evaluation step, final results)
self.async_eval_process = None
self.async_eval_predictions_queue = None
self.async_eval_results_queue = None
self.async_eval_results_getter_thread = None
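    # A minimal sketch of the message flow described above (illustrative; the
    # queues are created lazily in postprocess below):
    #   self.async_eval_predictions_queue.put((step, decoded_preds))  # main thread
    #   step, preds = predictions_queue.get()                         # eval process
    #   results_queue.put((step, {'COCO/AP': ap}))                    # eval process
    #   step, eval_results = self.async_eval_results_queue.get()      # getter thread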
# The MLPerf reference uses a starting lr of 1e-3 at bs=32.
self.base_lr_batch_size = 32
def skip_final_affine_layer(self):
return True
def add_backbone_model(self, cnn):
# --------------------------------------------------------------------------
# Resnet-34 backbone model -- modified for SSD
# --------------------------------------------------------------------------
# Input 300x300, output 150x150
cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)
cnn.mpool(3, 3, 2, 2, mode='SAME')
resnet34_layers = [3, 4, 6, 3]
version = 'v1'
# ResNet-34 block group 1
# Input 150x150, output 75x75
for i in range(resnet34_layers[0]):
# Last argument forces residual_block to use projection shortcut, even
# though the numbers of input and output channels are equal
resnet_model.residual_block(cnn, 64, 1, version)
# ResNet-34 block group 2
# Input 75x75, output 38x38
for i in range(resnet34_layers[1]):
stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 128, stride, version, i == 0)
# ResNet-34 block group 3
    # This block group is modified: the first layer keeps stride=1 (the `stride`
    # variable still holds 1 from the previous block group) so that the feature
    # map size does not change within this group of layers
# Input 38x38, output 38x38
for i in range(resnet34_layers[2]):
# The following line is intentionally commented out to differentiate from
# the original ResNet-34 model
# stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 256, stride, version, i == 0)
# ResNet-34 block group 4: removed final block group
    # The following 3 lines are intentionally commented out to differentiate from
# the original ResNet-34 model
# for i in range(resnet34_layers[3]):
# stride = 2 if i == 0 else 1
# resnet_model.residual_block(cnn, 512, stride, version, i == 0)
def add_inference(self, cnn):
cnn.use_batch_norm = True
cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,
'epsilon': ssd_constants.BATCH_NORM_EPSILON,
'scale': True}
with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):
self.add_backbone_model(cnn)
# --------------------------------------------------------------------------
# SSD additional layers
# --------------------------------------------------------------------------
def add_ssd_layer(cnn, depth, k_size, stride, mode):
return cnn.conv(
depth,
k_size,
k_size,
stride,
stride,
mode=mode,
use_batch_norm=False,
kernel_initializer=contrib_layers.xavier_initializer())
# Activations for feature maps of different layers
self.activations = [cnn.top_layer]
# Conv7_1, Conv7_2
# Input 38x38, output 19x19
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv8_1, Conv8_2
# Input 19x19, output 10x10
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv9_1, Conv9_2
# Input 10x10, output 5x5
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 2, 'same'))
# Conv10_1, Conv10_2
# Input 5x5, output 3x3
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
# Conv11_1, Conv11_2
# Input 3x3, output 1x1
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
self.loc = []
self.conf = []
for nd, ac, oc in zip(self.num_dboxes, self.activations, self.out_chan):
l = cnn.conv(
nd * 4,
3,
3,
1,
1,
input_layer=ac,
num_channels_in=oc,
activation=None,
use_batch_norm=False,
kernel_initializer=contrib_layers.xavier_initializer())
scale = l.get_shape()[-1]
# shape = [batch_size, nd * 4, scale, scale]
l = tf.reshape(l, [self.batch_size, nd, 4, scale, scale])
# shape = [batch_size, nd, 4, scale, scale]
l = tf.transpose(l, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, 4]
self.loc.append(tf.reshape(l, [self.batch_size, -1, 4]))
# shape = [batch_size, nd * scale * scale, 4]
c = cnn.conv(
nd * self.label_num,
3,
3,
1,
1,
input_layer=ac,
num_channels_in=oc,
activation=None,
use_batch_norm=False,
kernel_initializer=contrib_layers.xavier_initializer())
# shape = [batch_size, nd * label_num, scale, scale]
c = tf.reshape(c, [self.batch_size, nd, self.label_num, scale, scale])
# shape = [batch_size, nd, label_num, scale, scale]
c = tf.transpose(c, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, label_num]
self.conf.append(tf.reshape(c, [self.batch_size, -1, self.label_num]))
# shape = [batch_size, nd * scale * scale, label_num]
# Shape of locs: [batch_size, NUM_SSD_BOXES, 4]
# Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]
locs, confs = tf.concat(self.loc, 1), tf.concat(self.conf, 1)
# Pack location and confidence outputs into a single output layer
# Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]
logits = tf.concat([locs, confs], 2)
cnn.top_layer = logits
cnn.top_size = 4 + self.label_num
return cnn.top_layer
def get_learning_rate(self, global_step, batch_size):
rescaled_lr = self.get_scaled_base_learning_rate(batch_size)
# Defined in MLPerf reference model
boundaries = [160000, 200000]
boundaries = [b * self.base_lr_batch_size // batch_size for b in boundaries]
decays = [1, 0.1, 0.01]
learning_rates = [rescaled_lr * d for d in decays]
lr = tf.train.piecewise_constant(global_step, boundaries, learning_rates)
warmup_steps = int(118287 / batch_size * 5)
warmup_lr = (
rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
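  # Worked example for get_learning_rate (illustrative): at batch_size=64 the
  # boundaries scale to [80000, 100000], warmup_steps = int(118287 / 64 * 5) =
  # 9241, and after warmup the piecewise schedule applies decays [1, 0.1, 0.01]
  # to the rescaled base learning rate.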
def get_scaled_base_learning_rate(self, batch_size):
"""Calculates base learning rate for creating lr schedule.
In replicated mode, gradients are summed rather than averaged which, with
the sgd and momentum optimizers, increases the effective learning rate by
lr * num_gpus. Dividing the base lr by num_gpus negates the increase.
Args:
batch_size: Total batch-size.
Returns:
Base learning rate to use to create lr schedule.
"""
base_lr = self.learning_rate
if self.params.variable_update == 'replicated':
base_lr = self.learning_rate / self.params.num_gpus
scaled_lr = base_lr * (batch_size / self.base_lr_batch_size)
return scaled_lr
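  # Worked example for get_scaled_base_learning_rate (illustrative): with
  # learning_rate=1e-3, 4 GPUs in replicated mode and a total batch size of 128,
  # base_lr = 1e-3 / 4 = 2.5e-4 and scaled_lr = 2.5e-4 * (128 / 32) = 1e-3.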
def _collect_backbone_vars(self):
backbone_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='.*'+ BACKBONE_MODEL_SCOPE_NAME)
var_list = {}
# Assume variables in the checkpoint are following the naming convention of
# a model checkpoint trained with TF official model
# TODO(haoyuzhang): the following variable name parsing is hacky and easy
# to break if there is change in naming convention of either benchmarks or
# official models.
for v in backbone_vars:
# conv2d variable example (model <-- checkpoint):
# v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel
if 'conv2d' in v.name:
re_match = re.search(r'conv(\d+)/conv2d/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'conv2d', layer_id, param_name)
var_list[vname_in_ckpt] = v
      # batchnorm variable example:
# v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma
elif 'batchnorm' in v.name:
re_match = re.search(r'batchnorm(\d+)/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'batch_normalization', layer_id, param_name)
var_list[vname_in_ckpt] = v
return var_list
def _var_name_in_official_model_ckpt(self, layer_name, layer_id, param_name):
"""Return variable names according to convention in TF official models."""
vname_in_ckpt = layer_name
if layer_id > 0:
vname_in_ckpt += '_' + str(layer_id)
vname_in_ckpt += '/' + param_name
return vname_in_ckpt
def loss_function(self, inputs, build_network_result):
logits = build_network_result.logits
# Unpack model output back to locations and confidence scores of predictions
# Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)
# Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
# Shape of num_gt: [batch_size]
_, gt_loc, gt_label, num_gt = inputs
gt_label = tf.cast(gt_label, tf.int32)
box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
class_loss = self._classification_loss(pred_label, gt_label, num_gt)
tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
return class_loss + box_loss
def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):
"""Computes the localization loss.
Computes the localization loss using smooth l1 loss.
Args:
pred_loc: a flatten tensor that includes all predicted locations. The
shape is [batch_size, num_anchors, 4].
gt_loc: a tensor representing box regression targets in
[batch_size, num_anchors, 4].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
      num_matched_boxes: the number of anchors that are matched to groundtruth
        targets, used as the loss normalizer. The shape is [batch_size].
Returns:
box_loss: a float32 representing total box regression loss.
"""
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(
gt_loc, pred_loc,
reduction=tf.losses.Reduction.NONE
), axis=2)
smooth_l1 = tf.multiply(smooth_l1, float_mask)
box_loss = tf.reduce_sum(smooth_l1, axis=1)
return tf.reduce_mean(box_loss / num_matched_boxes)
def _classification_loss(self, pred_label, gt_label, num_matched_boxes):
"""Computes the classification loss.
Computes the classification loss with hard negative mining.
Args:
pred_label: a flatten tensor that includes all predicted class. The shape
is [batch_size, num_anchors, num_classes].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
      num_matched_boxes: the number of anchors that are matched to groundtruth
        targets. This is used as the loss normalizer.
    Returns:
      class_loss: a float32 representing the total classification loss.
"""
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
gt_label, pred_label, reduction=tf.losses.Reduction.NONE)
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
# Hard example mining
neg_masked_cross_entropy = cross_entropy * (1 - float_mask)
relative_position = contrib_framework.argsort(
contrib_framework.argsort(
neg_masked_cross_entropy, direction='DESCENDING'))
num_neg_boxes = tf.minimum(
tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,
ssd_constants.NUM_SSD_BOXES)
top_k_neg_mask = tf.cast(tf.less(
relative_position,
tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))
), tf.float32)
class_loss = tf.reduce_sum(
tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)
return tf.reduce_mean(class_loss / num_matched_boxes)
def add_backbone_saver(self):
# Create saver with mapping from variable names in checkpoint of backbone
# model to variables in SSD model
backbone_var_list = self._collect_backbone_vars()
self.backbone_savers.append(tf.train.Saver(backbone_var_list))
def load_backbone_model(self, sess, backbone_model_path):
for saver in self.backbone_savers:
saver.restore(sess, backbone_model_path)
def get_input_data_types(self, subset):
if subset == 'validation':
return [self.data_type, tf.float32, tf.float32, tf.float32, tf.int32]
return [self.data_type, tf.float32, tf.float32, tf.float32]
def get_input_shapes(self, subset):
"""Return encoded tensor shapes for train and eval data respectively."""
if subset == 'validation':
# Validation data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. source image IDs
# 5. raw image shapes
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 4],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 1],
[self.batch_size],
[self.batch_size, 3],
]
# Training data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. numbers of objects in images
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1],
[self.batch_size]
]
def accuracy_function(self, inputs, logits):
"""Returns the ops to measure the mean precision of the model."""
try:
from cnn_quantization.tf_cnn_benchmarks import ssd_dataloader # pylint: disable=g-import-not-at-top
from tensorflow_models.object_detection.box_coders import faster_rcnn_box_coder # pylint: disable=g-import-not-at-top
from tensorflow_models.object_detection.core import box_coder # pylint: disable=g-import-not-at-top
from tensorflow_models.object_detection.core import box_list # pylint: disable=g-import-not-at-top
except ImportError:
      raise ImportError('To use the COCO dataset, you must clone the '
                        'repo https://github.com/tensorflow/models and add '
                        'tensorflow/models and tensorflow/models/research to '
                        'the PYTHONPATH, and compile the protobufs; '
                        'to evaluate using the COCO '
                        'metric, download and install the Python COCO API from '
                        'https://github.com/cocodataset/cocoapi')
# Unpack model output back to locations and confidence scores of predictions
    # pred_locs: relative locations (coordinates) of objects in all SSD boxes
# shape: [batch_size, NUM_SSD_BOXES, 4]
# pred_labels: confidence scores of objects being of all categories
# shape: [batch_size, NUM_SSD_BOXES, label_num]
pred_locs, pred_labels = tf.split(logits, [4, self.label_num], 2)
ssd_box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=ssd_constants.BOX_CODER_SCALES)
anchors = box_list.BoxList(
tf.convert_to_tensor(ssd_dataloader.DefaultBoxes()('ltrb')))
pred_boxes = box_coder.batch_decode(
encoded_boxes=pred_locs, box_coder=ssd_box_coder, anchors=anchors)
pred_scores = tf.nn.softmax(pred_labels, axis=2)
# TODO(haoyuzhang): maybe use `gt_boxes` and `gt_classes` for visualization.
_, gt_boxes, gt_classes, source_id, raw_shape = inputs # pylint: disable=unused-variable
return {
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_BOXES): pred_boxes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_SCORES): pred_scores,
# TODO(haoyuzhang): maybe use these values for visualization.
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_boxes': gt_boxes,
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_classes': gt_classes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.SOURCE_ID): source_id,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.RAW_SHAPE): raw_shape
}
def postprocess(self, results):
"""Postprocess results returned from model."""
try:
from cnn_quantization.tf_cnn_benchmarks import coco_metric # pylint: disable=g-import-not-at-top
except ImportError:
      raise ImportError('To use the COCO dataset, you must clone the '
                        'repo https://github.com/tensorflow/models and add '
                        'tensorflow/models and tensorflow/models/research to '
                        'the PYTHONPATH, and compile the protobufs; '
                        'to evaluate using the COCO '
                        'metric, download and install the Python COCO API from '
                        'https://github.com/cocodataset/cocoapi')
pred_boxes = results[ssd_constants.PRED_BOXES]
pred_scores = results[ssd_constants.PRED_SCORES]
# TODO(haoyuzhang): maybe use these values for visualization.
# gt_boxes = results['gt_boxes']
# gt_classes = results['gt_classes']
source_id = results[ssd_constants.SOURCE_ID]
raw_shape = results[ssd_constants.RAW_SHAPE]
# COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once. Due
# to rounding errors (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting
# `num_eval_epochs` to 1 is not enough and will often miss some images. We
# expect user to set `num_eval_epochs` to >1, which will leave some unused
# images from previous steps in `predictions`. Here we check if we are doing
# eval at a new global step.
if results['global_step'] > self.eval_global_step:
self.eval_global_step = results['global_step']
self.predictions.clear()
for i, sid in enumerate(source_id):
self.predictions[int(sid)] = {
ssd_constants.PRED_BOXES: pred_boxes[i],
ssd_constants.PRED_SCORES: pred_scores[i],
ssd_constants.SOURCE_ID: source_id[i],
ssd_constants.RAW_SHAPE: raw_shape[i]
}
    # COCO metric calculates mAP only after a full epoch of evaluation. Return
    # dummy results for top_N_accuracy to be compatible with benchmark_cnn.py.
if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES:
log_fn('Got results for all {:d} eval examples. Calculate mAP...'.format(
ssd_constants.COCO_NUM_VAL_IMAGES))
annotation_file = os.path.join(self.params.data_dir,
ssd_constants.ANNOTATION_FILE)
      # The size of predictions before decoding is about 15--30GB, while the
      # size after decoding is 100--200MB. When using async eval mode, decoding
      # takes 20--30 seconds of main thread time but is necessary to avoid OOM
      # during inter-process communication.
decoded_preds = coco_metric.decode_predictions(self.predictions.values())
self.predictions.clear()
if self.params.collect_eval_results_async:
def _eval_results_getter():
"""Iteratively get eval results from async eval process."""
while True:
step, eval_results = self.async_eval_results_queue.get()
self.eval_coco_ap = eval_results['COCO/AP']
mlperf.logger.log_eval_accuracy(
self.eval_coco_ap, step, self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
if self.reached_target():
# Reached target, clear all pending messages in predictions queue
# and insert poison pill to stop the async eval process.
while not self.async_eval_predictions_queue.empty():
self.async_eval_predictions_queue.get()
self.async_eval_predictions_queue.put('STOP')
break
if not self.async_eval_process:
# Limiting the number of messages in predictions queue to prevent OOM.
# Each message (predictions data) can potentially consume a lot of
# memory, and normally there should only be few messages in the queue.
# If often blocked on this, consider reducing eval frequency.
self.async_eval_predictions_queue = multiprocessing.Queue(2)
self.async_eval_results_queue = multiprocessing.Queue()
        # The reason to use a Process rather than a Thread is mainly that the
        # eval runner is computationally intensive. Python threads do not truly
        # run in parallel, so a runner thread would get significantly delayed
        # (or alternatively delay the main thread).
self.async_eval_process = multiprocessing.Process(
target=coco_metric.async_eval_runner,
args=(self.async_eval_predictions_queue,
self.async_eval_results_queue,
annotation_file))
self.async_eval_process.daemon = True
self.async_eval_process.start()
self.async_eval_results_getter_thread = threading.Thread(
target=_eval_results_getter, args=())
self.async_eval_results_getter_thread.daemon = True
self.async_eval_results_getter_thread.start()
self.async_eval_predictions_queue.put(
(self.eval_global_step, decoded_preds))
return {'top_1_accuracy': 0, 'top_5_accuracy': 0.}
eval_results = coco_metric.compute_map(decoded_preds, annotation_file)
self.eval_coco_ap = eval_results['COCO/AP']
ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
for metric_key, metric_value in eval_results.items():
ret[constants.SIMPLE_VALUE_RESULT_PREFIX + metric_key] = metric_value
mlperf.logger.log_eval_accuracy(self.eval_coco_ap, self.eval_global_step,
self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
return ret
log_fn('Got {:d} out of {:d} eval examples.'
' Waiting for the remaining to calculate mAP...'.format(
len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES))
return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
def get_synthetic_inputs(self, input_name, nclass):
"""Generating synthetic data matching real data shape and type."""
inputs = tf.random_uniform(
self.get_input_shapes('train')[0], dtype=self.data_type)
inputs = contrib_framework.local_variable(inputs, name=input_name)
boxes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4], dtype=tf.float32)
classes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1], dtype=tf.float32)
nboxes = tf.random_uniform(
[self.batch_size], minval=1, maxval=10, dtype=tf.float32)
return (inputs, boxes, classes, nboxes)
def reached_target(self):
return (self.params.stop_at_top_1_accuracy and
self.eval_coco_ap >= self.params.stop_at_top_1_accuracy)
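# A minimal NumPy sketch of the hard negative mining used in
# SSD300Model._classification_loss above (illustrative; assumes NumPy is
# available, and is guarded so it never runs on import). The double argsort
# turns per-box negative losses into descending ranks, and only boxes whose
# rank is below the per-image negative budget are kept.
if __name__ == '__main__':
  import numpy as np
  neg_masked_cross_entropy = np.array([[0.2, 1.5, 0.1, 0.9]])  # [batch=1, boxes=4]
  num_neg_boxes = np.array([2])  # keep the 2 hardest negatives for this image
  # Rank of each box's negative loss in descending order (0 == largest loss).
  relative_position = np.argsort(np.argsort(-neg_masked_cross_entropy, axis=1), axis=1)
  top_k_neg_mask = (relative_position < num_neg_boxes[:, np.newaxis]).astype(np.float32)
  print(top_k_neg_mask)  # [[0. 1. 0. 1.]] -- the two highest-loss boxes are kept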
|
process.py
|
import shlex
from subprocess import Popen, PIPE, TimeoutExpired
from queue import Queue, Empty
from threading import Thread
import psutil
import time
from utils import Map
class Process:
"""Allows to run processes with limits
Attributes:
cmd (str): Command to execute
input (Optional[str]): Input to be passed to processes STDIN
time_limit (Optional[int]): Time limit in milliseconds
memory_limit (Optional[int]): Memory limit in kB
stdout_file (Optional[str]): Name of file STDOUT should be written to
stderr_file (Optional[str]): Name of file STDERR should be written to
process (Popen): Popen process object
status (Map): Current status of program including
time_limit_exceeded (bool): Is time limit exceeded
memory_limit_exceeded (bool): Is memory limit exceeded
stdout (str): All STDOUT of process
stderr (str): All STDERR of process
            time (int): Execution time in milliseconds. This attribute is None until the process has finished.
            memory (int): Maximum memory use in kB. This attribute is None until the process has finished.
            returncode (int): Return code of the process. This attribute is None until the process has finished.
"""
def __init__(self, cmd, input=None, time_limit=None, memory_limit=None, stdout_file=None, stderr_file=None):
"""Init method of process
Args:
cmd (str): Command to execute
input (Optional[str]): Input to be passed to processes STDIN
time_limit (Optional[int]): Time limit in milliseconds
memory_limit (Optional[int]): Memory limit in kB
stdout_file (Optional[str]): Name of file STDOUT should be written to
stderr_file (Optional[str]): Name of file STDERR should be written to
"""
self.cmd, self.input, self.time_limit, self.memory_limit, self.stdout_file, self.stderr_file\
= shlex.split(cmd), input, time_limit, memory_limit, stdout_file, stderr_file
if self.input:
self.input = self.input.encode('UTF-8')
self.process = None
# status variables
self.status = Map()
self.status.time_limit_exceeded = False
self.status.memory_limit_exceeded = False
self.status.stdout = None
self.status.stderr = None
self.status.time = None
self.status.memory = None
self.status.returncode = None
def run(self):
"""Runs process with configuration set.
"""
self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
psutil_process = psutil.Process(self.process.pid)
        # pause the process so the bootstrap code below can run before it starts executing
psutil_process.suspend()
stdout_summary = ''
stderr_summary = ''
if self.memory_limit is None:
try:
psutil_process.resume()
start = time.time()
                # time_limit is documented in milliseconds; communicate() expects seconds
                timeout = self.time_limit / 1000 if self.time_limit is not None else None
                (stdout_summary, stderr_summary) = self.process.communicate(self.input, timeout)
                stdout_summary = stdout_summary.decode('UTF-8')
                stderr_summary = stderr_summary.decode('UTF-8')
                # execution time in milliseconds, matching the limited branch below
                self.status.time = round((time.time() - start) * 1000)
                self.status.returncode = self.process.poll()
except TimeoutExpired:
self.status.time_limit_exceeded = True
self.process.kill()
else:
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
stdout_queue = Queue()
stdout_thread = Thread(target=enqueue_output, args=(self.process.stdout, stdout_queue))
stdout_thread.daemon = True
stdout_thread.start()
stderr_queue = Queue()
stderr_thread = Thread(target=enqueue_output, args=(self.process.stderr, stderr_queue))
stderr_thread.daemon = True
stderr_thread.start()
max_mem = 0
# start timer
start = time.time()
# bootstrap finished, resume
psutil_process.resume()
# write data to STDIN of program
if self.input:
try:
self.process.stdin.write(self.input)
self.process.stdin.close()
except BrokenPipeError:
pass # program does not accept any STDIN
            # start main cycle (time_limit is documented in milliseconds)
            while (time.time() - start) * 1000 <= (self.time_limit or float('inf')):
                max_mem = max(max_mem, psutil_process.memory_info().vms)
                # Memory limit exceeded (vms is in bytes, memory_limit in kB)
                if max_mem / 1024 > self.memory_limit:
self.status.memory_limit_exceeded = True
break
# process finished
if self.process.poll() is not None:
self.status.returncode = self.process.returncode
break
# Time limit exceeded
if self.status.returncode is None:
if not self.status.memory_limit_exceeded:
self.status.time_limit_exceeded = True
self.process.kill()
self.status.time = round((time.time() - start) * 1000)
self.status.memory = max_mem / 1024
stdout_thread.join()
stderr_thread.join()
# get lost STDOUT
to_file = isinstance(self.stdout_file, str)
if to_file:
f = open(self.stdout_file, 'w')
while True:
try:
line = stdout_queue.get_nowait().decode('UTF-8')
except Empty:
break
else:
if to_file:
f.write(line)
stdout_summary += line
if to_file:
f.close()
# get lost STDERR
to_file = isinstance(self.stderr_file, str)
if to_file:
f = open(self.stderr_file, 'w')
while True:
try:
line = stderr_queue.get_nowait().decode('UTF-8')
except Empty:
break
else:
if to_file:
f.write(line)
stderr_summary += line
if to_file:
f.close()
# save STDOUT and STDERR to class vars
if stdout_summary:
self.status.stdout = stdout_summary
if stderr_summary:
self.status.stderr = stderr_summary
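# A minimal usage sketch (illustrative; assumes psutil and the local `utils.Map`
# helper are importable, and that `python3` is on the PATH):
if __name__ == '__main__':
    proc = Process('python3 -c "print(6 * 7)"')
    proc.run()
    print(proc.status.returncode, proc.status.time, proc.status.stdout)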
|
main.py
|
# Python 3 server example
from http.server import BaseHTTPRequestHandler, HTTPServer
from playsound import playsound
import threading
hostName = "0.0.0.0"
serverPort = 8080
def sound():
playsound("sound.mp3")
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("Playing sound...", "utf-8"))
thread = threading.Thread(target=sound)
thread.start()
if __name__ == "__main__":
webServer = HTTPServer((hostName, serverPort), MyServer)
print("Server started http://%s:%s" % (hostName, serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
print("Server stopped.")
|
index.py
|
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import gzip
import mimetypes
import posixpath
import pkg_resources
import random
import socket
import ssl
import string
import zlib
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled,\
InstallationError
from pip.backwardcompat import (WindowsError, BytesIO,
Queue, urlparse,
URLError, HTTPError, u,
product, url2pathname,
Empty as QueueEmpty)
from pip.backwardcompat import CertificateError
from pip.download import urlopen, path_to_url2, url_to_path, geturl, Urllib2HeadRequest
from pip.wheel import Wheel, wheel_ext, wheel_setuptools_support, setuptools_requirement
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip.vendor import html5lib
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=False, allow_external=[], allow_insecure=[],
allow_all_external=False, allow_all_insecure=False,
allow_all_prereleases=False):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_insecure = set(normalize_name(n) for n in allow_insecure)
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Do we allow unsafe and unverifiable files?
self.allow_all_insecure = allow_all_insecure
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_insecure = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
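    # A minimal construction sketch (illustrative; `req` would be a requirement
    # object as used elsewhere in pip):
    #   finder = PackageFinder(find_links=[], index_urls=['https://pypi.python.org/simple/'])
    #   link = finder.find_requirement(req, upgrade=False)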
@property
def use_wheel(self):
return self._use_wheel
@use_wheel.setter
def use_wheel(self, value):
self._use_wheel = value
if self._use_wheel and not wheel_setuptools_support():
raise InstallationError("pip's wheel support requires %s." % setuptools_requirement)
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url2(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == InfLink: # existing install
pri = 1
elif link.wheel:
# all wheel links are known to be supported at this stage
pri = -(link.wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
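        # Example ordering (illustrative): for two candidates with the same
        # version and use_wheel=True, an existing install (pri == 1) sorts ahead
        # of any wheel (pri == -support_index_min() <= 0), which in turn sorts
        # ahead of an sdist (pri == -len(supported_tags)).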
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
_flocations, _ulocations = self._sort_locations(self.dependency_links)
file_locations.extend(_flocations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
# We explicitly do not trust links that came from dependency_links
locations.extend([Link(url) for url in _ulocations])
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
found_versions = []
found_versions.extend(
self._package_versions(
# We trust every directly linked archive in find_links
[Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external %s to allow)." % req.name)
if self.need_warn_insecure:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-insecure %s to allow)." % req.name)
raise DistributionNotFound('No distributions at all found for %s' % req)
installed_version = []
if req.satisfied_by is not None:
installed_version = [(req.satisfied_by.parsed_version, InfLink, req.satisfied_by.version)]
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
#this is an intentional priority ordering
all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
# If this version isn't the already installed one, then
# ignore it if it's a pre-release.
if link is not InfLink:
logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
continue
applicable_versions.append((parsed_version, link, version))
applicable_versions = self._sort_versions(applicable_versions)
existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is InfLink])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is InfLink:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][2]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in all_versions])))
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external to allow).")
if self.need_warn_insecure:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-insecure %s to allow)." % req.name)
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][1] is InfLink:
# We have an existing version, and its the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))
selected_version = applicable_versions[0][1]
if (selected_version.internal is not None
and not selected_version.internal):
logger.warn("%s an externally hosted file and may be "
"unreliable" % req.name)
if (selected_version.verifiable is not None
and not selected_version.verifiable):
logger.warn("%s is potentially insecure and "
"unverifiable." % req.name)
return selected_version
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links"""
pending_queue = Queue()
for location in locations:
pending_queue.put(location)
done = []
seen = set()
threads = []
for i in range(min(10, len(locations))):
t = threading.Thread(target=self._get_queued_page, args=(req, pending_queue, done, seen))
t.setDaemon(True)
threads.append(t)
t.start()
for t in threads:
t.join()
return done
_log_lock = threading.Lock()
def _get_queued_page(self, req, pending_queue, done, seen):
while 1:
try:
location = pending_queue.get(False)
except QueueEmpty:
return
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
done.append(page)
for link in page.rel_links():
normalized = normalize_name(req.name).lower()
if (not normalized in self.allow_external
and not self.allow_all_external):
self.need_warn_external = True
logger.debug("Not searching %s for files because external "
"urls are disallowed." % link)
continue
if (link.trusted is not None
and not link.trusted
and not normalized in self.allow_insecure
and not self.allow_all_insecure):
logger.debug("Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files." % link)
self.need_warn_insecure = True
continue
pending_queue.put(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
if link.wheel and link.wheel.name.lower() == search_name.lower():
version = link.wheel.version
if not link.wheel.supported():
logger.debug('Skipping %s because it is not compatible with this Python' % link)
return []
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows Binary Wheel. This is paired
# with a change to PyPI disabling uploads for the same. Once
# we have a mechanism for enabling support for binary wheels
# on linux that deals with the inherent problems of binary
# distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (not platform.startswith('win')
and comes_from is not None
and urlparse.urlparse(comes_from.url).netloc.endswith(
"pypi.python.org")):
if not link.wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform" % link
)
return []
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if (link.internal is not None
and not link.internal
and not normalize_name(search_name).lower() in self.allow_external
and not self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted." % link)
self.need_warn_external = True
return []
if (link.verifiable is not None
and not link.verifiable
and not normalize_name(search_name).lower() in self.allow_insecure
and not self.allow_all_insecure):
            # We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug("Skipping %s because it is an insecure and "
"unverifiable file." % link)
self.need_warn_insecure = True
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
def _get_page(self, link, req):
return HTMLPage.get_page(link, req, cache=self.cache)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None, trusted=None):
self.content = content
self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True):
url = link.url
url = url.split('#', 1)[0]
        if cache is not None and cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = urlopen(url)
real_url = geturl(resp)
headers = resp.info()
contents = resp.read()
encoding = headers.get('Content-Encoding', None)
#XXX need to handle exceptions and add testing for this
if encoding is not None:
if encoding == 'gzip':
contents = gzip.GzipFile(fileobj=BytesIO(contents)).read()
if encoding == 'deflate':
contents = zlib.decompress(contents)
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
# redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
# Unless we issue a HEAD request on every url we cannot know
# ahead of time for sure if something is HTML or not. However we
# can check after we've downloaded it.
content_type = headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug('Skipping page %s because of Content-Type: %s' %
(link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
inst = cls(u(contents), real_url, headers, trusted=link.trusted)
except (HTTPError, URLError, socket.timeout, socket.error, OSError, WindowsError):
e = sys.exc_info()[1]
desc = str(e)
if isinstance(e, socket.timeout):
log_meth = logger.info
                level = 1
desc = 'timed out'
elif isinstance(e, URLError):
#ssl/certificate error
if hasattr(e, 'reason') and (isinstance(e.reason, ssl.SSLError) or isinstance(e.reason, CertificateError)):
desc = 'There was a problem confirming the ssl certificate: %s' % e
log_meth = logger.notify
else:
log_meth = logger.info
if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout):
desc = 'timed out'
level = 1
else:
level = 2
elif isinstance(e, HTTPError) and e.code == 404:
## FIXME: notify?
log_meth = logger.info
level = 2
else:
log_meth = logger.info
level = 1
log_meth('Could not fetch URL %s: %s' % (link, desc))
log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
return None
if cache is not None:
cache.add_page([url, real_url], inst)
return inst
@staticmethod
def _get_content_type(url):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
req = Urllib2HeadRequest(url, headers={'Host': netloc})
resp = urlopen(req)
try:
if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'):
## FIXME: doesn't handle redirects
return ''
return resp.info().get('content-type', '')
finally:
resp.close()
@property
def api_version(self):
if not hasattr(self, "_api_version"):
_api_version = None
metas = [x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"]
if metas:
try:
_api_version = int(metas[0].get("value", None))
except (TypeError, ValueError):
_api_version = None
self._api_version = _api_version
return self._api_version
@property
def base_url(self):
if not hasattr(self, "_base_url"):
base = self.parsed.find(".//base")
if base is not None and base.get("href"):
self._base_url = base.get("href")
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(anchor.get("rel")
and "internal" in anchor.get("rel").split())
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
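    # Added note (not part of the original module): for example, a space in a
    # link is rewritten by the substitution above, since '%%%2x' % ord(' ')
    # evaluates to '%20', while characters already in _clean_re's allowed set
    # (including '%') are left untouched, as the docstring describes.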
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None):
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
# Set whether it's a wheel
self.wheel = None
if url != Inf and self.splitext()[1] == wheel_ext:
self.wheel = Wheel(self.filename)
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __ne__(self, other):
return self.url != other.url
def __lt__(self, other):
return self.url < other.url
def __le__(self, other):
return self.url <= other.url
def __gt__(self, other):
return self.url > other.url
def __ge__(self, other):
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
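    # Added note: e.g. for 'http://host/pkg-1.0.tar.gz#egg=pkg-1.0' the
    # property above returns 'pkg-1.0'; URLs without an #egg= fragment
    # return None.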
_hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
                # This link is either trusted, or it came from a trusted
                # source; however, it is not operating under API version 2,
                # so we can't make any claims about whether it's safe or not.
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
#An "Infinite Link" that compares greater than other links
InfLink = Link(Inf) #this object is not currently used as a sortable
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
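# Illustrative sketch (added; not part of the original module). It exercises the
# two helpers defined above and only runs when this file is executed directly,
# so importing the module is unaffected. The example URL is made up.
if __name__ == '__main__':
    for example_name in ('Foo-1.2', 'Foo-dev', 'Foo'):
        # 'Foo-1.2' -> 'Foo==1.2', 'Foo-dev' -> 'Foo==dev', 'Foo' -> 'Foo'
        print('%s -> %s' % (example_name, package_to_requirement(example_name)))
    # With an #egg= fragment, the fragment (not the file name) drives the result.
    print(get_requirement_from_url('http://host/Foo-1.2.tar.gz#egg=Foo-1.2'))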
|
_cancel_many_calls_test.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test making many calls and immediately cancelling most of them."""
import threading
import unittest
from grpc._cython import cygrpc
from grpc.framework.foundation import logging_pool
from tests.unit._cython import test_utilities
from tests.unit.framework.common import test_constants
_EMPTY_FLAGS = 0
_EMPTY_METADATA = ()
_SERVER_SHUTDOWN_TAG = 'server_shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TAG = 'receive_close_on_server'
_RECEIVE_MESSAGE_TAG = 'receive_message'
_SERVER_COMPLETE_CALL_TAG = 'server_complete_call'
_SUCCESS_CALL_FRACTION = 1.0 / 8.0
_SUCCESSFUL_CALLS = int(test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
_UNSUCCESSFUL_CALLS = test_constants.RPC_CONCURRENCY - _SUCCESSFUL_CALLS
class _State(object):
def __init__(self):
self.condition = threading.Condition()
self.handlers_released = False
self.parked_handlers = 0
self.handled_rpcs = 0
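# Added note: _State coordinates the test. Server handler threads "park" on the
# condition until every handler thread has started and every RPC has been
# requested; testCancelManyCalls then sets handlers_released and wakes them all
# at once so the cancellations race against the in-flight handlers.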
def _is_cancellation_event(event):
return (event.tag is _RECEIVE_CLOSE_ON_SERVER_TAG and
event.batch_operations[0].cancelled())
class _Handler(object):
def __init__(self, state, completion_queue, rpc_event):
self._state = state
self._lock = threading.Lock()
self._completion_queue = completion_queue
self._call = rpc_event.call
def __call__(self):
with self._state.condition:
self._state.parked_handlers += 1
if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
self._state.condition.notify_all()
while not self._state.handlers_released:
self._state.condition.wait()
with self._lock:
self._call.start_server_batch(
(cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
_RECEIVE_CLOSE_ON_SERVER_TAG)
self._call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_RECEIVE_MESSAGE_TAG)
first_event = self._completion_queue.poll()
if _is_cancellation_event(first_event):
self._completion_queue.poll()
else:
with self._lock:
operations = (
cygrpc.SendInitialMetadataOperation(_EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.SendMessageOperation(b'\x79\x57', _EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(
_EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
_EMPTY_FLAGS),
)
self._call.start_server_batch(operations,
_SERVER_COMPLETE_CALL_TAG)
self._completion_queue.poll()
self._completion_queue.poll()
def _serve(state, server, server_completion_queue, thread_pool):
for _ in range(test_constants.RPC_CONCURRENCY):
call_completion_queue = cygrpc.CompletionQueue()
server.request_call(call_completion_queue, server_completion_queue,
_REQUEST_CALL_TAG)
rpc_event = server_completion_queue.poll()
thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
with state.condition:
state.handled_rpcs += 1
if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
state.condition.notify_all()
server_completion_queue.poll()
class _QueueDriver(object):
def __init__(self, condition, completion_queue, due):
self._condition = condition
self._completion_queue = completion_queue
self._due = due
self._events = []
self._returned = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._due.remove(event.tag)
self._condition.notify_all()
if not self._due:
self._returned = True
return
thread = threading.Thread(target=in_thread)
thread.start()
def events(self, at_least):
with self._condition:
while len(self._events) < at_least:
self._condition.wait()
return tuple(self._events)
class CancelManyCallsTest(unittest.TestCase):
def testCancelManyCalls(self):
server_thread_pool = logging_pool.pool(
test_constants.THREAD_CONCURRENCY)
server_completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server([(
b'grpc.so_reuseport',
0,
)], False)
server.register_completion_queue(server_completion_queue)
port = server.add_http2_port(b'[::]:0')
server.start()
channel = cygrpc.Channel('localhost:{}'.format(port).encode(), None,
None)
state = _State()
server_thread_args = (
state,
server,
server_completion_queue,
server_thread_pool,
)
server_thread = threading.Thread(target=_serve, args=server_thread_args)
server_thread.start()
client_condition = threading.Condition()
client_due = set()
with client_condition:
client_calls = []
for index in range(test_constants.RPC_CONCURRENCY):
tag = 'client_complete_call_{0:04d}_tag'.format(index)
client_call = channel.integrated_call(
_EMPTY_FLAGS, b'/twinkies', None, None, _EMPTY_METADATA,
None, ((
(
cygrpc.SendInitialMetadataOperation(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(b'\x45\x56',
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(
_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
tag,
),))
client_due.add(tag)
client_calls.append(client_call)
client_events_future = test_utilities.SimpleFuture(lambda: tuple(
channel.next_call_event() for _ in range(_SUCCESSFUL_CALLS)))
with state.condition:
while True:
if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
state.condition.wait()
elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
state.condition.wait()
else:
state.handlers_released = True
state.condition.notify_all()
break
client_events_future.result()
with client_condition:
for client_call in client_calls:
client_call.cancel(cygrpc.StatusCode.cancelled, 'Cancelled!')
for _ in range(_UNSUCCESSFUL_CALLS):
channel.next_call_event()
channel.close(cygrpc.StatusCode.unknown, 'Cancelled on channel close!')
with state.condition:
server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
grab_shixin.py
|
import requests
import json
import redis
from threading import Thread
import time
import os
def grab_info(name, id_num):
url = 'http://192.168.30.248:8081/courtshixin'
data = {
"serialNum": "abc",
"pName": name,
"CID": id_num
}
response = requests.post(url, json.dumps(data)).content.decode()
return response
def get_task():
conn = redis.Redis()
item = conn.spop('court_lose_credit')
if item:
item = item.decode()
name = item.split(',')[0]
id_num = item.split(',')[1]
return name, id_num
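# Added note: each member of the 'court_lose_credit' redis set is assumed to be
# a comma-separated "name,id_number" string, which is why the popped item is
# split on ',' above; when the set is empty, get_task() returns None and the
# worker loops in store_info() exit.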
def store_info(file):
with open(r'C:\Users\YongHu\Desktop\法院失信\weishixin_result\%s' % file, 'a+', encoding='utf-8') as f:
while True:
task = get_task()
if task:
name, id_num = task
while True:
try:
info = grab_info(name, id_num)
items = json.loads(info)
items['name'] = name
items['id_num'] = id_num
is_success = items['isSuccess']
if is_success:
f.write(json.dumps(items) + '\n')
print(items)
time.sleep(2.5)
break
time.sleep(2.5)
except Exception as e:
print(e)
else:
break
def main():
for i in range(10):
file = str(i) + '.log'
t = Thread(target=store_info, args=(file, ))
t.start()
def merge():
folder = r'C:\Users\YongHu\Desktop\法院失信\weishixin_result'
f = open(r'C:\Users\YongHu\Desktop\法院失信\result_fayuan_weishixin.log', 'w', encoding='utf-8')
file_list = os.listdir(folder)
for file in file_list:
with open(folder + '\\' + file, 'r', encoding='utf-8') as ff:
for line in ff:
f.write(line)
# info = json.loads(line)
# identifications = info['identifications']
# is_hit = identifications['isHit']
# if is_hit:
# f.write(line)
# print(line)
f.close()
if __name__ == '__main__':
main()
# merge()
|
keep_alive.py
|
# Create a web server for our bot; UptimeRobot will continuously ping it to keep the repl alive.
from flask import Flask
from threading import Thread #the server will run on a separate thread from our bot
app = Flask('') #creating a flask app
@app.route('/')
def home():
return "Hello . I am alive ehehe" #displays this to anyone who visits the server . The URL is created by repl.it
def run(): #runs the web server
app.run(host="0.0.0.0",port = 8080)
def keep_alive():
t = Thread(target = run)
t.start()
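# Example usage (added sketch; the importing bot script and its token handling
# are assumptions, not part of this file):
#
#   from keep_alive import keep_alive
#   keep_alive()      # start the Flask server on port 8080 in a background thread
#   bot.run(TOKEN)    # then start the bot; UptimeRobot pings the Flask URL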
|
tests.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
import warnings
from datetime import datetime, timedelta
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.utils import six
from django.utils._os import upath
from django.utils.six.moves.urllib.request import urlopen
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
        get_storage_class raises an error if the requested import doesn't exist.
"""
with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
        get_storage_class raises an error if the requested class doesn't exist.
"""
self.assertRaises(ImportError, get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
        get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImportError,
"No module named '?(django.core.files.)?non_existing_storage'?"):
get_storage_class(
'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageDeconstructionTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
        Makes sure an empty location falls back to the current working directory.
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(six.StringIO('1'), '', 'test',
'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
# should encode special chars except ~!*()'
        # like the encodeURIComponent() JavaScript function does
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir,
base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files),
{'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
self.assertRaises(OSError, self.storage.delete, 'error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
        Test the behavior when file.chunks() raises an error.
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
        # Files can be read a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
self.assertRaisesMessage(
SuspiciousFileOperation, 'Storage can not find an available filename',
objs[1].limited_length.save, *(filename, ContentFile('Same Content'))
)
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_old_style_storage(self):
# Testing backward-compatibility with old-style storage backends that
# don't take ``max_length`` parameter in ``get_available_name()``
# and save(). A deprecation warning should be raised.
obj = Storage()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
obj.old_style.save('deprecated_storage_test.txt', ContentFile('Same Content'))
self.assertEqual(len(warns), 2)
self.assertEqual(
str(warns[0].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.save() will be removed in '
'Django 1.10.'
)
self.assertEqual(
str(warns[1].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.get_available_name() will '
'be removed in Django 1.10.'
)
self.assertEqual(obj.old_style.name, 'tests/deprecated_storage_test.txt')
self.assertEqual(obj.old_style.read(), b'Same Content')
obj.old_style.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use a subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "./django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
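        # Added note: with the 0o027 umask set in setUp(), the expected
        # default mode asserted below is 0o666 & ~0o027 == 0o640.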
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
        Test that ContentFile can be saved correctly with the filesystem storage,
        whether it was initialized with string or unicode content.
        """
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
        Test the File storage API with a file-like object coming from urllib2.urlopen().
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
test_serialization.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import collections
import datetime
import os
import pathlib
import pickle
import subprocess
import string
import sys
import pyarrow as pa
import numpy as np
import pyarrow.tests.util as test_util
try:
import torch
except ImportError:
torch = None
# Blacklist the module in case `import torch` is costly before
# failing (ARROW-2071)
sys.modules['torch'] = None
try:
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
except ImportError:
coo_matrix = None
csr_matrix = None
csc_matrix = None
try:
import sparse
except ImportError:
sparse = None
# ignore all serialization deprecation warnings in this file, we test that the
# warnings are actually raised in test_serialization_deprecated.py
pytestmark = pytest.mark.filterwarnings("ignore:'pyarrow:DeprecationWarning")
def assert_equal(obj1, obj2):
if torch is not None and torch.is_tensor(obj1) and torch.is_tensor(obj2):
if obj1.is_sparse:
obj1 = obj1.to_dense()
if obj2.is_sparse:
obj2 = obj2.to_dense()
assert torch.equal(obj1, obj2)
return
module_numpy = (type(obj1).__module__ == np.__name__ or
type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ()) or
(hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) ==
set(list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different."
.format(
obj1,
obj2))
if obj1.__dict__ == {}:
print("WARNING: Empty dict in ", obj1)
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (pa.lib.is_named_tuple(type(obj1)) or
pa.lib.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif isinstance(obj1, pa.Array) and isinstance(obj2, pa.Array):
assert obj1.equals(obj2)
    elif isinstance(obj1, pa.Tensor) and isinstance(obj2, pa.Tensor):
        assert obj1.equals(obj2)
elif isinstance(obj1, pa.SparseCOOTensor) and \
isinstance(obj2, pa.SparseCOOTensor):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.SparseCSRMatrix) and \
isinstance(obj2, pa.SparseCSRMatrix):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.SparseCSCMatrix) and \
isinstance(obj2, pa.SparseCSCMatrix):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.SparseCSFTensor) and \
isinstance(obj2, pa.SparseCSFTensor):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.RecordBatch) and isinstance(obj2, pa.RecordBatch):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.Table) and isinstance(obj2, pa.Table):
assert obj1.equals(obj2)
else:
assert type(obj1) == type(obj2) and obj1 == obj2, \
"Objects {} and {} are different.".format(obj1, obj2)
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 999,
[1 << 100, [1 << 100]], "a", string.printable, "\u262F",
"hello world", "hello world", "\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {}, {(1, 2): 1}, {(): 2},
[1, "hello", 3.0], "\u262F", 42.0, (1.0, "hi"),
[1, 2, 3, None], [(None,), 3, 1.0], ["h", "e", "l", "l", "o", None],
(None, None), ("hello", None), (True, False),
{True: "hello", False: "world"}, {"hello": "world", 1: 42, 2.5: 45},
{"hello": {2, 3}, "world": {42.0}, "this": None},
np.int8(3), np.int32(4), np.int64(5),
np.uint8(3), np.uint32(4), np.uint64(5),
np.float16(1.9), np.float32(1.9),
np.float64(1.9), np.zeros([8, 20]),
np.random.normal(size=[17, 10]), np.array(["hi", 3]),
np.array(["hi", 3], dtype=object),
np.random.normal(size=[15, 13]).T
]
index_types = ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8')
tensor_types = ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'f2', 'f4', 'f8')
PRIMITIVE_OBJECTS += [0, np.array([["hi", "hi"], [1.3, 1]])]
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[4, 4]) for i in range(5)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
((((((((((),),),),),),),),),),
{"a": {"b": {"c": {"d": {}}}}},
]
class Foo:
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar:
def __init__(self):
for i, val in enumerate(COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz:
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux:
def __init__(self):
self.objs = [Foo(1), Foo(42)]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class SubQuxPickle(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [Exception("Test object."), CustomError(), Point(11, y=22),
Foo(), Bar(), Baz(), Qux(), SubQux(), SubQuxPickle(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
collections.Counter([1, 1, 1, 2, 2, 3, "a", "b"])]
def make_serialization_context():
with pytest.warns(DeprecationWarning):
context = pa.default_serialization_context()
context.register_type(Foo, "Foo")
context.register_type(Bar, "Bar")
context.register_type(Baz, "Baz")
context.register_type(Qux, "Quz")
context.register_type(SubQux, "SubQux")
context.register_type(SubQuxPickle, "SubQuxPickle", pickle=True)
context.register_type(Exception, "Exception")
context.register_type(CustomError, "CustomError")
context.register_type(Point, "Point")
context.register_type(NamedTupleExample, "NamedTupleExample")
return context
global_serialization_context = make_serialization_context()
def serialization_roundtrip(value, scratch_buffer,
context=global_serialization_context):
writer = pa.FixedSizeBufferWriter(scratch_buffer)
pa.serialize_to(value, writer, context=context)
reader = pa.BufferReader(scratch_buffer)
result = pa.deserialize_from(reader, None, context=context)
assert_equal(value, result)
_check_component_roundtrip(value, context=context)
def _check_component_roundtrip(value, context=global_serialization_context):
# Test to/from components
serialized = pa.serialize(value, context=context)
components = serialized.to_components()
from_comp = pa.SerializedPyObject.from_components(components)
recons = from_comp.deserialize(context=context)
assert_equal(value, recons)
@pytest.fixture(scope='session')
def large_buffer(size=32*1024*1024):
return pa.allocate_buffer(size)
def large_memory_map(tmpdir_factory, size=100*1024*1024):
path = (tmpdir_factory.mktemp('data')
.join('pyarrow-serialization-tmp-file').strpath)
# Create a large memory mapped file
with open(path, 'wb') as f:
f.write(np.random.randint(0, 256, size=size)
.astype('u1')
.tobytes()
[:size])
return path
def test_clone():
context = pa.SerializationContext()
class Foo:
pass
def custom_serializer(obj):
return 0
def custom_deserializer(serialized_obj):
return (serialized_obj, 'a')
context.register_type(Foo, 'Foo', custom_serializer=custom_serializer,
custom_deserializer=custom_deserializer)
new_context = context.clone()
f = Foo()
serialized = pa.serialize(f, context=context)
deserialized = serialized.deserialize(context=context)
assert deserialized == (0, 'a')
serialized = pa.serialize(f, context=new_context)
deserialized = serialized.deserialize(context=new_context)
assert deserialized == (0, 'a')
def test_primitive_serialization_notbroken(large_buffer):
serialization_roundtrip({(1, 2): 2}, large_buffer)
def test_primitive_serialization_broken(large_buffer):
serialization_roundtrip({(): 2}, large_buffer)
def test_primitive_serialization(large_buffer):
for obj in PRIMITIVE_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_integer_limits(large_buffer):
# Check that Numpy scalars can be represented up to their limit values
# (except np.uint64 which is limited to 2**63 - 1)
    for dt in [np.int8, np.int16, np.int32, np.int64,
               np.uint8, np.uint16, np.uint32, np.uint64]:
scal = dt(np.iinfo(dt).min)
serialization_roundtrip(scal, large_buffer)
if dt is not np.uint64:
scal = dt(np.iinfo(dt).max)
serialization_roundtrip(scal, large_buffer)
else:
scal = dt(2**63 - 1)
serialization_roundtrip(scal, large_buffer)
for v in (2**63, 2**64 - 1):
scal = dt(v)
with pytest.raises(pa.ArrowInvalid):
pa.serialize(scal)
def test_serialize_to_buffer():
for nthreads in [1, 4]:
for value in COMPLEX_OBJECTS:
buf = pa.serialize(value).to_buffer(nthreads=nthreads)
result = pa.deserialize(buf)
assert_equal(value, result)
def test_complex_serialization(large_buffer):
for obj in COMPLEX_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_custom_serialization(large_buffer):
for obj in CUSTOM_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_default_dict_serialization(large_buffer):
pytest.importorskip("cloudpickle")
obj = collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
serialization_roundtrip(obj, large_buffer)
def test_numpy_serialization(large_buffer):
for t in ["bool", "int8", "uint8", "int16", "uint16", "int32",
"uint32", "float16", "float32", "float64", "<U1", "<U2", "<U3",
"<U4", "|S1", "|S2", "|S3", "|S4", "|O",
np.dtype([('a', 'int64'), ('b', 'float')]),
np.dtype([('x', 'uint32'), ('y', '<U8')])]:
obj = np.random.randint(0, 10, size=(100, 100)).astype(t)
serialization_roundtrip(obj, large_buffer)
obj = obj[1:99, 10:90]
serialization_roundtrip(obj, large_buffer)
def test_datetime_serialization(large_buffer):
data = [
# Principia Mathematica published
datetime.datetime(year=1687, month=7, day=5),
# Some random date
datetime.datetime(year=1911, month=6, day=3, hour=4,
minute=55, second=44),
# End of WWI
datetime.datetime(year=1918, month=11, day=11),
# Beginning of UNIX time
datetime.datetime(year=1970, month=1, day=1),
# The Berlin wall falls
datetime.datetime(year=1989, month=11, day=9),
# Another random date
datetime.datetime(year=2011, month=6, day=3, hour=4,
minute=0, second=3),
# Another random date
datetime.datetime(year=1970, month=1, day=3, hour=4,
minute=0, second=0)
]
for d in data:
serialization_roundtrip(d, large_buffer)
def test_torch_serialization(large_buffer):
pytest.importorskip("torch")
serialization_context = pa.default_serialization_context()
pa.register_torch_serialization_handlers(serialization_context)
# Dense tensors:
# These are the only types that are supported for the
# PyTorch to NumPy conversion
for t in ["float32", "float64",
"uint8", "int16", "int32", "int64"]:
obj = torch.from_numpy(np.random.randn(1000).astype(t))
serialization_roundtrip(obj, large_buffer,
context=serialization_context)
tensor_requiring_grad = torch.randn(10, 10, requires_grad=True)
serialization_roundtrip(tensor_requiring_grad, large_buffer,
context=serialization_context)
# Sparse tensors:
# These are the only types that are supported for the
# PyTorch to NumPy conversion
for t in ["float32", "float64",
"uint8", "int16", "int32", "int64"]:
i = torch.LongTensor([[0, 2], [1, 0], [1, 2]])
v = torch.from_numpy(np.array([3, 4, 5]).astype(t))
obj = torch.sparse_coo_tensor(i.t(), v, torch.Size([2, 3]))
serialization_roundtrip(obj, large_buffer,
context=serialization_context)
@pytest.mark.skipif(not torch or not torch.cuda.is_available(),
reason="requires pytorch with CUDA")
def test_torch_cuda():
# ARROW-2920: This used to segfault if torch is not imported
# before pyarrow
# Note that this test will only catch the issue if it is run
# with a pyarrow that has been built in the manylinux1 environment
torch.nn.Conv2d(64, 2, kernel_size=3, stride=1,
padding=1, bias=False).cuda()
def test_numpy_immutable(large_buffer):
obj = np.zeros([10])
writer = pa.FixedSizeBufferWriter(large_buffer)
pa.serialize_to(obj, writer, global_serialization_context)
reader = pa.BufferReader(large_buffer)
result = pa.deserialize_from(reader, None, global_serialization_context)
with pytest.raises(ValueError):
result[0] = 1.0
def test_numpy_base_object(tmpdir):
# ARROW-2040: deserialized Numpy array should keep a reference to the
# owner of its memory
path = os.path.join(str(tmpdir), 'zzz.bin')
data = np.arange(12, dtype=np.int32)
with open(path, 'wb') as f:
f.write(pa.serialize(data).to_buffer())
serialized = pa.read_serialized(pa.OSFile(path))
result = serialized.deserialize()
assert_equal(result, data)
serialized = None
assert_equal(result, data)
assert result.base is not None
# see https://issues.apache.org/jira/browse/ARROW-1695
def test_serialization_callback_numpy():
class DummyClass:
pass
def serialize_dummy_class(obj):
x = np.zeros(4)
return x
def deserialize_dummy_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(DummyClass, "DummyClass",
custom_serializer=serialize_dummy_class,
custom_deserializer=deserialize_dummy_class)
pa.serialize(DummyClass(), context=context)
def test_numpy_subclass_serialization():
# Check that we can properly serialize subclasses of np.ndarray.
class CustomNDArray(np.ndarray):
def __new__(cls, input_array):
array = np.asarray(input_array).view(cls)
return array
def serializer(obj):
return {'numpy': obj.view(np.ndarray)}
def deserializer(data):
array = data['numpy'].view(CustomNDArray)
return array
context = pa.default_serialization_context()
context.register_type(CustomNDArray, 'CustomNDArray',
custom_serializer=serializer,
custom_deserializer=deserializer)
x = CustomNDArray(np.zeros(3))
serialized = pa.serialize(x, context=context).to_buffer()
new_x = pa.deserialize(serialized, context=context)
assert type(new_x) == CustomNDArray
assert np.alltrue(new_x.view(np.ndarray) == np.zeros(3))
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_coo_tensor_serialization(index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([[1, 2, 3, 4, 5, 6]]).T.astype(tensor_dtype)
coords = np.array([
[0, 0, 2, 3, 1, 3],
[0, 2, 0, 4, 5, 5],
]).T.astype(index_dtype)
shape = (4, 6)
dim_names = ('x', 'y')
sparse_tensor = pa.SparseCOOTensor.from_numpy(data, coords,
shape, dim_names)
context = pa.default_serialization_context()
serialized = pa.serialize(sparse_tensor, context=context).to_buffer()
result = pa.deserialize(serialized)
assert_equal(result, sparse_tensor)
assert isinstance(result, pa.SparseCOOTensor)
data_result, coords_result = result.to_numpy()
assert np.array_equal(data_result, data)
assert np.array_equal(coords_result, coords)
assert result.dim_names == dim_names
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_coo_tensor_components_serialization(large_buffer,
index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([[1, 2, 3, 4, 5, 6]]).T.astype(tensor_dtype)
coords = np.array([
[0, 0, 2, 3, 1, 3],
[0, 2, 0, 4, 5, 5],
]).T.astype(index_dtype)
shape = (4, 6)
dim_names = ('x', 'y')
sparse_tensor = pa.SparseCOOTensor.from_numpy(data, coords,
shape, dim_names)
serialization_roundtrip(sparse_tensor, large_buffer)
@pytest.mark.skipif(not coo_matrix, reason="requires scipy")
def test_scipy_sparse_coo_tensor_serialization():
data = np.array([1, 2, 3, 4, 5, 6])
row = np.array([0, 0, 2, 3, 1, 3])
col = np.array([0, 2, 0, 4, 5, 5])
shape = (4, 6)
sparse_array = coo_matrix((data, (row, col)), shape=shape)
serialized = pa.serialize(sparse_array)
result = serialized.deserialize()
assert np.array_equal(sparse_array.toarray(), result.toarray())
@pytest.mark.skipif(not sparse, reason="requires pydata/sparse")
def test_pydata_sparse_sparse_coo_tensor_serialization():
data = np.array([1, 2, 3, 4, 5, 6])
coords = np.array([
[0, 0, 2, 3, 1, 3],
[0, 2, 0, 4, 5, 5],
])
shape = (4, 6)
sparse_array = sparse.COO(data=data, coords=coords, shape=shape)
serialized = pa.serialize(sparse_array)
result = serialized.deserialize()
assert np.array_equal(sparse_array.todense(), result.todense())
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_csr_matrix_serialization(index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(tensor_dtype)
indptr = np.array([0, 2, 3, 4, 6]).astype(index_dtype)
indices = np.array([0, 2, 5, 0, 4, 5]).astype(index_dtype)
shape = (4, 6)
dim_names = ('x', 'y')
sparse_tensor = pa.SparseCSRMatrix.from_numpy(data, indptr, indices,
shape, dim_names)
context = pa.default_serialization_context()
serialized = pa.serialize(sparse_tensor, context=context).to_buffer()
result = pa.deserialize(serialized)
assert_equal(result, sparse_tensor)
assert isinstance(result, pa.SparseCSRMatrix)
data_result, indptr_result, indices_result = result.to_numpy()
assert np.array_equal(data_result, data)
assert np.array_equal(indptr_result, indptr)
assert np.array_equal(indices_result, indices)
assert result.dim_names == dim_names
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_csr_matrix_components_serialization(large_buffer,
index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([8, 2, 5, 3, 4, 6]).astype(tensor_dtype)
indptr = np.array([0, 2, 3, 4, 6]).astype(index_dtype)
indices = np.array([0, 2, 5, 0, 4, 5]).astype(index_dtype)
shape = (4, 6)
dim_names = ('x', 'y')
sparse_tensor = pa.SparseCSRMatrix.from_numpy(data, indptr, indices,
shape, dim_names)
serialization_roundtrip(sparse_tensor, large_buffer)
@pytest.mark.skipif(not csr_matrix, reason="requires scipy")
def test_scipy_sparse_csr_matrix_serialization():
data = np.array([8, 2, 5, 3, 4, 6])
indptr = np.array([0, 2, 3, 4, 6])
indices = np.array([0, 2, 5, 0, 4, 5])
shape = (4, 6)
sparse_array = csr_matrix((data, indices, indptr), shape=shape)
serialized = pa.serialize(sparse_array)
result = serialized.deserialize()
assert np.array_equal(sparse_array.toarray(), result.toarray())
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_csc_matrix_serialization(index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(tensor_dtype)
indptr = np.array([0, 2, 3, 4, 6]).astype(index_dtype)
indices = np.array([0, 2, 5, 0, 4, 5]).astype(index_dtype)
shape = (6, 4)
dim_names = ('x', 'y')
sparse_tensor = pa.SparseCSCMatrix.from_numpy(data, indptr, indices,
shape, dim_names)
context = pa.default_serialization_context()
serialized = pa.serialize(sparse_tensor, context=context).to_buffer()
result = pa.deserialize(serialized)
assert_equal(result, sparse_tensor)
assert isinstance(result, pa.SparseCSCMatrix)
data_result, indptr_result, indices_result = result.to_numpy()
assert np.array_equal(data_result, data)
assert np.array_equal(indptr_result, indptr)
assert np.array_equal(indices_result, indices)
assert result.dim_names == dim_names
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_csc_matrix_components_serialization(large_buffer,
index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([8, 2, 5, 3, 4, 6]).astype(tensor_dtype)
indptr = np.array([0, 2, 3, 6]).astype(index_dtype)
indices = np.array([0, 2, 2, 0, 1, 2]).astype(index_dtype)
shape = (3, 3)
dim_names = ('x', 'y')
sparse_tensor = pa.SparseCSCMatrix.from_numpy(data, indptr, indices,
shape, dim_names)
serialization_roundtrip(sparse_tensor, large_buffer)
@pytest.mark.skipif(not csc_matrix, reason="requires scipy")
def test_scipy_sparse_csc_matrix_serialization():
data = np.array([8, 2, 5, 3, 4, 6])
indptr = np.array([0, 2, 3, 4, 6])
indices = np.array([0, 2, 5, 0, 4, 5])
shape = (6, 4)
sparse_array = csc_matrix((data, indices, indptr), shape=shape)
serialized = pa.serialize(sparse_array)
result = serialized.deserialize()
assert np.array_equal(sparse_array.toarray(), result.toarray())
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_csf_tensor_serialization(index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([[1, 2, 3, 4, 5, 6, 7, 8]]).T.astype(tensor_dtype)
indptr = [
np.array([0, 2, 3]),
np.array([0, 1, 3, 4]),
np.array([0, 2, 4, 5, 8]),
]
indices = [
np.array([0, 1]),
np.array([0, 1, 1]),
np.array([0, 0, 1, 1]),
np.array([1, 2, 0, 2, 0, 0, 1, 2]),
]
indptr = [x.astype(index_dtype) for x in indptr]
indices = [x.astype(index_dtype) for x in indices]
shape = (2, 3, 4, 5)
axis_order = (0, 1, 2, 3)
dim_names = ("a", "b", "c", "d")
for ndim in [2, 3, 4]:
sparse_tensor = pa.SparseCSFTensor.from_numpy(data, indptr[:ndim - 1],
indices[:ndim],
shape[:ndim],
axis_order[:ndim],
dim_names[:ndim])
context = pa.default_serialization_context()
serialized = pa.serialize(sparse_tensor, context=context).to_buffer()
result = pa.deserialize(serialized)
assert_equal(result, sparse_tensor)
assert isinstance(result, pa.SparseCSFTensor)
@pytest.mark.parametrize('tensor_type', tensor_types)
@pytest.mark.parametrize('index_type', index_types)
def test_sparse_csf_tensor_components_serialization(large_buffer,
index_type, tensor_type):
tensor_dtype = np.dtype(tensor_type)
index_dtype = np.dtype(index_type)
data = np.array([[1, 2, 3, 4, 5, 6, 7, 8]]).T.astype(tensor_dtype)
indptr = [
np.array([0, 2, 3]),
np.array([0, 1, 3, 4]),
np.array([0, 2, 4, 5, 8]),
]
indices = [
np.array([0, 1]),
np.array([0, 1, 1]),
np.array([0, 0, 1, 1]),
np.array([1, 2, 0, 2, 0, 0, 1, 2]),
]
indptr = [x.astype(index_dtype) for x in indptr]
indices = [x.astype(index_dtype) for x in indices]
shape = (2, 3, 4, 5)
axis_order = (0, 1, 2, 3)
dim_names = ("a", "b", "c", "d")
for ndim in [2, 3, 4]:
sparse_tensor = pa.SparseCSFTensor.from_numpy(data, indptr[:ndim - 1],
indices[:ndim],
shape[:ndim],
axis_order[:ndim],
dim_names[:ndim])
serialization_roundtrip(sparse_tensor, large_buffer)
@pytest.mark.filterwarnings(
"ignore:the matrix subclass:PendingDeprecationWarning")
def test_numpy_matrix_serialization(tmpdir):
class CustomType:
def __init__(self, val):
self.val = val
rec_type = np.dtype([('x', 'int64'), ('y', 'double'), ('z', '<U4')])
path = os.path.join(str(tmpdir), 'pyarrow_npmatrix_serialization_test.bin')
array = np.random.randint(low=-1, high=1, size=(2, 2))
for data_type in [str, int, float, rec_type, CustomType]:
matrix = np.matrix(array.astype(data_type))
with open(path, 'wb') as f:
f.write(pa.serialize(matrix).to_buffer())
serialized = pa.read_serialized(pa.OSFile(path))
result = serialized.deserialize()
assert_equal(result, matrix)
assert_equal(result.dtype, matrix.dtype)
serialized = None
assert_equal(result, matrix)
assert result.base is not None
def test_pyarrow_objects_serialization(large_buffer):
    # NOTE: We have to create these objects inside the test,
    # or they would affect 'test_total_bytes_allocated'.
pyarrow_objects = [
pa.array([1, 2, 3, 4]), pa.array(['1', 'never U+1F631', '',
"233 * U+1F600"]),
pa.array([1, None, 2, 3]),
pa.Tensor.from_numpy(np.random.rand(2, 3, 4)),
pa.RecordBatch.from_arrays(
[pa.array([1, None, 2, 3]),
pa.array(['1', 'never U+1F631', '', "233 * u1F600"])],
['a', 'b']),
pa.Table.from_arrays([pa.array([1, None, 2, 3]),
pa.array(['1', 'never U+1F631', '',
"233 * u1F600"])],
['a', 'b'])
]
for obj in pyarrow_objects:
serialization_roundtrip(obj, large_buffer)
def test_buffer_serialization():
class BufferClass:
pass
def serialize_buffer_class(obj):
return pa.py_buffer(b"hello")
def deserialize_buffer_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(
BufferClass, "BufferClass",
custom_serializer=serialize_buffer_class,
custom_deserializer=deserialize_buffer_class)
b = pa.serialize(BufferClass(), context=context).to_buffer()
assert pa.deserialize(b, context=context).to_pybytes() == b"hello"
@pytest.mark.skip(reason="extensive memory requirements")
def test_arrow_limits(self):
def huge_memory_map(temp_dir):
return large_memory_map(temp_dir, 100 * 1024 * 1024 * 1024)
with pa.memory_map(huge_memory_map, mode="r+") as mmap:
# Test that objects that are too large for Arrow throw a Python
# exception. These tests give out of memory errors on Travis and need
# to be run on a machine with lots of RAM.
x = 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * ["s"]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [["1"], 2, 3, [{"s": 4}]]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [{"s": 1}] + 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = np.zeros(2 ** 25)
serialization_roundtrip(x, mmap)
del x
x = [np.zeros(2 ** 18) for _ in range(2 ** 7)]
serialization_roundtrip(x, mmap)
del x
def test_serialization_callback_error():
class TempClass:
pass
# Pass a SerializationContext into serialize, but TempClass
# is not registered
serialization_context = pa.SerializationContext()
val = TempClass()
with pytest.raises(pa.SerializationCallbackError) as err:
serialized_object = pa.serialize(val, serialization_context)
assert err.value.example_object == val
serialization_context.register_type(TempClass, "TempClass")
serialized_object = pa.serialize(TempClass(), serialization_context)
deserialization_context = pa.SerializationContext()
# Pass a Serialization Context into deserialize, but TempClass
# is not registered
with pytest.raises(pa.DeserializationCallbackError) as err:
serialized_object.deserialize(deserialization_context)
assert err.value.type_id == "TempClass"
class TempClass2:
pass
# Make sure that we receive an error when we use an inappropriate value for
# the type_id argument.
with pytest.raises(TypeError):
serialization_context.register_type(TempClass2, 1)
def test_fallback_to_subclasses():
class SubFoo(Foo):
def __init__(self):
Foo.__init__(self)
# should be able to serialize/deserialize an instance
# if a base class has been registered
serialization_context = pa.SerializationContext()
serialization_context.register_type(Foo, "Foo")
subfoo = SubFoo()
    # should fall back to the Foo serializer
serialized_object = pa.serialize(subfoo, serialization_context)
reconstructed_object = serialized_object.deserialize(
serialization_context
)
assert type(reconstructed_object) == Foo
class Serializable:
pass
def serialize_serializable(obj):
return {"type": type(obj), "data": obj.__dict__}
def deserialize_serializable(obj):
val = obj["type"].__new__(obj["type"])
val.__dict__.update(obj["data"])
return val
class SerializableClass(Serializable):
def __init__(self):
self.value = 3
def test_serialize_subclasses():
    # This test shows how subclasses can be handled in an idiomatic way
    # by registering only a serializer for the base class.
    # This technique should however be used with care, since pickling
    # type(obj) with cloudpickle will include the full class definition
    # in the serialized representation.
    # This means the class definition is part of every serialized instance,
    # which in general is not desirable; registering all subclasses
    # with register_type results in faster and more memory-efficient
    # serialization (a sketch of that alternative follows this test).
context = pa.default_serialization_context()
context.register_type(
Serializable, "Serializable",
custom_serializer=serialize_serializable,
custom_deserializer=deserialize_serializable)
a = SerializableClass()
serialized = pa.serialize(a, context=context)
deserialized = serialized.deserialize(context=context)
assert type(deserialized).__name__ == SerializableClass.__name__
assert deserialized.value == 3
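# A minimal sketch (not part of the original test suite) of the alternative
# recommended in the comment inside test_serialize_subclasses above: register
# the concrete subclass itself with register_type instead of relying on a
# base-class serializer that pickles type(obj). It reuses SerializableClass
# defined above; the helper name and the serializer pair are illustrative
# assumptions, not pyarrow API.
def _sketch_register_subclass_directly():
    context = pa.SerializationContext()

    def serialize_serializable_class(obj):
        # Only the instance data travels with the payload; the class
        # definition itself is never embedded.
        return obj.__dict__

    def deserialize_serializable_class(data):
        obj = SerializableClass.__new__(SerializableClass)
        obj.__dict__.update(data)
        return obj

    context.register_type(
        SerializableClass, "SerializableClass",
        custom_serializer=serialize_serializable_class,
        custom_deserializer=deserialize_serializable_class)

    buf = pa.serialize(SerializableClass(), context=context).to_buffer()
    restored = pa.deserialize(buf, context=context)
    assert isinstance(restored, SerializableClass)
    assert restored.value == 3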
def test_serialize_to_components_invalid_cases():
buf = pa.py_buffer(b'hello')
components = {
'num_tensors': 0,
'num_sparse_tensors': {
'coo': 0, 'csr': 0, 'csc': 0, 'csf': 0, 'ndim_csf': 0
},
'num_ndarrays': 0,
'num_buffers': 1,
'data': [buf]
}
with pytest.raises(pa.ArrowInvalid):
pa.deserialize_components(components)
components = {
'num_tensors': 0,
'num_sparse_tensors': {
'coo': 0, 'csr': 0, 'csc': 0, 'csf': 0, 'ndim_csf': 0
},
'num_ndarrays': 1,
'num_buffers': 0,
'data': [buf, buf]
}
with pytest.raises(pa.ArrowInvalid):
pa.deserialize_components(components)
def test_deserialize_components_in_different_process():
arr = pa.array([1, 2, 5, 6], type=pa.int8())
ser = pa.serialize(arr)
data = pickle.dumps(ser.to_components(), protocol=-1)
code = """if 1:
import pickle
import pyarrow as pa
data = {!r}
components = pickle.loads(data)
arr = pa.deserialize_components(components)
assert arr.to_pylist() == [1, 2, 5, 6], arr
""".format(data)
subprocess_env = test_util.get_modified_env_with_pythonpath()
print("** sys.path =", sys.path)
print("** setting PYTHONPATH to:", subprocess_env['PYTHONPATH'])
subprocess.check_call([sys.executable, "-c", code], env=subprocess_env)
def test_serialize_read_concatenated_records():
# ARROW-1996 -- see stream alignment work in ARROW-2840, ARROW-3212
f = pa.BufferOutputStream()
pa.serialize_to(12, f)
pa.serialize_to(23, f)
buf = f.getvalue()
f = pa.BufferReader(buf)
pa.read_serialized(f).deserialize()
pa.read_serialized(f).deserialize()
def deserialize_regex(serialized, q):
import pyarrow as pa
q.put(pa.deserialize(serialized))
def test_deserialize_in_different_process():
from multiprocessing import Process, Queue
import re
regex = re.compile(r"\d+\.\d*")
serialization_context = pa.SerializationContext()
serialization_context.register_type(type(regex), "Regex", pickle=True)
serialized = pa.serialize(regex, serialization_context)
serialized_bytes = serialized.to_buffer().to_pybytes()
q = Queue()
p = Process(target=deserialize_regex, args=(serialized_bytes, q))
p.start()
assert q.get().pattern == regex.pattern
p.join()
def test_deserialize_buffer_in_different_process():
import tempfile
f = tempfile.NamedTemporaryFile(delete=False)
b = pa.serialize(pa.py_buffer(b'hello')).to_buffer()
f.write(b.to_pybytes())
f.close()
test_util.invoke_script('deserialize_buffer.py', f.name)
def test_set_pickle():
# Use a custom type to trigger pickling.
class Foo:
pass
context = pa.SerializationContext()
context.register_type(Foo, 'Foo', pickle=True)
test_object = Foo()
# Define a custom serializer and deserializer to use in place of pickle.
def dumps1(obj):
return b'custom'
def loads1(serialized_obj):
return serialized_obj + b' serialization 1'
# Test that setting a custom pickler changes the behavior.
context.set_pickle(dumps1, loads1)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 1'
# Define another custom serializer and deserializer.
def dumps2(obj):
return b'custom'
def loads2(serialized_obj):
return serialized_obj + b' serialization 2'
# Test that setting another custom pickler changes the behavior again.
context.set_pickle(dumps2, loads2)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 2'
def test_path_objects(tmpdir):
# Test compatibility with PEP 519 path-like objects
p = pathlib.Path(tmpdir) / 'zzz.bin'
obj = 1234
pa.serialize_to(obj, p)
res = pa.deserialize_from(p, None)
assert res == obj
def test_tensor_alignment():
# Deserialized numpy arrays should be 64-byte aligned.
x = np.random.normal(size=(10, 20, 30))
y = pa.deserialize(pa.serialize(x).to_buffer())
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i) for i in range(100)]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i * (1,)) for i in range(20)]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i * (5,)) for i in range(1, 8)]
xs = [xs[i][(i + 1) * (slice(1, 3),)] for i in range(len(xs))]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
def test_empty_tensor():
# ARROW-8122, serialize and deserialize empty tensors
x = np.array([], dtype=np.float64)
y = pa.deserialize(pa.serialize(x).to_buffer())
np.testing.assert_array_equal(x, y)
x = np.array([[], [], []], dtype=np.float64)
y = pa.deserialize(pa.serialize(x).to_buffer())
np.testing.assert_array_equal(x, y)
x = np.array([[], [], []], dtype=np.float64).T
y = pa.deserialize(pa.serialize(x).to_buffer())
np.testing.assert_array_equal(x, y)
def test_serialization_determinism():
for obj in COMPLEX_OBJECTS:
buf1 = pa.serialize(obj).to_buffer()
buf2 = pa.serialize(obj).to_buffer()
assert buf1.to_pybytes() == buf2.to_pybytes()
def test_serialize_recursive_objects():
class ClassA:
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Make a numpy array that contains itself.
arr = np.array([None], dtype=object)
arr[0] = arr
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1, arr]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
pa.serialize(obj).deserialize()
|
blast_24cores.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from threading import Thread, Event
import time
# import threading
import psutil
import datetime
import os
import subprocess
from subprocess import PIPE,Popen
# sizes = [0.015625, 0.03125, 0.0625, 0.125, 0.25, 0.5]
size = 0.015625
n_cores = 24
# command = "blastn -db nt -evalue 1e-05 -query arquivo.fasta -out arquivoblast"
# Monitor CPU and memory.
# [1:09 PM, 3/14/2016] Mauro: Monitor the machines' performance (whether they hit their CPU or memory ceiling; whether they hang).
# [1:10 PM, 3/14/2016] Mauro: And check the relationship (size, number of reads, number of hits) between the input and output files.
# Raony, I suggest:
# 1) Take one of the 'good' files and split it into different sizes: 50, 25, 12.5, 6.25, 3.125, 1.5625% of the original
#    (an illustrative split helper is sketched at the end of this script).
# 2) Run each one on a different webservice, on standard AWS instances with roughly 8, 20 and 50 GB of RAM and the corresponding processing power.
# 3) Monitor: processing time on each instance, average CPU and RAM usage, and output file size.
# 4) When splitting the initial file into pieces of 6.25% of the total, queue 8 of them (~50%) on the same webservice to monitor the execution time and compare it against a single 50% file.
file_prefix = str(size).replace('.','_')
output = open("monitor_%s.%s_cores.log" % (file_prefix, n_cores), "w")
def monitor(arg1, stop_event):
while(not stop_event.is_set()):
time.sleep(60)
cpu = psutil.cpu_percent(interval=1)
mem = psutil.virtual_memory()
output_list = []
output_list.append("DATE:"+str(datetime.datetime.now()))
used = mem.total - mem.available
output_list.append("CPU:"+str(cpu))
output_list.append("MEMORY:"+str(int(used / 1024 / 1024))+" MB")
output.writelines("\t".join(output_list)+"\n")
print(output_list)
t2_stop= Event()
monitor = Thread(target=monitor, args=(2, t2_stop))
monitor.start()
#run blasts
# for size in sizes:
print("Running BLAST for %s \n" % (size))
output.writelines("Running BLAST for %s \n" % (size))
filename = "input_blast_%s.fasta" % (file_prefix)
output_file = "/tmp/output_%s.%s_cores.blast_output" % (file_prefix, n_cores)
command = "time /home/raonyguimaraes/programs/ncbi-blast-2.3.0+/bin/blastn -db /home/raonyguimaraes/blast_db/nt -evalue 1e-05 -query ../%s -out %s -num_threads %s" % (filename, output_file, n_cores)
proc = Popen(command.split(), stdout=PIPE)
# communicate() returns bytes; decode before printing/writing to the text-mode log.
out = proc.communicate()[0].decode()
print(out)
output.write(out)
command = "cp %s ." % (output_file)
proc = Popen(command.split(), stdout=PIPE)
out = proc.communicate()[0]
command = "rm %s" % (output_file)
proc = Popen(command.split(), stdout=PIPE)
out = proc.communicate()[0]
#stop monitor
t2_stop.set()
monitor.join()
output.close()
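# Illustrative sketch only (not executed above): one way the fractional input
# files referenced by `filename` could have been produced from a full FASTA
# file, following the planning notes at the top of this script. The source
# path "full_input.fasta" and the helper name are placeholders/assumptions,
# not artifacts of the original experiment.
def split_fasta_fraction(src_path, fraction):
    """Write the first `fraction` of the records in `src_path` to a new file
    named with the same prefix convention used by this script."""
    with open(src_path) as src:
        # A FASTA record is a '>' header line followed by its sequence lines.
        chunks = src.read().split("\n>")
    records = [chunks[0]] + [">" + c for c in chunks[1:]]
    keep = max(1, int(len(records) * fraction))
    prefix = str(fraction).replace('.', '_')
    out_path = "input_blast_%s.fasta" % prefix
    with open(out_path, "w") as dst:
        dst.write("\n".join(records[:keep]) + "\n")
    return out_path

# Example (hypothetical): split_fasta_fraction("full_input.fasta", 0.015625)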
|
database_tests.py
|
import psycopg2
from threading import Thread
def get_conn():
return psycopg2.connect(
"dbname='dingo' user='postgres' host='0.0.0.0' password='rrferl' ")
class Kon:
def __init__(self):
self.conn = get_conn()
self.cur = self.conn.cursor()
def __enter__(self):
return self.cur
def __exit__(self, type, value, traceback):
self.cur.close()
self.conn.close()
def create_tables():
conn = get_conn()
cur = conn.cursor()
return cur.execute("""
begin;
create table if not exists invoice6 (id int primary key, data int);
create table if not exists water5 (id int);
insert into water5 (id) select 0 where not exists(select id from water5 limit 1);
commit;
""")
conn.close()
def simple_select(num):
conn = get_conn()
cur = conn.cursor()
cur.execute("""
with x as (update water5 set id = id+1 returning *)
insert into invoice6 select * from x returning *;
""")
rows = cur.fetchall()
for i in rows:
print(i)
conn.close()
return rows
def race_condition1(num):
with Kon() as cur:
cur.execute("""
begin;
select * from invoice6 order by id limit 1;
""")
row1 = cur.fetchone()[0]
cur.execute("""
update invoice6 set data = %s where id = %s;
""", (num, row1))
cur.execute("""
select data from invoice6 where id = %s;
""", (row1, ))
row2 = cur.fetchone()
cur.execute('commit;')
print(row2, num)
assert row2[0] == num
if __name__ == "__main__":
create_tables()
threads = []
for i in range(16):
x = Thread(target=race_condition1, args=(i,))
threads.append(x)
[i.start() for i in threads]
[i.join() for i in threads]
|
test_dummyaccount_demo.py
|
import asyncio
from threading import Thread
import pytest
from tests.utils import (
cleanup,
connect,
)
from .dummy_account_node import DummyAccountNode
# pylint: disable=too-many-locals
def create_setup_in_new_thread_func(dummy_node):
def setup_in_new_thread():
asyncio.ensure_future(dummy_node.setup_crypto_networking())
return setup_in_new_thread
async def perform_test(num_nodes, adjacency_map, action_func, assertion_func):
"""
Helper function to allow for easy construction of custom tests for dummy account nodes
in various network topologies
:param num_nodes: number of nodes in the test
:param adjacency_map: adjacency map defining each node and its list of neighbors
:param action_func: function to execute that includes actions by the nodes,
such as send crypto and set crypto
:param assertion_func: assertions for testing the results of the actions are correct
"""
# Create nodes
dummy_nodes = []
for i in range(num_nodes):
dummy_nodes.append(await DummyAccountNode.create())
# Create network
for source_num in adjacency_map:
target_nums = adjacency_map[source_num]
for target_num in target_nums:
await connect(dummy_nodes[source_num].libp2p_node, \
dummy_nodes[target_num].libp2p_node)
# Allow time for network creation to take place
await asyncio.sleep(0.25)
    # For each node, run the setup function, which subscribes the node to the
    # CRYPTO_TOPIC topic so it can listen and respond to messages. Note that
    # Thread.run() executes the target in the current thread; the setup merely
    # schedules a coroutine on the asyncio event loop, so no separate worker
    # thread is actually started here.
for dummy_node in dummy_nodes:
thread = Thread(target=create_setup_in_new_thread_func(dummy_node))
thread.run()
# Allow time for nodes to subscribe to CRYPTO_TOPIC topic
await asyncio.sleep(0.25)
# Perform action function
await action_func(dummy_nodes)
    # Allow time for the action function to be performed (i.e. messages to propagate)
await asyncio.sleep(1)
# Perform assertion function
for dummy_node in dummy_nodes:
assertion_func(dummy_node)
# Success, terminate pending tasks.
await cleanup()
@pytest.mark.asyncio
async def test_simple_two_nodes():
num_nodes = 2
adj_map = {0: [1]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("aspyn", 10)
def assertion_func(dummy_node):
assert dummy_node.get_balance("aspyn") == 10
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_simple_three_nodes_line_topography():
num_nodes = 3
adj_map = {0: [1], 1: [2]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("aspyn", 10)
def assertion_func(dummy_node):
assert dummy_node.get_balance("aspyn") == 10
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_simple_three_nodes_triangle_topography():
num_nodes = 3
adj_map = {0: [1, 2], 1: [2]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("aspyn", 20)
def assertion_func(dummy_node):
assert dummy_node.get_balance("aspyn") == 20
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_simple_seven_nodes_tree_topography():
num_nodes = 7
adj_map = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("aspyn", 20)
def assertion_func(dummy_node):
assert dummy_node.get_balance("aspyn") == 20
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_set_then_send_from_root_seven_nodes_tree_topography():
num_nodes = 7
adj_map = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("aspyn", 20)
await asyncio.sleep(0.25)
await dummy_nodes[0].publish_send_crypto("aspyn", "alex", 5)
def assertion_func(dummy_node):
assert dummy_node.get_balance("aspyn") == 15
assert dummy_node.get_balance("alex") == 5
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_set_then_send_from_different_leafs_seven_nodes_tree_topography():
num_nodes = 7
adj_map = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
async def action_func(dummy_nodes):
await dummy_nodes[6].publish_set_crypto("aspyn", 20)
await asyncio.sleep(0.25)
await dummy_nodes[4].publish_send_crypto("aspyn", "alex", 5)
def assertion_func(dummy_node):
assert dummy_node.get_balance("aspyn") == 15
assert dummy_node.get_balance("alex") == 5
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_simple_five_nodes_ring_topography():
num_nodes = 5
adj_map = {0: [1], 1: [2], 2: [3], 3: [4], 4: [0]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("aspyn", 20)
def assertion_func(dummy_node):
assert dummy_node.get_balance("aspyn") == 20
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_set_then_send_from_diff_nodes_five_nodes_ring_topography():
num_nodes = 5
adj_map = {0: [1], 1: [2], 2: [3], 3: [4], 4: [0]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("alex", 20)
await asyncio.sleep(0.25)
await dummy_nodes[3].publish_send_crypto("alex", "rob", 12)
def assertion_func(dummy_node):
assert dummy_node.get_balance("alex") == 8
assert dummy_node.get_balance("rob") == 12
await perform_test(num_nodes, adj_map, action_func, assertion_func)
@pytest.mark.asyncio
async def test_set_then_send_from_five_diff_nodes_five_nodes_ring_topography():
num_nodes = 5
adj_map = {0: [1], 1: [2], 2: [3], 3: [4], 4: [0]}
async def action_func(dummy_nodes):
await dummy_nodes[0].publish_set_crypto("alex", 20)
await asyncio.sleep(1)
await dummy_nodes[1].publish_send_crypto("alex", "rob", 3)
await asyncio.sleep(1)
await dummy_nodes[2].publish_send_crypto("rob", "aspyn", 2)
await asyncio.sleep(1)
await dummy_nodes[3].publish_send_crypto("aspyn", "zx", 1)
await asyncio.sleep(1)
await dummy_nodes[4].publish_send_crypto("zx", "raul", 1)
def assertion_func(dummy_node):
assert dummy_node.get_balance("alex") == 17
assert dummy_node.get_balance("rob") == 1
assert dummy_node.get_balance("aspyn") == 1
assert dummy_node.get_balance("zx") == 0
assert dummy_node.get_balance("raul") == 1
await perform_test(num_nodes, adj_map, action_func, assertion_func)
|
automated_driving_with_fusion2_2.py
|
"""Defines SimpleSensorFusionControl class
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 Cumhur Erkan Tuncali, Georgios Fainekos, Danil Prokhorov, Hisahiro Ito, James Kapinski.
For questions please contact:
C. Erkan Tuncali (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import math
import numpy as np
import threading
import pickle
from Sim_ATAV.common.controller_communication_interface import ControllerCommunicationInterface
from Sim_ATAV.vehicle_control.base_controller.base_controller import BaseCarController
from Sim_ATAV.vehicle_control.controller_commons import controller_commons
from Sim_ATAV.vehicle_control.controller_commons.path_following_tools import PathFollowingTools
from Sim_ATAV.vehicle_control.controller_commons.perception.sensor_fusion.sensor_fusion_tracker \
import SensorFusionTracker
from Sim_ATAV.vehicle_control.controller_commons.planning.target_speed_planner import TargetSpeedPlanner,\
TargetSpeedData
from Sim_ATAV.vehicle_control.generic_stanley_controller.generic_stanley_controller \
import GenericStanleyController
from Sim_ATAV.vehicle_control.generic_pid_controller.generic_pid_controller import GenericPIDController
from Sim_ATAV.vehicle_control.controller_commons.visualization.camera_info_display import CameraInfoDisplay
WORLD_TIME_STEP_MS = 10
HAS_DEBUG_DISPLAY = True
SENSOR_TYPE = 'Actual' # 'Actual', 'Perfect'
DEBUG_MODE = False
# Our global variables
target_throttle = [0.5, 0.55, 0.65, 0.75, 0.85, 0.95, 0.8, 0.9, 0.95, 1.0, 1.0, 0.65, 0.7, 0.7, 0.7, 0.75, 0.6, 0.6, 0.6, 0.35, 0.35, -0.3, -0.3, -0.3, -0.4, -0.4, -0.4, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.1, 0.05, 0.05, -0.1, 0.25, 0.05, 0.05, -0.1, 0.15, 0.15, 0.55, 0.6, 0.7, 0.85, 0.95, 0.8, 0.85, 0.9, 0.95, 1.0, 1.0, 0.7, 0.7, 0.7, 0.75, 0.6, 0.6, 0.6, 0.35, 0.35, 0.35, -0.3, -0.3, -0.3, -0.4, -0.4, -0.4, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.1, 0.05, 0.15, 0.05, 0.05, 0.05, -0.1]
target_t = [0.77, 1.32, 1.77, 2.16, 2.51, 2.83, 3.13, 3.41, 3.68, 3.94, 4.19, 4.43, 4.67, 4.91, 5.14, 5.37, 5.59, 5.81, 6.03, 6.25, 6.47, 6.69, 6.93, 7.18, 7.45, 7.74, 8.06, 8.41, 8.79, 9.21, 9.68, 10.22, 10.9, 11.75, 12.66, 13.57, 14.58, 15.51, 16.3, 17.09, 17.94, 18.79, 19.54, 20.14, 20.62, 21.03, 21.39, 21.72, 22.03, 22.32, 22.6, 22.87, 23.12, 23.37, 23.61, 23.85, 24.08, 24.31, 24.53, 24.75, 24.97, 25.19, 25.41, 25.63, 25.85, 26.09, 26.34, 26.61, 26.9, 27.22, 27.57, 27.95, 28.37, 28.84, 29.38, 30.06, 30.91, 31.82, 32.67, 33.46, 34.25, 35.04, 35.89]
exp_out = [[]]
time_index = 0
img_cnt = 346
data_dict = {}
inf = 1e9
def debug_print(print_str):
if DEBUG_MODE:
print(print_str)
sys.stdout.flush()
class PathAndSpeedFollower(BaseCarController):
"""PathAndSpeedFollower class is a car controller class for Webots."""
CAMERA_TO_FRONT_DISTANCE = 2.3 # 2.3 m is the distance from Prius top sensor location to the very front of the car
LIDAR_TO_FRONT_DISTANCE = 2.3
CAMERA_MAIN_RELATIVE_POSITION = [0.0, 1.3]
LIDAR_MAIN_RELATIVE_POSITION = [0.0, 1.3]
RADAR_FRONT_RELATIVE_POSITION = [0.0, 3.6]
    FRONT_TO_REAR_WHEELS_DISTANCE = 3.6  # Approximate (intentionally longer than the actual wheelbase
    # for smoother operation)
CAMERA_LOCAL_COORDINATES = [0.0, 1.3, 1.1]
CAMERA_X_ROT_ANGLE = -0.01
CAMERA_LOCAL_ROTATION = np.array([[1.0, 0.0, 0.0],
[0.0, math.cos(CAMERA_X_ROT_ANGLE), -math.sin(CAMERA_X_ROT_ANGLE)],
[0.0, math.sin(CAMERA_X_ROT_ANGLE), math.cos(CAMERA_X_ROT_ANGLE)]])
CAR_FRONT_TRIANGLE_LINE1_M = -192/126 # old value: -0.6 # Line 1 m for front triangle.
CAR_FRONT_TRIANGLE_LINE1_B = 1142.9 # Old value: 526 # Line 1 b for front triangle.
CAR_FRONT_TRIANGLE_LINE2_M = 192/126 # old value: 0.6 # Line 2 m for front triangle.
CAR_FRONT_TRIANGLE_LINE2_B = -758.9 # Old value: -202 # Line 2 b for front triangle.
PED_FRONT_TRIANGLE_LINE1_M = -192/204 # old value: -0.6 # Line 1 m for front triangle.
PED_FRONT_TRIANGLE_LINE1_B = 779.3 # Old value: 526 # Line 1 b for front triangle.
PED_FRONT_TRIANGLE_LINE2_M = 192/204 # old value: 0.6 # Line 2 m for front triangle.
PED_FRONT_TRIANGLE_LINE2_B = -395.3 # Old value: -202 # Line 2 b for front triangle.
CLASSIFIER_PERIOD_MS = 100
LIDAR_PERIOD_MS = 200
RADAR_PERIOD_MS = 200
MIN_EMERGENCY_BRAKE_DURATION_MS = 100.0
MEASURE_EXEC_TIME = False
LANE_WIDTH = 3.5
MIN_STEERING_MANEUVER_MS = 2000.0
EMERGENCY_STEERING_TTC = 1.0
OBJECT_TRACKER_MAX_DISTANCE = 70.0
def __init__(self, controller_parameters):
(car_model, target_speed_m_s, is_direct_speed_control, target_lat_pos, self_vhc_id, slow_at_intersection,
use_fusion) = controller_parameters
BaseCarController.__init__(self, car_model)
self.slow_at_intersection = slow_at_intersection in ('True', 'true', 'yes', 'Yes')
self.is_direct_speed_control = is_direct_speed_control in ('True', 'true', 'yes', 'Yes')
self.use_fusion = use_fusion in ('True', 'true', 'yes', 'Yes')
self.camera_device_name = 'camera'
self.camera = None
self.compass_device_name = 'compass'
self.compass = None
self.display_device_name = 'display'
self.display = None
self.camera_info_display = None
self.sensor_display_device_name = 'sensor_display'
self.sensor_display = None
self.sensor_info_display = None
self.gps_device_name = 'gps'
self.gps = None
self.receiver_device_name = 'receiver'
self.receiver = None
self.emitter_device_name = 'emitter'
self.emitter = None
        self.lidar_main_device_name = 'velodyne'  # options: 'ibeo', 'velodyne'
self.lidar_main = None
self.radar_front_device_name = 'radar'
self.radar_front = None
self.target_speed_m_s = float(target_speed_m_s)
self.classifier = None
self.classification_client = None
self.obj_tracker = None
self.ground_truth_generator = None
self.contr_comm = ControllerCommunicationInterface()
self.target_lat_pos = float(target_lat_pos)
self.target_bearing = 0.0
self.lateral_controller = GenericStanleyController()
self.lateral_controller.k = 0.5
self.lateral_controller.k2 = 0.4
self.lateral_controller.k3 = 1.1
self.lateral_controller.set_output_range(-0.8, 0.8)
self.longitudinal_controller = GenericPIDController(0.15, 0.01, 0.0)
self.longitudinal_controller.set_integrator_value_range(-20.0, 20.0)
self.self_vhc_id = int(self_vhc_id)
self.path_following_tools = PathFollowingTools()
self.self_sensor_fusion_tracker = None
self.last_segment_ind = 0
self.self_current_state = [0.0, 0.0, 0.0, 0.0, 0.0]
self.last_segment_ind = 0
self.detour_start_time = None
self.target_speed_planner = TargetSpeedPlanner(default_speed=self.target_speed_m_s)
print('AutomatedDrivingControl Initialized: {}, {}'.format(car_model, self.target_speed_m_s))
def start_devices(self):
"""Start the devices on the car and initialize objects like classifier."""
# Start camera and the sensors:
self.camera = self.getCamera(self.camera_device_name)
if self.camera is not None:
self.camera.enable(self.CLASSIFIER_PERIOD_MS)
self.camera_info_display = CameraInfoDisplay(self.display)
self.gps = self.getGPS(self.gps_device_name)
if self.gps is not None:
self.gps.enable(WORLD_TIME_STEP_MS)
self.compass = self.getCompass(self.compass_device_name)
if self.compass is not None:
self.compass.enable(WORLD_TIME_STEP_MS)
self.receiver = self.getReceiver(self.receiver_device_name)
if self.receiver is not None:
self.receiver.enable(WORLD_TIME_STEP_MS)
self.emitter = self.getEmitter(self.emitter_device_name)
# Start the car engine
self.start_car()
def run(self):
"""Runs the controller."""
self.start_devices()
print("Devices Started.")
sys.stdout.flush()
def get_self_position():
"""Returns current self position."""
return self.self_current_state[0:2]
def get_self_speed_ms():
"""Returns current speed in m/s."""
return self.self_current_state[2]
def get_self_yaw_angle():
"""Returns self yaw angle in radians."""
return self.self_current_state[3]
# Internal functions to keep the code more readable:
def read_gps_sensor(gps_device):
"""Reads GPS sensor."""
if gps_device is not None:
sensor_gps_speed_m_s = gps_device.getSpeed()
sensor_gps_position_m = gps_device.getValues()
else:
sensor_gps_speed_m_s = 0.0
sensor_gps_position_m = [0.0, 0.0, 0.0]
return sensor_gps_position_m, sensor_gps_speed_m_s
def read_compass_sensor(compass_device):
"""Reads Compass Sensor."""
if compass_device is not None:
sensor_compass_bearing_rad = controller_commons.get_bearing(compass_device)
else:
sensor_compass_bearing_rad = 0.0
return sensor_compass_bearing_rad
def compute_and_apply_control():
"""Computes control output using the detected objects from sensor suite."""
cur_position = get_self_position()
cur_speed_ms = get_self_speed_ms()
cur_yaw_angle = get_self_yaw_angle()
# Compute control
if self.path_following_tools.target_path is not None:
# Compute distance from front wheels for smoother turns:
temp_cur_pos = [cur_position[0] - (self.FRONT_TO_REAR_WHEELS_DISTANCE * math.sin(cur_yaw_angle) +
cur_speed_ms * 0.2 * math.sin(cur_yaw_angle)),
cur_position[1] + (self.FRONT_TO_REAR_WHEELS_DISTANCE * math.cos(cur_yaw_angle) +
cur_speed_ms * 0.2 * math.cos(cur_yaw_angle))]
(current_segment_ind, line_segment_as_list, nearest_pos_on_path, dist_to_seg_end) = \
self.path_following_tools.get_current_segment(temp_cur_pos, self.last_segment_ind)
(distance_err, angle_err) = \
self.path_following_tools.get_distance_and_angle_error(temp_cur_pos,
cur_yaw_angle,
last_segment_ind=self.last_segment_ind,
is_detouring=False)
self.last_segment_ind = current_segment_ind
if len(self.path_following_tools.path_details) > current_segment_ind:
(next_turn_angle, travel_distance) = self.path_following_tools.path_details[current_segment_ind]
travel_distance += dist_to_seg_end
else:
(next_turn_angle, travel_distance) = (0.0, 0.0)
else:
current_segment_ind = -1
angle_err = self.target_bearing - cur_yaw_angle
while angle_err > math.pi:
angle_err -= 2*math.pi
while angle_err < -math.pi:
angle_err += 2*math.pi
distance_err = -(self.target_lat_pos - cur_position[0])
(next_turn_angle, travel_distance) = (0.0, 0.0)
current_target_speed = \
self.target_speed_planner.get_current_target_speed(cur_time_ms=cur_time_ms,
cur_segment_ind=current_segment_ind)
if self.slow_at_intersection and abs(next_turn_angle) > math.pi/60 and travel_distance < 100.0:
turn_ratio = min(1.0, abs(next_turn_angle)/(math.pi/4.0))
max_speed_limit = 10.0 + ((1.0 - turn_ratio)*30.0)
                # decrease the speed limit as we approach the intersection.
max_speed_limit = (max_speed_limit + (current_target_speed - max_speed_limit) *
((max(travel_distance, 10.0)-10.0)/80.0))
else:
max_speed_limit = current_target_speed
control_steering = self.lateral_controller.compute(angle_err,
distance_err,
cur_speed_ms)
speed_ratio = min(1.0, self.self_current_state[2]/22.0)
max_steering = 0.1 + (1.0 - speed_ratio)*0.7
control_steering = min(max(-max_steering, control_steering), max_steering)
if self.is_direct_speed_control:
# self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(10.0), angle=control_steering)
'''
v = 0.1
t = 0.3
global t1, v1, flag
if cur_time_ms==100:
self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(v), angle=control_steering)
elif cur_time_ms>=5000:
self.set_throttle(t)
# if cur_time_ms%200==0:
# print("time: "+str(cur_time_ms)+" vel: "+str(cur_speed_ms))
if abs(round(cur_speed_ms,0)-cur_speed_ms)<0.01:
t1 = cur_time_ms
v1 = cur_speed_ms
# print ("--> "+str(t1))
if cur_time_ms-t1 in (100,200,300,400,500,600,700,800,900,1000):
a = ((cur_speed_ms-v1)/(cur_time_ms-t1))*1000
# print("time: "+str(cur_time_ms)+" diff: "+str(cur_time_ms-t1)+" speed: "+str(round(v1,2)) + " acc: "+str(round(a,2)))
'''
# if cur_time_ms-t1 == 1000:
# a = ((cur_speed_ms-v1)/(cur_time_ms-t1))*1000
# print("time: "+str(cur_time_ms)+" diff: "+str(cur_time_ms-t1)+" speed: "+str(round(v1,2)) + " acc: "+str(round(a,2)))
if cur_time_ms<1010:
x = 5.0
self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(x),angle=control_steering)
else:
global time_index
if(target_t[time_index] < ((cur_time_ms/1000.0) -1.0) ):
time_index = time_index + 1
# x2 = exp_out[time_index][0]
# y2 = exp_out[time_index][1]
inc = 0.0
# if(time_index>0):
# t1 = exp_out[time_index-1][4]
# dt = cur_time_ms/1000.0 - 3 - t1
# x1 = exp_out[time_index-1][0]
# u1 = exp_out[time_index-1][3]
# a2 = exp_out[time_index][2]
# dx = u1*dt + 0.5*a2*dt*dt
# if(abs(x2-x1)==5.0):
# if( (dx-0.5)/abs(x2-x1)>(cur_position[1]-x1)/(x2-x1) ):
# inc = 0.05
# elif( (dx+0.5)/abs(x2-x1)<(cur_position[1]-x1)/(x2-x1) ):
# inc = -0.05
# else:
# inc = 0.0
# if(target_throttle[time_index])
self.set_throttle_and_steering_angle(target_throttle[time_index]+inc, control_steering)
if cur_time_ms%100==0:
global img_cnt
img_name = "img_"+str(img_cnt)+".png"
self.camera.saveImage("../../../images/"+img_name,1)
img_cnt = img_cnt + 1
data_dict[img_name] = [cur_speed_ms,target_throttle[time_index],control_steering]
# self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(min(max_speed_limit,
# current_target_speed)),
# angle=control_steering)
if cur_time_ms%500==0:
print("Time: "+str(cur_time_ms)+" Agent vehicle speed: "+str(cur_speed_ms) + " pos: "+str(cur_position))
else:
control_throttle = self.longitudinal_controller.compute(min(max_speed_limit, current_target_speed)
- cur_speed_ms)
self.set_throttle_and_steering_angle(control_throttle, control_steering)
if current_target_speed < 0.0:
# Emergency / sudden braking
self.set_brake(1.0)
self.set_throttle(0.0)
while self.step() >= 0:
sim_time = self.get_sim_time()
cur_time_ms = int(round(1000 * sim_time))
# -------------- Read Sensors----------------
# ************ Read GPS ************
(sensor_gps_position_m, sensor_gps_speed_m_s) = read_gps_sensor(self.gps)
# ************ Read Compass ************
sensor_compass_bearing_rad = read_compass_sensor(self.compass)
# -------------- Sensor Fusion ----------------
# ************ Sensor Fusion for own states (GPS + Compass) ************
if self.self_sensor_fusion_tracker is None:
self.self_current_state = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s,
sensor_compass_bearing_rad, 0.0]
if sensor_gps_speed_m_s > 50.0 or sensor_gps_speed_m_s < -20.0: # Filter out errors in read gps speed
sensor_gps_speed_m_s = 0.0
self.self_current_state[2] = sensor_gps_speed_m_s
if self.use_fusion:
# Initiate self sensor fusion tracker
self.self_sensor_fusion_tracker = SensorFusionTracker(initial_state_mean=self.self_current_state,
filter_type='ukf')
else:
if self.gps is not None and self.compass is not None:
measurement = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s,
sensor_compass_bearing_rad]
(self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_GPS_COMPASS)
elif self.gps is not None:
measurement = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s]
(self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_GPS)
elif self.compass is not None:
measurement = [sensor_compass_bearing_rad]
(self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_COMPASS)
else:
self.self_current_state = [0.0, 0.0, 0.0, 0.0, 0.0]
# Read sensor-like information from Simulation Supervisor
if self.receiver is not None:
messages = self.contr_comm.receive_all_communication(self.receiver)
command_list = self.contr_comm.extract_all_commands_from_message(messages)
path_modified = False
for command_item in command_list:
command = command_item[0]
if command == ControllerCommunicationInterface.SET_CONTROLLER_PARAMETERS_MESSAGE:
parameter = command_item[1]
if parameter.get_vehicle_id() == self.self_vhc_id:
if parameter.get_parameter_name() == 'target_position':
parameter_data = parameter.get_parameter_data()
# print(parameter_data)
self.path_following_tools.add_point_to_path(parameter_data)
path_modified = True
elif parameter.get_parameter_name() == 'target_speed_at_time':
# 1st parameter is the start time for the target speed in seconds as float.
# 2nd: how long will the target speed be active in seconds -1 for infinite/until next.
# 3rd parameter is the target speed.
parameter_data = parameter.get_parameter_data()
if parameter_data[1] < 0:
target_length = math.inf
else:
target_length = int(round(1000 * parameter_data[1]))
self.target_speed_planner.add_target_speed_data(
TargetSpeedData(event_type='time',
start_time=int(round(1000 * parameter_data[0])),
length=target_length,
target_speed=parameter_data[2]))
elif parameter.get_parameter_name() == 'target_speed_at_segment':
# 1st parameter is the start segment index for the target speed.
# 2nd: how long will the target speed be active in seconds:
# -1 for infinite/until next, 0 for during the segment
# 3rd parameter is the target speed.
parameter_data = parameter.get_parameter_data()
if parameter_data[1] < 0:
target_length = -1
else:
target_length = int(round(1000 * parameter_data[1]))
self.target_speed_planner.add_target_speed_data(
TargetSpeedData(event_type='segment',
start_time=int(round(parameter_data[0])),
length=target_length,
target_speed=parameter_data[2]))
if path_modified:
self.path_following_tools.smoothen_the_path()
self.path_following_tools.populate_the_path_with_details()
# print(self.path_following_tools.target_path)
#----------Dynamic Path computation starts-------------------------
'''
if(cur_time_ms == 10):
cur_position = get_self_position()
t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
t1.start()
global suboptimalPath
if (cur_time_ms == 8000):
t1.join()
self.path_following_tools.target_path = None
self.path_following_tools.path_details = None
for pt in suboptimalPath:
self.path_following_tools.add_point_to_path(pt)
self.path_following_tools.smoothen_the_path()
self.path_following_tools.populate_the_path_with_details()
cur_position = suboptimalPath[-1]
t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
t1.start()
elif (cur_time_ms % 8000 == 0):
t1.join()
# print(suboptimalPath)
# cur_position = get_self_position()
# (cur_seg,line_seg,nearest_pos,dis) = self.path_following_tools.get_current_segment(cur_position,0,self.path_following_tools.target_path)
self.path_following_tools.target_path = self.path_following_tools.future_target_path
self.path_following_tools.path_details = self.path_following_tools.future_path_details
cur_position = suboptimalPath[-1]
t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
t1.start()
'''
#---------Dynamic Path computation end--------------------
compute_and_apply_control()
out_file = "../../../control_throttle.pkl"
with open(out_file, 'rb') as handle:
prevdict = pickle.load(handle)
# print(prevdict)
prevdict.update(data_dict)
# print(prevdict)
with open(out_file, 'wb') as handle:
pickle.dump(prevdict, handle)
# Clean up
del self.classifier
del self.obj_tracker
print("Bye!")
sys.stdout.flush()
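# A rough summary of the supervisor parameters handled above (kept as a reference; the exact
# payload shapes are defined by the sender and by the path-following / target-speed helpers):
#
#   'target_position'          -> a point appended to the target path
#   'target_speed_at_time'     -> [start_s, duration_s, target_speed]; duration_s = -1 means until the next command
#   'target_speed_at_segment'  -> [segment_idx, duration_s, target_speed]; duration_s = -1 infinite, 0 only during the segment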
|
base.py
|
# web/ws/base.py --- Base websocket handling
#
# Copyright (C) 2018 Stefano Sartor - stefano.sartor@inaf.it
import asyncio
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
from threading import Thread
class WampBase(object):
"""Base class for websocket streaming"""
def __init__(self, con):
""":param web.rest.base.Connection con: the base http connection"""
self.conn = con
self.runner = None
self.url = None
self.loop = None
self.session = None
self.th = None
def connect(self, url, realm):
"""connect to websocket
:param str url: url to which connect
"""
self.url = url
if self.conn.id is None:
self.conn.login()
self.th = Thread(target=self.__f)
self.runner = ApplicationRunner(
url=url,
ssl=True,
realm=realm,
headers={"cookie": "sessionid=%s" % self.conn.id},
)
self.loop = asyncio.get_event_loop()
self.session = ApplicationSession()
coro = self.runner.run(self.session, start_loop=False)
(self.__transport, self.__protocol) = self.loop.run_until_complete(coro)
self.th.start()
    def subscribe(self, callback, topic):
        """Subscribe `callback` to `topic` on the current WAMP session."""
        if self.session is None:
            raise RuntimeError("no Connection active")
return self.session.subscribe(callback, topic)
def leave(self):
if self.session is not None:
self.session.leave()
self.stop()
def stop(self):
if self.loop is not None:
self.loop.stop()
self.loop = None
def __f(self):
# asyncio.set_event_loop(self.loop)
self.loop.run_forever()
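# A minimal usage sketch (assumption: `con` is a web.rest.base.Connection that exposes `id`
# and `login()`; the router URL and realm below are placeholders):
#
#     ws = WampBase(con)
#     ws.connect("wss://example.org/ws", realm="realm1")
#     sub = ws.subscribe(lambda *args, **kwargs: print(args, kwargs), "some.topic")
#     ...
#     ws.leave()   # leaves the session and stops the event loop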
|
__init__old.py
|
#!/usr/bin/env python
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import sys
import threading
import time
import uuid
import warnings
from collections import namedtuple
from functools import wraps
import numpy as np
import zmq
from zmq.utils import jsonapi
from .protocol import *
from .decentralizedworker import *
__all__ = ['__version__', 'WKRClient', 'ConcurrentWKRClient', 'WKRWorker', 'WKRDecentralizeCenter']
# in the future client version must match with server version
__version__ = '1.0.0-b'
if sys.version_info >= (3, 0):
from ._py3_var import *
else:
from ._py2_var import *
_Response = namedtuple('_Response', ['id', 'content'])
Response = namedtuple('Response', ['id', 'embedding'])
class WKRClient(object):
def __init__(self, ip='localhost', port=5555, port_out=5556,
protocol='obj', output_fmt='list',
show_server_config=False, identity=None,
check_version=True, check_length=False,
ignore_all_checks=True,
                 timeout=15*60*1000):  # receive timeout in ms; 15*60*1000 = 15 minutes here, -1 means wait forever
""" A client object connected to a TTSServer
Create a WKRClient that connects to a TTSServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
You can also use it as a context manager:
.. highlight:: python
.. code-block:: python
with WKRClient() as bc:
bc.encode(...)
# bc is automatically closed out of the context
:type timeout: int
:type check_version: bool
:type check_length: bool
:type ignore_all_checks: bool
:type identity: str
:type show_server_config: bool
:type output_fmt: str
:type port_out: int
:type port: int
:type ip: str
:param ip: the ip address of the server
:param port: port for pushing data from client to server, must be consistent with the server side config
:param port_out: port for publishing results from server to client, must be consistent with the server side config
        :param output_fmt: the output format of the returned encodings, either a numpy array or a python List[List[float]] ('ndarray'/'list')
:param show_server_config: whether to show server configs when first connected
:param identity: the UUID of this client
:param check_version: check if server has the same version as client, raise AttributeError if not the same
:param check_length: check if server `max_seq_len` is less than the sentence length before sent
:param ignore_all_checks: ignore all checks, set it to True if you are not sure whether the server is ready when constructing WKRClient()
:param timeout: set the timeout (milliseconds) for receive operation on the client, -1 means no timeout and wait until result returns
"""
self.context = zmq.Context()
self.sender = self.context.socket(zmq.PUSH)
self.sender.setsockopt(zmq.LINGER, 0)
self.identity = identity or str(uuid.uuid4()).encode('ascii')
self.sender.connect('tcp://%s:%d' % (ip, port))
self.receiver = self.context.socket(zmq.SUB)
self.receiver.setsockopt(zmq.LINGER, 0)
self.receiver.setsockopt(zmq.SUBSCRIBE, self.identity)
self.receiver.connect('tcp://%s:%d' % (ip, port_out))
self.request_id = 0
self.timeout = timeout
self.pending_request = set()
self.pending_response = {}
if protocol not in ['obj', 'numpy']:
raise AttributeError('"protocol" must be "obj" or "numpy"')
self.protocol = protocol
if output_fmt == 'ndarray':
self.formatter = lambda x: x
elif output_fmt == 'list':
self.formatter = lambda x: x.tolist()
else:
raise AttributeError('"output_fmt" must be "ndarray" or "list"')
self.output_fmt = output_fmt
self.port = port
self.port_out = port_out
self.ip = ip
self.length_limit = 0
self.token_info_available = False
if not ignore_all_checks and (check_version or show_server_config or check_length):
s_status = self.server_status
if check_version and s_status['server_version'] != self.status['client_version']:
raise AttributeError('version mismatch! server version is %s but client version is %s!\n'
'consider "pip install -U tts-serving-server tts-serving-client"\n'
'or disable version-check by "WKRClient(check_version=False)"' % (
s_status['server_version'], self.status['client_version']))
if check_length:
if s_status['target_max_seq_len'] is not None:
self.length_limit = int(s_status['target_max_seq_len'])
else:
self.length_limit = None
if show_server_config:
self._print_dict(s_status, 'server config:')
def close(self):
"""
Gently close all connections of the client. If you are using WKRClient as context manager,
then this is not necessary.
"""
self.sender.close()
self.receiver.close()
self.context.term()
def _send(self, msg, target_request_id=None):
self.request_id += 1
req_id = target_request_id if target_request_id else self.request_id
req_id = str(req_id)
if self.protocol == 'json':
msg = jsonapi.dumps(msg)
self.sender.send_multipart([self.identity, msg, b'%s' % req_id.encode(), b''])
else:
md = dict(dtype=str(msg.dtype), shape=msg.shape)
self.sender.send_multipart([self.identity, msg, b'%s' % req_id.encode(), jsonapi.dumps(md)])
self.pending_request.add(req_id)
return req_id
def _recv(self, wait_for_req_id=None):
try:
wait_for_req_id = str(wait_for_req_id) if wait_for_req_id else None
while True:
# a request has been returned and found in pending_response
if wait_for_req_id in self.pending_response:
response = self.pending_response.pop(wait_for_req_id)
return _Response(wait_for_req_id, response)
# receive a response
response = self.receiver.recv_multipart()
request_id = response[-1].decode()
# if not wait for particular response then simply return
if not wait_for_req_id or (wait_for_req_id == request_id):
self.pending_request.remove(request_id)
return _Response(request_id, response)
elif wait_for_req_id != request_id:
self.pending_response[request_id] = response
# wait for the next response
except Exception as e:
raise e
finally:
if wait_for_req_id in self.pending_request:
self.pending_request.remove(wait_for_req_id)
def _recv_ndarray(self, wait_for_req_id=None):
request_id, response = self._recv(wait_for_req_id)
if self.protocol == 'json':
arr_info, json_val = response[1], jsonapi.loads(response[2])
return Response(request_id, json_val)
else:
arr_info, arr_val = jsonapi.loads(response[1]), response[2]
X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
return Response(request_id, self.formatter(X.reshape(arr_info['shape'])))
@property
def status(self):
"""
Get the status of this WKRClient instance
:rtype: dict[str, str]
:return: a dictionary contains the status of this WKRClient instance
"""
return {
'identity': self.identity,
'num_request': self.request_id,
'num_pending_request': len(self.pending_request),
'pending_request': self.pending_request,
'output_fmt': self.output_fmt,
'port': self.port,
'port_out': self.port_out,
'server_ip': self.ip,
'client_version': __version__,
'timeout': self.timeout
}
def _timeout(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if 'blocking' in kwargs and not kwargs['blocking']:
# override client timeout setting if `func` is called in non-blocking way
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
else:
self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)
try:
return func(self, *args, **kwargs)
except zmq.error.Again as _e:
t_e = TimeoutError(
                'no response from the server (with "timeout"=%d ms), please check the following: '
'is the server still online? is the network broken? are "port" and "port_out" correct? '
'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout)
if _py2:
raise t_e
else:
_raise(t_e, _e)
finally:
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
return arg_wrapper
@property
@_timeout
def server_status(self):
"""
Get the current status of the server connected to this client
:return: a dictionary contains the current status of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_CONFIG')
return jsonapi.loads(self._recv(req_id).content[1])
@_timeout
def encode(self, data, blocking=True, target_request_id=None):
req_id = self._send(data, target_request_id=target_request_id)
if not blocking:
return None
r = self._recv_ndarray(req_id)
return r.embedding
def fetch(self, delay=.0):
""" Fetch the encoded vectors from server, use it with `encode(blocking=False)`
        Use it after `encode(texts, blocking=False)`. If there are no pending requests, it will return None.
        Note that `fetch()` does not preserve the order of the requests! Say you have two non-blocking requests,
        R1 and R2, where R1 has 256 samples and R2 has 1 sample. It could be that R2 returns first.
To fetch all results in the original sending order, please use `fetch_all(sort=True)`
:type delay: float
:param delay: delay in seconds and then run fetcher
:return: a generator that yields request id and encoded vector in a tuple, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
time.sleep(delay)
while self.pending_request:
yield self._recv_ndarray()
def fetch_all(self, sort=True, concat=False, parse_id_func=None):
""" Fetch all encoded vectors from server, use it with `encode(blocking=False)`
        Use it after `encode(texts, blocking=False)`. If there are no pending requests, it will return None.
:type sort: bool
:type concat: bool
:param sort: sort results by their request ids. It should be True if you want to preserve the sending order
:param concat: concatenate all results into one ndarray
:return: encoded sentence/token-level embeddings in sending order
:rtype: numpy.ndarray or list[list[float]]
"""
if self.pending_request:
tmp = list(self.fetch())
if sort:
if parse_id_func is None:
parse_id_func = lambda v: v.id
tmp = sorted(tmp, key=parse_id_func)
tmp = [v.embedding for v in tmp]
if concat:
if self.output_fmt == 'ndarray':
tmp = np.concatenate(tmp, axis=0)
elif self.output_fmt == 'list':
tmp = [vv for v in tmp for vv in v]
return tmp
def encode_async(self, batch_generator, max_num_batch=None, delay=0.1, **kwargs):
""" Async encode batches from a generator
:param delay: delay in seconds and then run fetcher
:param batch_generator: a generator that yields list[str] or list[list[str]] (for `is_tokenized=True`) every time
:param max_num_batch: stop after encoding this number of batches
:param `**kwargs`: the rest parameters please refer to `encode()`
:return: a generator that yields encoded vectors in ndarray, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
def run():
cnt = 0
for texts in batch_generator:
self.encode(texts, blocking=False, **kwargs)
cnt += 1
if max_num_batch and cnt == max_num_batch:
break
t = threading.Thread(target=run)
t.start()
return self.fetch(delay)
@staticmethod
def _check_length(texts, len_limit):
# do a simple whitespace tokenizer
return all(len(t.split()) <= len_limit for t in texts)
@staticmethod
def _print_dict(x, title=None):
if title:
print(title)
for k, v in x.items():
print('%30s\t=\t%-30s' % (k, v))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class BCManager():
def __init__(self, available_bc):
self.available_bc = available_bc
self.bc = None
def __enter__(self):
self.bc = self.available_bc.pop()
return self.bc
def __exit__(self, *args):
self.available_bc.append(self.bc)
class ConcurrentWKRClient(WKRClient):
def __init__(self, max_concurrency=10, **kwargs):
""" A thread-safe client object connected to a TTSServer
Create a WKRClient that connects to a TTSServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `check_version=False` and `check_length=False`
:type max_concurrency: int
:param max_concurrency: the maximum number of concurrent connections allowed
"""
try:
from wkr_serving.client import WKRClient
except ImportError:
            raise ImportError('WKRClient module is not available, it is required for serving HTTP requests. '
                              'Please use "pip install -U tts-serving-client" to install it. '
'If you do not want to use it as an HTTP server, '
'then remove "-http_port" from the command line.')
self.available_bc = [WKRClient(**kwargs) for _ in range(max_concurrency)]
self.max_concurrency = max_concurrency
def close(self):
for bc in self.available_bc:
bc.close()
def _concurrent(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
try:
with BCManager(self.available_bc) as bc:
f = getattr(bc, func.__name__)
r = f if isinstance(f, dict) else f(*args, **kwargs)
return r
except IndexError:
                raise RuntimeError('Too many concurrent connections! '
'Try to increase the value of "max_concurrency", '
'currently =%d' % self.max_concurrency)
return arg_wrapper
@_concurrent
def encode(self, **kwargs):
pass
@property
@_concurrent
def server_status(self):
pass
@property
@_concurrent
def status(self):
pass
def fetch(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentWKRClient" is not implemented yet')
def fetch_all(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentWKRClient" is not implemented yet')
def encode_async(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentWKRClient" is not implemented yet')
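# A minimal usage sketch of the client defined above (assumptions: a compatible server is
# already listening on the default ports, and payload shapes/dtypes are illustrative only):
#
#     import numpy as np
#     with WKRClient(ip='localhost', port=5555, port_out=5556, protocol='numpy') as bc:
#         result = bc.encode(np.zeros((1, 80), dtype=np.float32))        # blocking request
#         bc.encode(np.ones((1, 80), dtype=np.float32), blocking=False)  # queue a request
#         results = bc.fetch_all(sort=True)                              # collect in sending order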
|
main.py
|
import zmq
import syslog
import os
import io
import argparse
import pdb
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing import Process, Pool
from event_handler import EventHandler
from config import Config
from logger import MEHLogger
from plugin_provider import PluginProvider
class Main(object):
def __init__(self, params):
self.ctx = zmq.Context()
self.pool = Pool(processes=mp.cpu_count())
self.config = Config(params = params)
self.logger = MEHLogger(self.config)
self.plugin_provider = PluginProvider(self.config, self.logger)
self.event_handler = EventHandler(self.plugin_provider, self.logger)
def start(self):
self.logger.info('Connecting to MISP-ZMQ Server at %s:%s' % (self.config.misp_host, self.config.misp_port))
s = self.ctx.socket(zmq.SUB)
s.connect('tcp://%s:%s' % (self.config.misp_host, self.config.misp_port))
s.setsockopt(zmq.SUBSCRIBE, b'')
self.logger.info('MISP-Server connection established.')
self.logger.info('Subscribed and listening for MISP-Messages...')
while True:
msg = s.recv_multipart()
self.logger.info('New MISP-Message received.')
Process(target = self.process, args=msg).start()
s.close()
self.logger.info("Connection closed!")
def process(self, msg):
self.event_handler.process(msg)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='MISP ZMQ EventHandler listening for new ZMQ Events.')
parser.add_argument('-c', '--config', dest='config_path',
action='store',
default="/etc/misp_eventhandler/config.yml",
metavar="FILE",
help='Path to config file (default: /etc/misp_eventhandler/config.yml)'
)
parser.add_argument('-r', '--host', dest='host',
                        action='store',
default="localhost",
help='MISP ZMQ IP Address (default: localhost)'
)
parser.add_argument('-p', '--port', dest='port',
default=50000,
type=int,
                        help='MISP ZMQ Port (default: 50000)'
)
parser.add_argument('-e', '--env', dest='env',
                        action='store',
default="production",
help='Define execution environment (default: production)'
)
    parser.add_argument('-d', '--daemon', dest='daemon',
action='store_true',
default=False,
                        help='Run in background as a daemon (default: False)'
)
parser.add_argument('--log-level', dest='log_level',
                        action='store',
                        default="info",
help='define log level (default: info)'
)
main = Main(parser.parse_args())
main.start()
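    # Example invocation (host and paths are placeholders, not actual deployment values):
    #
    #     python main.py -c /etc/misp_eventhandler/config.yml -r localhost -p 50000 -e production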
|
server.py
|
# coding: utf-8
from socket import *
import json
import selectors
import queue
import threading
sel = selectors.DefaultSelector() # create a selector
clientsDB = {}  # clients' database
msgQ = queue.Queue()
# This function gets a message from the queue, decodes it and resends it
def work():
while True:
conn, data = msgQ.get()
print('echoing', repr(data), 'to', conn)
# Hope it won't block
json_msg = json.loads(data.decode('utf-8'))
if json_msg["Message"]=="exit":
clientsDB.pop(conn.getpeername()[1])
print(clientsDB)
for port,connection in clientsDB.items():
connection.send((str(json_msg["Timestamp"]) + " | from: " + str(conn.getpeername()[1] ) + " --> " + json_msg["Message"] + "*").encode('utf-8'))
# This function simply reads the message received and puts it in a queue
def read(conn, mask):
data = conn.recv(1024) # Should be ready
if data:
msgQ.put((conn,data))
else:
print('closing', conn)
sel.unregister(conn)
conn.close()
def dispatch():
while True:
events = sel.select()
for key, mask in events:
callback = key.data
callback(key.fileobj, mask)
def main():
serverSocket = socket(AF_INET, SOCK_STREAM) # create an INET, STREAMing socket
serverSocket.bind(('localhost', 1241)) # bind the socket to a public host, and a well-known port
serverSocket.listen(5) # become a server socket
#Create and start the dispatcher thread
dispatcher = threading.Thread(target=dispatch) #Create new dispatcher thread
dispatcher.start()
#Create and start the 2 worker threads
workers1 = threading.Thread(target=work)
workers1.start()
workers2 = threading.Thread(target=work)
workers2.start()
while True:
clientSocket, addr = serverSocket.accept() # Accept new client
print('accepted', clientSocket, 'from', addr)
ip, port = addr
clientsDB[port] = clientSocket
clientSocket.setblocking(False)
sel.register(clientSocket, selectors.EVENT_READ, read) # Register new client
        # note: do not close the socket here; read() unregisters and closes it when the client disconnects
if __name__ == '__main__':
main()
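# A minimal client sketch for this server (message format inferred from the handler above:
# a JSON object with "Message" and "Timestamp" keys; sending "exit" removes the client):
#
#     import json, time
#     from socket import socket, AF_INET, SOCK_STREAM
#
#     s = socket(AF_INET, SOCK_STREAM)
#     s.connect(('localhost', 1241))
#     s.send(json.dumps({"Message": "hello", "Timestamp": time.time()}).encode('utf-8'))
#     print(s.recv(1024).decode('utf-8'))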
|
main.py
|
from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
import torch.multiprocessing as mp
from environment import create_env
from model import A3C_MLP, A3C_CONV
from train import train
from test import test
from shared_optim import SharedRMSprop, SharedAdam
from gym.configuration import undo_logger_setup
import time
undo_logger_setup()
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument(
'--lr',
type=float,
default=0.0001,
metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument(
'--gamma',
type=float,
default=0.99,
metavar='G',
help='discount factor for rewards (default: 0.99)')
parser.add_argument(
'--tau',
type=float,
default=1.00,
metavar='T',
help='parameter for GAE (default: 1.00)')
parser.add_argument(
'--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument(
'--workers',
type=int,
default=32,
metavar='W',
help='how many training processes to use (default: 32)')
parser.add_argument(
'--num-steps',
type=int,
default=20,
metavar='NS',
    help='number of forward steps in A3C (default: 20)')
parser.add_argument(
'--max-episode-length',
type=int,
default=10000,
metavar='M',
help='maximum length of an episode (default: 10000)')
parser.add_argument(
'--env',
default='BipedalWalker-v2',
metavar='ENV',
help='environment to train on (default: BipedalWalker-v2)')
parser.add_argument(
'--shared-optimizer',
default=True,
metavar='SO',
    help='use an optimizer with statistics shared across workers (default: True)')
parser.add_argument(
'--load',
default=False,
metavar='L',
help='load a trained model')
parser.add_argument(
'--save-max',
default=True,
metavar='SM',
    help='save the model whenever a test run matches or beats the previous high score')
parser.add_argument(
'--optimizer',
default='Adam',
metavar='OPT',
    help='shared optimizer choice: Adam or RMSprop')
parser.add_argument(
'--load-model-dir',
default='trained_models/',
metavar='LMD',
help='folder to load trained models from')
parser.add_argument(
'--save-model-dir',
default='trained_models/',
metavar='SMD',
help='folder to save trained models')
parser.add_argument(
'--log-dir',
default='logs/',
metavar='LG',
help='folder to save logs')
parser.add_argument(
'--model',
default='MLP',
metavar='M',
help='Model type to use')
parser.add_argument(
'--stack-frames',
type=int,
default=1,
metavar='SF',
help='Choose number of observations to stack')
parser.add_argument(
'--gpu-ids',
type=int,
default=-1,
nargs='+',
help='GPUs to use [-1 CPU only] (default: -1)')
parser.add_argument(
'--amsgrad',
default=True,
metavar='AM',
help='Adam optimizer amsgrad parameter')
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
# Implemented multiprocessing using locks but was not beneficial. Hogwild
# training was far superior
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.gpu_ids == -1:
args.gpu_ids = [-1]
else:
torch.cuda.manual_seed(args.seed)
    #mp.set_start_method('spawn')  # commented out by dusty-nv for python 2.7 compatibility
env = create_env(args.env, args)
if args.model == 'MLP':
shared_model = A3C_MLP(
env.observation_space.shape[0], env.action_space, args.stack_frames)
if args.model == 'CONV':
shared_model = A3C_CONV(args.stack_frames, env.action_space)
if args.load:
saved_state = torch.load('{0}{1}.dat'.format(
args.load_model_dir, args.env), map_location=lambda storage, loc: storage)
shared_model.load_state_dict(saved_state)
shared_model.share_memory()
if args.shared_optimizer:
if args.optimizer == 'RMSprop':
optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
if args.optimizer == 'Adam':
optimizer = SharedAdam(
shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad)
optimizer.share_memory()
else:
optimizer = None
processes = []
p = mp.Process(target=test, args=(args, shared_model))
p.start()
processes.append(p)
time.sleep(0.1)
for rank in range(0, args.workers):
p = mp.Process(target=train, args=(
rank, args, shared_model, optimizer))
p.start()
processes.append(p)
time.sleep(0.1)
for p in processes:
time.sleep(0.1)
p.join()
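# Example invocation (flag values are illustrative; defaults are defined by the parser above):
#
#     python main.py --env BipedalWalker-v2 --workers 32 --model MLP --stack-frames 1 --gpu-ids -1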
|
main.py
|
"""
-------------------------------------------------
File Name: main.py
Description : 爬虫主程序
Author : Cai Yufan
date: 2019/10/14
-------------------------------------------------
Change Activity:
add IP proxy module
change to abuyun
每次到下一页都会改一次IP:IP更改太乱了
或者可以每隔10分钟改一次IP:固定更改,不灵活
给IP加锁?行不通,代理服务器自动变换IP,无法加锁
大问题:
多线程IP仍有可能出错,因为每一篇文章,需要三次请求,两次预请求一次下载,这之间有时间差,
若IP在这个时间差内失效,IP更改将出错。
小问题:
check_ip_valid 太过频繁,可能导致无法请求而出错
-------------------------------------------------
"""
import requests
import re
import time, os, shutil, logging
from UserInput import get_uesr_inpt
from GetConfig import config
# from CrackVerifyCode import crack
from GetPageDetail import PageDetail
# import quote for URL percent-encoding
from urllib.parse import quote
# import BeautifulSoup for HTML parsing
from bs4 import BeautifulSoup
import threading
import queue
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import kdl
import random
HEADER = config.crawl_headers
# URL used to obtain the session cookie
BASIC_URL = 'http://kns.cnki.net/kns/brief/result.aspx'
# the search is registered first with a POST request
SEARCH_HANDLE_URL = 'http://kns.cnki.net/kns/request/SearchHandler.ashx'
# GET request that returns the reference results
GET_PAGE_URL = 'http://kns.cnki.net/kns/brief/brief.aspx?pagename='
# base URL for downloads
DOWNLOAD_URL = 'http://kns.cnki.net/kns/'
# base URL for switching result pages
CHANGE_PAGE_URL = 'http://kns.cnki.net/kns/brief/brief.aspx'
#################################################################
# proxy server
proxyHost = "http-cla.abuyun.com"
proxyPort = "9030"
# proxy tunnel credentials
proxyUser = "HN9CNRBIIRFC671C"
proxyPass = "5799B743F9F08540"
proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
"host" : proxyHost,
"port" : proxyPort,
"user" : proxyUser,
"pass" : proxyPass,
}
proxies = {
"http": proxyMeta,
"https": proxyMeta,
}
def get_one_ip():
print("try to get one new ip.")
if not check_ip_valid(proxies):
# print("get one new ip success.")
update_ip(proxies)
else:
# print("old ip still work.")
pass
return proxies
def check_ip_valid(ip_json):
targetUrl = "http://test.abuyun.com"
resp = requests.get(targetUrl, proxies=ip_json)
# print("check_ip_valid:", resp.status_code == 200)
return resp.status_code == 200
# refresh the IP used for the detail pages
def update_ip(proxy):
targetUrl = "http://proxy.abuyun.com/switch-ip"
resp = requests.get(targetUrl, proxies=proxy)
print("新的IP:", resp.text)
return
##################################################################
class SearchTools(object):
'''
    Search helper class.
    Implements the search workflow.
'''
def __init__(self, cnt):
session = requests.Session()
self.session = session
self.cur_page_num = 1
        # keep the session alive across requests
ip_json = get_one_ip()
# print(ip)
# print(ip_json)
# self.ip_string = ip_json['http'][7:]
self.ip_json = ip_json
# self.ip_for_detail_string = ip_json['http'][7:]
self.ip_for_detail_json = ip_json
self.session.get(BASIC_URL, headers=HEADER, timeout=11, proxies=self.ip_json)
def search_reference(self, ueser_input, item):
'''
        Send a POST request first,
        then a GET request (this one does not carry the reference fields);
        these two requests together return the reference list.
'''
static_post_data = {
'action': '',
'NaviCode': '*',
'ua': '1.21',
'isinEn': '1',
'PageName': 'ASP.brief_default_result_aspx',
'DbPrefix': 'SCDB',
'DbCatalog': '中国学术期刊网络出版总库',
'ConfigFile': 'CJFQ.xml',
            'db_opt': 'CJFQ',  # search category (the panel on the right side of CNKI)
'db_value': '中国学术期刊网络出版总库',
'his': '0',
'db_cjfqview': '中国学术期刊网络出版总库,WWJD',
'db_cflqview': '中国学术期刊网络出版总库',
'__': time.asctime(time.localtime()) + ' GMT+0800 (中国标准时间)'
}
        # merge the fixed fields with the user-supplied fields
post_data = {**static_post_data, **ueser_input}
# print(post_data)
        # the first request is mandatory, otherwise the server complains that the user does not exist
first_post_res = self.session.post(
SEARCH_HANDLE_URL, data=post_data, headers=HEADER, proxies=self.ip_json)
        # the GET request needs the value of the first search condition
key_value = quote(ueser_input.get('txt_1_value1'))
self.get_result_url = GET_PAGE_URL + first_post_res.text + '&t=1544249384932&keyValue=' + key_value + '&S=1&sorttype='
        # first page of the search results
second_get_res = self.session.get(self.get_result_url,
headers=HEADER, timeout=12, proxies=self.ip_json)
change_page_pattern_compile = re.compile(
r'.*?pagerTitleCell.*?<a href="(.*?)".*')
try:
self.change_page_url = re.search(change_page_pattern_compile,
second_get_res.text).group(1)
except:
# print("该学校在该年无记录或名称错误")
# global global_remain_page
# global_remain_page = -1
item.change_global_remain_page(-1)
return 0
return self.parse_page(
self.pre_parse_page(second_get_res.text, item), second_get_res.text, item)
def pre_parse_page(self, page_source, item):
'''
        Pre-crawl: detect how many pages there are and how many remain, and let the user choose how many pages to fetch.
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
        # print('Found ' + reference_num + ' results; downloading everything takes roughly ' + s2h(reference_num_int * 5) + '.')
        # if word:
        #     is_all_download = 'y'
        # else:
        #     is_all_download = input('Download everything (y/n)?')
is_all_download = 'y'
        # convert the total count into a number of pages, 20 entries per page
if is_all_download == 'y':
total_page, i = divmod(reference_num_int, 20)
            # print('total', total_page + 1, 'pages')
            if item.return_remain_page() == 9999:
select_download_page = item.return_start_page()
self.cur_page_num = select_download_page
# global global_first
# if select_download_page == 1:
# global_first = True
# else:
# global_first = False
if select_download_page == 1:
item.change_global_first(1)
else:
item.change_global_first(0)
            # -1 means this journal does not exist; see the except branch in search_reference().
            elif item.return_remain_page() == -1:
self.cur_page_num = 1
else:
# global global_current_page
######################################################################
# self.cur_page_num = global_current_page
self.cur_page_num = item.return_current_page()
# download_page_left = item.return_remain_page()
# return download_page_left
download_page_left = total_page - self.cur_page_num
if i != 0:
download_page_left += 1
# print("pre_parse_page download_page_left", download_page_left)
return download_page_left
def parse_page(self, download_page_left, page_source, item):
'''
        Save the page information
        and parse the download link of every entry on the page.
'''
soup = BeautifulSoup(page_source, 'lxml')
        # locate the content table area
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
        # handle the captcha: if a captcha page is returned this step fails and we fall into the exception branch, i.e. the page gets refreshed
try:
            # drop the first tr tag (the table header)
# print("parse_page: ", download_page_left)
tr_table.tr.extract()
except Exception as e:
# logging.error(e)
################################
            # 10.14: as soon as a captcha appears, switch the IP right away
            print("captcha encountered")
update_ip(proxies)
###############################
# print("剩余的页数",download_page_left)
# print("已经爬取的页数",self.cur_page_num)
# global global_current_page
# global_current_page = self.cur_page_num
item.change_global_current_page(self.cur_page_num)
return
        # iterate over every row
        # print(len(tr_table.find_all(name='tr')))  # inspect what the page returned
        # print(len(list(enumerate(tr_table.find_all(name='tr')))))
        # global global_first  # global variables are no longer used; this state now lives in GlobalItem
global_first = item.return_global_first()
if not global_first:
# global_first = True
item.change_global_first(1)
self.get_another_page(download_page_left, item)
# print("global_first return")
return
# print(download_page_left, '\n', tr_table.find_all(name='tr'))
# print(tr_table.find_all(name='tr'))
if tr_table.find_all(name='tr') == []:
# print("出现问题")
# print("剩余的页数",download_page_left)
# print("已经爬取的页数",self.cur_page_num)
# global_current_page = self.cur_page_num
item.change_global_current_page(self.cur_page_num)
return
# print(tr_table)
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
# print(tr_info)
single_refence_list = []
try:
tr_text = ''
download_url = ''
detail_url = ''
author_url = ''
                # iterate over every column
for index, td_info in enumerate(tr_info.find_all(name='td')):
                    # the information in a single column is messy, so concatenate it again here
# print(tr_info.find_all(name='td'))
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
                    # note: results are saved into a folder named after each journal
with open(item.return_name() + '/ReferenceList.txt', 'a', encoding='utf-8') as file:
file.write(td_text +' ')
                    # look for the download link
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
                    # look for the detail-page link
dt_url = td_info.find('a', attrs={'class': 'fz14'})
                    # skip the columns that are not needed
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
download_url = dl_url.attrs['href']
                # split the information of each reference into fields
single_refence_list = tr_text.split(' ')
# print(single_refence_list)
self.download_refence(download_url, single_refence_list, item)
                # check whether detail-page scraping is enabled
                # most of the useful information is scraped from the detail page!
if not check_ip_valid(self.ip_for_detail_json):
print("下载详细页面时,IP过期")
update_ip(proxies)
if config.crawl_isdetail == '1':
time.sleep(config.crawl_stepWaitTime)
item.get_page_detail.get_detail_page(self.session, self.get_result_url, detail_url,
single_refence_list, self.download_url,
self.cur_page_num, item.return_name(), self.ip_for_detail_json)
                # write the author ids into the reference list
try:
with open(item.return_name() + '/ReferenceList.txt', 'a', encoding='utf-8') as file:
for author_tmp in tr_info.find_all(class_='author_flag'):
author_url = author_tmp.a['href']
file.write(author_url + ' ')
                        # write an empty line after each row
file.write('\n')
except:
# print("no author id")
pass
            # download_page_left is the number of pages still waiting to be crawled
except Exception as e:
            ### modified
# print("get this line fail, log to fail_file: fail_pdf.txt")
logging.error(e)
fail_file = open("fail_pdf.txt", "a", encoding='utf-8')
fail_file.write(single_refence_list[1]+'\n')
# print(single_refence_list)
fail_file.close()
global FAIL
FAIL = FAIL + 1
            # print('error count:', FAIL)
# print("download_page_left:", download_page_left)
download_page_left = download_page_left - 1
# global global_remain_page
# global_remain_page = download_page_left
item.change_global_remain_page(download_page_left)
if download_page_left >= 0:
self.cur_page_num += 1
item.change_global_current_page(self.cur_page_num)
self.get_another_page(download_page_left, item)
# print("parse page finials:", download_page_left)
return
def get_another_page(self, download_page_left, item):
'''
        Requesting the following pages differs from requesting the first page,
        so the request is rebuilt here.
'''
try:
time.sleep(config.crawl_stepWaitTime)
curpage_pattern_compile = re.compile(r'.*?curpage=(\d+).*?')
self.get_result_url = CHANGE_PAGE_URL + re.sub(
curpage_pattern_compile, '?curpage=' + str(self.cur_page_num),
self.change_page_url)
if check_ip_valid(self.ip_json):
get_res = self.session.get(self.get_result_url, headers=HEADER, timeout=13, proxies=self.ip_json)
else:
print("主页面 ip 过期: ")
item.change_global_current_page(self.cur_page_num) # 可能冗余
return
# print("get another page:", download_page_left)
self.parse_page(download_page_left, get_res.text, item)
except Exception as e:
print("get another page fail")
            item.change_global_current_page(self.cur_page_num)  # possibly redundant
logging.error(e)
return
def download_refence(self,url, single_refence_list, item):
'''
        CAI: note this downloads the CAJ file from the result-list page, not from the detail page;
        it is currently disabled.
        Builds the download URL and performs the download.
'''
        print(self.cur_page_num, 'downloading: ' + item.return_name() + ' ' + single_refence_list[1])
name = single_refence_list[1] + '_' + single_refence_list[2]
        # sanitize the file name: the resource name on the site may contain characters that cannot be saved locally
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
name = re.sub(file_pattern_compile, '', name)
        # build the download URL
self.download_url = DOWNLOAD_URL + re.sub(r'../', '', url)
        # save the download link
# with open('data/Links.txt', 'a', encoding='utf-8') as file:
# file.write(self.download_url + '\n')
        # check whether download mode is enabled
if config.crawl_isdownload == '1':
if not os.path.isdir('data/CAJs'):
os.mkdir(r'data/CAJs')
refence_file = requests.get(self.download_url, headers=HEADER, timeout=14)
if not os.path.exists('data/CAJs\\' + name + '.caj'):
with open('data/CAJs\\' + name + '.caj', 'wb') as file:
file.write(refence_file.content)
else:
print("Fail! The same name", name)
time.sleep(config.crawl_stepWaitTime)
def s2h(seconds):
'''
    Convert a number of seconds into an hours/minutes/seconds string.
'''
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return ("%02d小时%02d分钟%02d秒" % (h, m, s))
def main(word, year, item):
time.perf_counter()
    # be careful to create an empty data folder!
    # if os.path.isdir('data'):
    #     print(' ')
    # # recursively delete the folder
    # # shutil.rmtree('data')
    # # create an empty one
    # else:
    #     os.mkdir('data')
if not os.path.exists('data'):
os.makedirs('data')
search = SearchTools(item.return_counter())
search.search_reference(get_uesr_inpt(word, year), item)
    # print('crawler total runtime: ' + s2h(time.perf_counter()))
# print('--------------------------')
class GlobalItem(object):
def __init__(self, counter, name):
self.global_current_page = 1
self.global_first = 1
self.global_remain_page = 9999
self.counter = counter
self.start_page = 1
self.name = name
self.get_page_detail = PageDetail()
def change_global_remain_page(self, nn):
self.global_remain_page = nn
def change_global_current_page(self, nn):
self.global_current_page = nn
def change_global_first(self, nn):
self.global_first = nn
def return_remain_page(self):
return self.global_remain_page
def return_current_page(self):
return self.global_current_page
def return_counter(self):
return self.counter
def return_global_first(self):
return self.global_first
def return_start_page(self):
return self.start_page
def change_start_page(self, nn):
self.start_page = nn
def return_name(self):
return self.name
class MyTask:
def __init__(self, page_start, name, counter):
self.page_start = page_start
self.name = name
self.counter = counter
    def run(self):  # the work to execute goes into run(); the worker thread calls run() once the task is dequeued
print("Starting " + self.name)
# this self.counter is for IP
item = GlobalItem(self.counter, self.name)
word = self.name.strip()
# try:
# main(word, item.return_remain_page(), year, item)
# while item.return_remain_page() != -1:
# item.change_global_first(0)
# main(word, item.return_remain_page(), year, item)
# # print("出现验证码,将继续下载。剩余的页数:", item.return_remain_page())
# # print('--------------------------')
# with open('finish_journal.txt','a') as file:
# file.write(self.name)
# file.write('\n')
# print("Exiting, success finish" + self.name)
# except Exception as e:
# logging.error(e)
# with open('fail_journal.txt','a') as file:
# file.write(self.name)
# file.write('\n')
# print("Fail " + self.name)
try:
main(word, year, item)
except:
pass
# print("first main procedure finish")
while item.return_remain_page() != -1:
try:
# print("将重新登录以继续下载。剩余的页数:", word, item.return_remain_page())
# print(item.return_current_page())
item.change_global_first(0)
main(word, year, item)
except Exception as e:
logging.error(e)
time.sleep(1)
# print('--------------------------')
with open('finish_journal.txt', 'a', encoding='utf-8') as file:
file.write(self.name)
file.write('\n')
print("Exiting, success finish" + self.name, item.return_remain_page())
def return_name(self):
return self.name
queue = queue.Queue()
# the task loop executed by each worker thread in the pool
def do_job():
while True:
i = queue.get()
# print('index %s, curent: %s' % (i.return_name(), threading.current_thread()))
i.run()
queue.task_done()
FAIL = 0
if __name__ == '__main__':
file = open("test_journal.txt", 'r', encoding='utf-8')
lines = file.readlines()
file.close()
fail_file = open("fail_pdf.txt", "a", encoding='utf-8')
fail_file.close()
# year = input("year:")
# IP = input("IP start:")
year = input("爬取的年份:")
number_of_thread = 15
    # create a thread pool with number_of_thread threads
for i in range(number_of_thread):
Thread = threading.Thread
t = Thread(target=do_job)
t.start()
    # after the pool is created, wait 0.1 s and then push tasks into the queue
time.sleep(0.1)
cnt = 0
for i in lines:
        # sanitize the journal name: special characters from the site cannot be used in a local folder name
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
journal_name = re.sub(file_pattern_compile, '', i[:-1])
if not os.path.exists(journal_name):
os.makedirs(journal_name)
task = MyTask(0, journal_name, cnt)
queue.put(task)
cnt += 1
if cnt == number_of_thread*2:
# break
time.sleep(600)
cnt = 0
time.sleep(10)
queue.join()
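    # Expected input (inferred from the code above): test_journal.txt contains one journal name
    # per line; on start the script asks for the year to crawl and spreads the journals over the
    # worker threads, e.g.:
    #
    #     $ python main.py
    #     Year to crawl: 2018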
|
CreateAudio.py
|
import math
import os
import subprocess
import time
from copy import deepcopy
from multiprocessing import Process
from osr2mp4.Exceptions import AudioNotFound
from osr2mp4.osrparse.enums import Mod
from recordclass import recordclass
from scipy.io.wavfile import write
import numpy as np
from pydub import AudioSegment
from pydub import exceptions
from osr2mp4 import logger
from osr2mp4.AudioProcess.AddAudio import HitsoundManager
from osr2mp4.AudioProcess.Hitsound import Hitsound
from osr2mp4.AudioProcess.Utils import getfilenames
import os.path
from osr2mp4 import log_stream
Audio2p = recordclass("Audio2p", "rate audio")
def from_notwav(filename, settings):
if not os.path.isfile(filename):
raise FileNotFoundError
stream = log_stream()
subprocess.check_call([settings.ffmpeg, '-i', filename, '-ar', '44100', os.path.join(settings.temp, 'converted.wav'), '-y'], stdout=stream, stderr=stream)
a = AudioSegment.from_file(settings.temp + 'converted.wav')
return a
def read(f, settings, volume=1.0, speed=1.0, changepitch=True):
if speed != 1.0 and not changepitch:
stream = log_stream()
subprocess.check_call([settings.ffmpeg, '-i', f, '-codec:a', 'libmp3lame', '-filter:a', 'atempo={}'.format(speed), os.path.join(settings.temp, 'spedup.mp3'), '-y'], stdout=stream, stderr=stream)
f = os.path.join(settings.temp, "spedup.mp3")
if f[-4:] != ".wav":
a = from_notwav(f, settings)
else:
a = AudioSegment.from_file(f)
if volume > 0:
addvolume = 30 * math.log(volume, 10)
a += addvolume
else:
a = AudioSegment.silent(duration=a.duration_seconds * 1000, frame_rate=a.frame_rate)
if speed != 1.0:
if changepitch:
faster_senpai = a._spawn(a.raw_data, overrides={'frame_rate': int(a.frame_rate * speed)})
a = faster_senpai.set_frame_rate(a.frame_rate)
return pydubtonumpy(a)
def pydubtonumpy(audiosegment):
y = np.array(audiosegment.get_array_of_samples())
if audiosegment.channels == 2:
y = y.reshape((-1, 2))
if audiosegment.channels == 1:
y1 = np.zeros((len(y), 2), dtype=y.dtype)
y1[:, 0] = y * 0.5
y1[:, 1] = y * 0.5
y = y1
try:
h = max(2, audiosegment.sample_width) * 8
maxvalue = max(np.amax(y), 2 ** h)
except ValueError as e:
logger.error(repr(e))
maxvalue = 1
return audiosegment.frame_rate, np.float64(y) / maxvalue
def getaudiofromfile(filename, path, defaultpath, settings, volume=1.0, speed=1.0):
fmts = ["wav", "mp3", "ogg"]
for fmt in fmts:
try:
return read(os.path.join(path, filename + "." + fmt), settings, volume=volume, speed=speed)
except FileNotFoundError:
pass
except exceptions.CouldntDecodeError as e:
logger.error(repr(e) + " filename " + os.path.join(path, filename + "." + fmt))
return 1, np.zeros((0, 2), dtype=np.float32)
logger.warning("file not found %s, using default skin", filename)
if defaultpath is not None:
return getaudiofromfile(filename, defaultpath, None, settings, volume=volume, speed=speed)
logger.error("file not found %s", filename)
return 1, np.zeros((0, 2), dtype=np.float32)
def getaudiofrombeatmap(filename, beatmappath, path, defaultpath, settings, volume=1.0, speed=1.0):
try:
return read(os.path.join(beatmappath,filename + "." + "wav"), settings, volume=volume, speed=speed)
except FileNotFoundError:
try:
return read(os.path.join(beatmappath, filename + "." + "ogg"), settings, volume=volume, speed=speed)
except FileNotFoundError:
filename = ''.join(filter(lambda x: not x.isdigit(), filename))
return getaudiofromfile(filename, path, defaultpath, settings, volume=volume, speed=speed)
except exceptions.CouldntDecodeError:
return 1, np.zeros((0, 2), dtype=np.float32)
def setuphitsound(filenames, beatmappath, skinpath, defaultpath, settings=None):
bmapindex = 0
skinindex = 1
if settings.settings["Use skin's sound samples"]:
beatmappath = "reeeee"
for f in filenames[bmapindex]:
Hitsound.hitsounds[f] = Audio2p(*getaudiofrombeatmap(f, beatmappath, skinpath, defaultpath, settings, volume=settings.settings["Effect volume"]/100))
for f in filenames[skinindex]:
Hitsound.hitsounds[f] = Audio2p(*getaudiofromfile(f, skinpath, defaultpath, settings, volume=settings.settings["Effect volume"]/100))
Hitsound.spinnerbonus = Audio2p(*getaudiofromfile("spinnerbonus", skinpath, defaultpath, settings, volume=settings.settings["Effect volume"]/100))
Hitsound.miss = Audio2p(*getaudiofromfile("combobreak", skinpath, defaultpath, settings, volume=settings.settings["Effect volume"]/100))
Hitsound.sectionfail = Audio2p(*getaudiofromfile("sectionfail", skinpath, defaultpath, settings, volume=settings.settings["Effect volume"]/100))
Hitsound.sectionpass = Audio2p(*getaudiofromfile("sectionpass", skinpath, defaultpath, settings, volume=settings.settings["Effect volume"]/100))
for x in range(100, 150, 5):
speed = x/100
Hitsound.spinnerspin.append(Audio2p(*getaudiofromfile("spinnerspin", skinpath, defaultpath, settings, volume=settings.settings["Effect volume"]/200, speed=speed)))
def getoffset(offset, endtime, song):
if offset >= 0:
rendtime = endtime - offset
out = song.audio[int(offset / 1000 * song.rate):]
else:
offset = -offset
rendtime = endtime + offset
out = np.zeros((len(song.audio) + int(offset / 1000 * song.rate), 2), dtype=song.audio.dtype)
out[int(offset / 1000 * song.rate):] = song.audio
if endtime != -1:
e = min(int(rendtime / 1000 * song.rate), len(out))
out = out[:e]
return out
def apply_offset(song, offset):
if offset > 0:
out = np.zeros((len(song.audio) + int(offset / 1000 * song.rate), 2), dtype=song.audio.dtype)
out[int(offset / 1000 * song.rate):] = song.audio
else:
offset = -offset
out = song.audio[int(offset / 1000 * song.rate):]
return out
def processaudio(my_info, beatmap, offset, endtime, mods, settings):
try:
audioprc(my_info, beatmap, offset, endtime, mods, settings)
except Exception as e:
error = repr(e)
with open("error.txt", "w") as fwrite: # temporary fix
fwrite.write(repr(e))
logger.error("{} from audio\n\n\n".format(error))
raise
def get_actual_audio_filename(audio_filename, beatmap_path):
files_in_dir = os.listdir(beatmap_path)
for file in files_in_dir:
if(audio_filename.lower() == file.lower()):
return file
raise FileNotFoundError
def audioprc(my_info, beatmap, offset, endtime, mods, settings):
nc = Mod.Nightcore in mods
addmisssound = not (Mod.Relax in mods or Mod.Autopilot in mods)
skin_path = settings.skin_path
default_skinpath = settings.default_path
beatmap_path = settings.beatmap
audio_name = beatmap.general["AudioFilename"]
ccc = time.time()
try:
audio_name = get_actual_audio_filename(audio_name, beatmap_path)
song = Audio2p(*read(os.path.join(beatmap_path, audio_name), settings, volume=settings.settings["Song volume"]/100, speed=settings.timeframe/1000, changepitch=nc))
except FileNotFoundError:
raise AudioNotFound()
song.rate /= settings.timeframe/1000
song.audio = apply_offset(song, settings.settings["Song delay"])
filenames = getfilenames(beatmap, settings.settings["Ignore beatmap hitsounds"])
setuphitsound(filenames, beatmap_path, skin_path, default_skinpath, settings)
if not addmisssound:
Hitsound.miss = Audio2p(1, np.zeros((0, 2), dtype=np.float32))
hitsoundm = HitsoundManager(beatmap)
logger.debug("Done loading: %f", time.time() - ccc)
for x in range(len(my_info)):
hitsoundm.updatetimingpoint(my_info, x, song)
hitsoundm.addhitsound(my_info, x, song)
hitsoundm.addslidersound(my_info, x, song)
hitsoundm.addspinnerhitsound(my_info, x, song)
hitsoundm.addcombobreak(my_info, x, song)
hitsoundm.addsectionsound(my_info, x, song)
out = getoffset(offset, endtime, song)
write(settings.temp + 'audio.mp3', round(song.rate * settings.timeframe/1000), out)
def create_audio(my_info, beatmap_info, offset, endtime, settings, mods):
beatmap_info = deepcopy(beatmap_info)
if settings.process >= 1:
audio_args = (my_info, beatmap_info, offset, endtime, mods, settings,)
audio = Process(target=processaudio, args=audio_args)
audio.start()
return audio
else:
processaudio(my_info, beatmap_info, offset, endtime, mods, settings)
return None
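# A minimal sketch of how the helpers above fit together (assumptions: `settings` provides the
# ffmpeg/temp paths and volume/speed options used throughout, and "song.mp3" is a placeholder):
#
#     rate, samples = read("song.mp3", settings, volume=0.8, speed=1.5, changepitch=True)
#     song = Audio2p(rate, samples)           # recordclass holding sample rate + numpy samples
#     trimmed = getoffset(1000, 30000, song)  # keep the audio from 1 s up to the 30 s mark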
|
serviceManager.py
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from queue import Empty, Queue
from threading import Event, Thread
from typing import Dict, Iterable, Set, Optional
from toil.job import ServiceJobDescription
from toil.lib.throttle import LocalThrottle, throttle
from toil.jobStores.abstractJobStore import AbstractJobStore
from toil.toilState import ToilState
logger = logging.getLogger( __name__ )
class ServiceManager( object ):
"""
Manages the scheduling of services.
"""
def __init__(self, job_store: AbstractJobStore, toil_state: ToilState):
logger.debug("Initializing service manager")
self.__job_store = job_store
self.__toil_state = toil_state
# We call the jobs that have services they need "client" jobs.
# These are all the client jobs that are waiting for their services to
# start.
self.__waiting_clients: Set[str] = set()
# This is used to terminate the thread associated with the service
# manager
self.__terminate = Event()
# This is the input queue of jobs that have services that need to be started
self.__clients_in: Queue[str] = Queue()
# This is the output queue of jobs that have services that
# are already started
self.__clients_out: Queue[str] = Queue()
# This is the output queue of jobs that have services that are unable
# to start
self.__failed_clients_out: Queue[str] = Queue()
# This is the queue of services for the batch system to start
self.__services_out: Queue[str] = Queue()
self.__service_manager_jobs = 0 # The number of jobs the service manager is scheduling
# Set up the service-managing thread.
self.__service_starter = Thread(target=self.__start_services, daemon=True)
def services_are_starting(self, job_id: str) -> bool:
"""
Return True if the services for the given job are currently being started, and False otherwise.
"""
return job_id in self.__waiting_clients
def get_job_count(self) -> int:
"""
Get the total number of jobs we are working on (services and their parent non-service jobs).
"""
return self.__service_manager_jobs
def start(self) -> None:
"""
Start the service scheduling thread.
"""
self.__service_starter.start()
def put_client(self, client_id: str) -> None:
"""
Schedule the services of a job asynchronously.
When the job's services are running the ID for the job will
be returned by toil.leader.ServiceManager.get_ready_client.
:param client_id: ID of job with services to schedule.
"""
# Go get the client's description, which includes the services it needs.
client = self.__toil_state.get_job(client_id)
logger.debug("Service manager queueing %s as client", client)
# Add job to set being processed by the service manager
self.__waiting_clients.add(client_id)
# Add number of jobs managed by ServiceManager
self.__service_manager_jobs += len(client.services) + 1 # The plus one accounts for the root job
# Asynchronously schedule the services
self.__clients_in.put(client_id)
def get_ready_client(self, maxWait: float) -> Optional[str]:
"""
:param float maxWait: Time in seconds to wait to get a JobDescription before returning
:return: the ID of a client whose services are running, or None if no
such job is available.
"""
try:
client_id = self.__clients_out.get(timeout=maxWait)
self.__waiting_clients.remove(client_id)
assert self.__service_manager_jobs >= 0
self.__service_manager_jobs -= 1
return client_id
except Empty:
return None
def get_unservable_client(self, maxWait: float) -> Optional[str]:
"""
:param float maxWait: Time in seconds to wait to get a JobDescription before returning
:return: the ID of a client whose services failed to start, or None if
no such job is available.
"""
try:
client_id = self.__failed_clients_out.get(timeout=maxWait)
self.__waiting_clients.remove(client_id)
assert self.__service_manager_jobs >= 0
self.__service_manager_jobs -= 1
return client_id
except Empty:
return None
def get_startable_service(self, maxWait: float) -> Optional[str]:
"""
:param float maxWait: Time in seconds to wait to get a job before returning.
:return: the ID of a service job that the leader can start, or None if no such job exists.
"""
try:
service_id = self.__services_out.get(timeout=maxWait)
assert self.__service_manager_jobs >= 0
self.__service_manager_jobs -= 1
return service_id
except Empty:
return None
def kill_services(self, service_ids: Iterable[str], error: bool = False) -> None:
"""
Stop all the given service jobs.
        :param service_ids: Service jobStoreIDs to kill
:param error: Whether to signal that the service failed with an error when stopping it.
"""
for service_id in service_ids:
# Get the job description, which knows about the flag files.
service = self.__toil_state.get_job(service_id)
assert isinstance(service, ServiceJobDescription)
if error:
self.__job_store.deleteFile(service.errorJobStoreID)
self.__job_store.deleteFile(service.terminateJobStoreID)
def is_active(self, service_id: str) -> bool:
"""
Returns true if the service job has not been told to terminate.
:param service_id: Service to check on
:rtype: boolean
"""
service = self.__toil_state.get_job(service_id)
return self.__job_store.fileExists(service.terminateJobStoreID)
def is_running(self, service_id: str) -> bool:
"""
Returns true if the service job has started and is active
        :param service_id: Service to check on
:rtype: boolean
"""
service = self.__toil_state.get_job(service_id)
return (not self.__job_store.fileExists(service.startJobStoreID)) and self.is_active(service_id)
def check(self) -> None:
"""
Check on the service manager thread.
:raise RuntimeError: If the underlying thread has quit.
"""
if not self.__service_starter.is_alive():
raise RuntimeError("Service manager has quit")
def shutdown(self) -> None:
"""
Cleanly terminate worker threads starting and killing services. Will block
until all services are started and blocked.
"""
logger.debug('Waiting for service manager thread to finish ...')
start_time = time.time()
self.__terminate.set()
self.__service_starter.join()
# Kill any services still running to avoid deadlock
for services in list(self.__toil_state.servicesIssued.values()):
self.kill_services(services, error=True)
logger.debug('... finished shutting down the service manager. Took %s seconds', time.time() - start_time)
def __start_services(self) -> None:
"""
Thread used to schedule services.
"""
# Keep the user informed, but not too informed, as services start up
log_limiter = LocalThrottle(60)
# These are all keyed by ID
starting_services = set()
remaining_services_by_client = {}
service_to_client = {}
clients_with_failed_services = set()
while True:
with throttle(1.0):
if self.__terminate.is_set():
logger.debug('Received signal to quit starting services.')
break
try:
client_id = self.__clients_in.get_nowait()
client = self.__toil_state.get_job(client_id)
host_id_batches = list(client.serviceHostIDsInBatches())
logger.debug("Service manager processing client %s with %d batches of services", client, len(host_id_batches))
if len(host_id_batches) > 1:
# Have to fall back to the old blocking behavior to
# ensure entire service "groups" are issued as a whole.
self.__start_batches_blocking(client_id)
continue
# Found a new job that needs to schedule its services.
for batch in host_id_batches:
# There should be just one batch so we can do it here.
remaining_services_by_client[client_id] = len(batch)
for service_id in batch:
# Load up the service object.
service_job_desc = self.__toil_state.get_job(service_id)
# Remember the parent job
service_to_client[service_id] = client_id
# We should now start to monitor this service to see if
# it has started yet.
starting_services.add(service_id)
# Send the service JobDescription off to be started
logger.debug('Service manager is starting service job: %s, start ID: %s', service_job_desc, service_job_desc.startJobStoreID)
self.__services_out.put(service_id)
except Empty:
# No new jobs that need services scheduled.
pass
pending_service_count = len(starting_services)
if pending_service_count > 0 and log_limiter.throttle(False):
logger.debug('%d services are starting...', pending_service_count)
for service_id in list(starting_services):
service_job_desc = self.__toil_state.get_job(service_id)
if not self.__job_store.fileExists(service_job_desc.startJobStoreID):
# Service has started (or failed)
logger.debug('Service %s has removed %s and is therefore started', service_job_desc, service_job_desc.startJobStoreID)
starting_services.remove(service_id)
client_id = service_to_client[service_id]
remaining_services_by_client[client_id] -= 1
assert remaining_services_by_client[client_id] >= 0
del service_to_client[service_id]
if not self.__job_store.fileExists(service_job_desc.errorJobStoreID):
logger.error('Service %s has immediately failed before it could be used', service_job_desc)
                            # It probably hasn't filled in the promise that the job that uses the service needs.
clients_with_failed_services.add(client_id)
# Find if any clients have had *all* their services started.
ready_clients = set()
for client_id, remainingServices in remaining_services_by_client.items():
if remainingServices == 0:
if client_id in clients_with_failed_services:
logger.error('Job %s has had all its services try to start, but at least one failed', self.__toil_state.get_job(client_id))
self.__failed_clients_out.put(client_id)
else:
logger.debug('Job %s has all its services started', self.__toil_state.get_job(client_id))
self.__clients_out.put(client_id)
ready_clients.add(client_id)
for client_id in ready_clients:
del remaining_services_by_client[client_id]
def __start_batches_blocking(self, client_id: str) -> None:
"""
Wait until all the services for the given job are started, starting
them in batches that are all issued together.
"""
# Keep the user informed, but not too informed, as services start up
log_limiter = LocalThrottle(60)
# Start the service jobs in batches, waiting for each batch
# to become established before starting the next batch
for service_job_list in self.__toil_state.get_job(client_id).serviceHostIDsInBatches():
# When we get the job descriptions we store them here to go over them again.
wait_on = []
for service_id in service_job_list:
# Find the service object.
service_job_desc = self.__toil_state.get_job(service_id)
logger.debug("Service manager is starting service job: %s, start ID: %s", service_job_desc, service_job_desc.startJobStoreID)
assert self.__job_store.fileExists(service_job_desc.startJobStoreID)
# At this point the terminateJobStoreID and errorJobStoreID could have been deleted!
self.__services_out.put(service_id)
# Save for the waiting loop
wait_on.append(service_job_desc)
# Wait until all the services of the batch are running
for service_id in service_job_list:
# Find the service object.
service_job_desc = self.__toil_state.get_job(service_id)
while self.__job_store.fileExists(service_job_desc.startJobStoreID):
# Sleep to avoid thrashing
time.sleep(1.0)
if log_limiter.throttle(False):
logger.info('Service %s is starting...', service_job_desc)
# Check if the thread should quit
if self.__terminate.is_set():
return
# We don't bail out early here.
# We need to try and fail to start *all* the services, so they
# *all* come back to the leader as expected, or the leader will get
# stuck waiting to hear about a later dependent service failing. So
# we have to *try* to start all the services, even if the services
# they depend on failed. They should already have been killed,
# though, so they should stop immediately when we run them. TODO:
# this is a bad design!
# Add the JobDescription to the output queue of jobs whose services have been started
self.__clients_out.put(client_id)
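# Summary of the two scheduling paths above (descriptive only): clients whose
# services fit in a single batch are handled incrementally by the polling loop
# in __start_services, while clients with multiple service batches fall back to
# __start_batches_blocking, which issues each batch and waits for it to come up
# before issuing the next.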
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import shlex
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.compilers
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version,
is_windows, is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku,
windows_proof_rmtree, python_command, version_compare,
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
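# For reference (approximate; exact formatting varies by binutils version), a
# matching line in `readelf -d` output looks roughly like:
#   0x000000000000000e (SONAME)   Library soname: [libfoo.so.1]
# so the pattern above captures the value between the square brackets.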
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
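# For reference (exact wording varies by distribution): a real GCC's --version
# output includes a Free Software Foundation copyright notice, whereas Apple's
# /usr/bin/gcc is a clang wrapper and does not, which is what the check above
# relies on.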
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
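# Illustrative usage (hypothetical test, not part of this suite):
#   @skipIfNoExecutable('valgrind')
#   def test_something_under_valgrind(self):
#       ...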
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, e.g., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, e.g., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides the filename of an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
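# Illustrative usage (hypothetical): the yielded path points at an empty file
# that is removed on exit:
#   with temp_filename() as fname:
#       with open(fname, 'w') as f:
#           f.write('scratch data')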
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
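# Illustrative usage (hypothetical): code run inside the context behaves as if
# pkg-config were not installed:
#   with no_pkgconfig():
#       env = get_fake_env()
#       # dependency lookups via pkg-config will now fail to find the tool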
class PatchModule:
'''
Fancy monkey-patching! Whee! Can't use mock.patch because it only
patches in the local namespace.
'''
def __init__(self, func, name, impl):
self.func = func
assert(isinstance(name, str))
self.func_name = name
self.old_impl = None
self.new_impl = impl
def __enter__(self):
self.old_impl = self.func
exec('{} = self.new_impl'.format(self.func_name))
def __exit__(self, *args):
exec('{} = self.old_impl'.format(self.func_name))
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST)
# Test that bad initialization fails
self.assertRaises(TypeError, cargsfunc, [])
self.assertRaises(TypeError, cargsfunc, [], [])
self.assertRaises(TypeError, cargsfunc, cc, [], [])
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(['-I.', '-I..'], cc)
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(['-I.', '-I.'], cc), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not prepended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', mesonbuild.compilers.CompilerType.GCC_STANDARD, False, MachineChoice.HOST)
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Cannot be used as a context manager because we need to
# open the file a second time, which is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix())
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix())
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix())
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE)) + [None]
for s1, s2 in zip(sections[:], sections[1:]):
if s1.group(1) == "Universal options":
# Extract the content for this section
end = s2.start() if s2 is not None else len(md)
content = md[s1.end():end]
subsections = list(re.finditer(r"^### (.+)$", content, re.MULTILINE)) + [None]
for sub1, sub2 in zip(subsections[:], subsections[1:]):
if sub1.group(1) == "Directories" or sub1.group(1) == "Core options":
# Extract the content for this subsection
sub_end = sub2.start() if sub2 is not None else len(content)
subcontent = content[sub1.end():sub_end]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) .* \|", subcontent, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(len(found_entries & arches), 0)
found_entries |= arches
break
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md") as f:
md = f.read()
self.assertIsNotNone(md)
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
for s1, s2 in zip(sections[::2], sections[1::2]):
if s1.group(1) == "CPU families":
# Extract the content for this section
content = md[s1.end():s2.start()]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt") as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_syntax_highlighting_files(self):
'''
Ensure that syntax highlighting files were updated for new functions in
the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# Xcode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(tempfile.mkdtemp(dir=os.getcwd()))
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, workdir=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ.copy(),
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, extra_args=None, default_args=True, inprocess=False):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, extra_args=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir)
def clean(self):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir)
def run_tests(self, inprocess=False):
if not inprocess:
self._run(self.test_command, workdir=self.builddir)
else:
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
os.environ['DESTDIR'] = self.installdir
self._run(self.install_command, workdir=self.builddir)
def uninstall(self):
self._run(self.uninstall_command, workdir=self.builddir)
def run_target(self, target):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '91 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '91 default options')
prefix = '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, args)
self.wipe()
# libdir not being inside prefix is not ok
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
for prefix in expected:
args = ['--prefix', prefix]
self.init(testdir, args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '169 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '55 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
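# Introspection does not guarantee target order, so normalize it so that
# the static library entry comes first and the executable second.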
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '63 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a follow-up PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '145 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
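# As above, normalize the introspection target order, which is not guaranteed.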
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, ['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '59 test env doesn\'t stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests explicitly using the setup that is set as the default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
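# Run the test command and check that exactly `failure_count` tests failed:
# a clean run is only valid when zero failures were expected; otherwise the
# return code of the failing run must equal the expected failure count.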
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '134 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '135 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in shlex.split(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in shlex.split(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows():
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), shlex.split(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_OSX)
elif is_windows():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_MINGW)
elif is_cygwin():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_CYGWIN)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_STANDARD)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_OSX)
elif is_windows():
# Not implemented yet
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_MINGW)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_STANDARD)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_OSX)
elif is_windows():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_WIN)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_STANDARD)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += shlex.quote(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += shlex.quote(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '138 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
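# Record which compiler handles each source language for every target.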
for cmd in self.get_compdb():
# Get compiler
split = shlex.split(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '137 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse shlex.split() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
os.environ[env_var] = '-D{}="{}"'.format(define, value)
os.environ['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, ['-D{}={}'.format(define, value)])
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '114 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '61 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '95 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def dist_impl(self, vcs_init):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
vcs_init(project_dir)
self.init(project_dir)
self.build('dist')
distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
checksumfile = distfile + '.sha256sum'
self.assertPathExists(distfile)
self.assertPathExists(checksumfile)
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in their rpath, which ensures that they
are relocatable and that builds are reproducible, since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '43 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
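# The compile command may use Unix (-D) or MSVC (/D) define syntax, with or
# without per-argument quoting, so accept any of the four spellings.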
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '155 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '155 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
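# Detect the host C compiler, its static linker, and the platform-specific
# object and shared-library suffixes used by the prebuilt-artifact tests below.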
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
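# Compile a single source file to an object file, using MSVC-style or
# GCC-style arguments depending on the compiler's argument syntax.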
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
# Build the archive command from the detected static linker; this covers
# both ar-style and MSVC lib-style archivers.
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
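# Compile the source and link it into a shared library: MSVC produces an
# import library, non-Windows/non-macOS compilers add -fPIC, and a -soname
# is set everywhere except macOS.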
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.compiler_type.is_windows_compiler or
compiler.compiler_type.is_osx_compiler):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build.
Also test that it's not a hard error to have unsatisfiable library deps,
since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
os.environ['PKG_CONFIG_LIBDIR'] = self.builddir
try:
self.init(testdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
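# Assert that the introspected build option `name` currently has the given value.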
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '44 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
for lang in ('c', 'cpp'):
for target_type in ('executable', 'library'):
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main() {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/cc'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""")
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, ['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, ['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests, this test will fail, so mock the environment, pop
# the variable, then test.
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, ['--cross-file=' + name], inprocess=True)
self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '178 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '187 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
# build library
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}:
# Static libraries are not linkable with -l under MSVC because Meson installs them
# as .a files, which unix_args_to_native will not recognize since it expects libraries
# to use the .lib extension. For a DLL the import library is installed as .lib. Thus for
# MSVC this test needs to use shared libraries to exercise the path-resolving logic in
# the dependency-generation code path.
extra_args = ['--default-library', 'static']
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
os.environ["LDFLAGS"] = '-L{}'.format(libdir.replace('\\', '/'))
self.init(os.path.join(testdirbase, 'exe'))
del os.environ["LDFLAGS"]
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set a wrong option for unknown subprojects or
# languages because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with shlex
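# Rough sketch of the expected shlex behaviour (mirrors the assertion below):
#   shlex.split('-Dfoo -Dbar "-Dthird=one two"')
#   # -> ['-Dfoo', '-Dbar', '-Dthird=one two']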
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
# Setting the same option a 2nd time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
# Ignore KeyError; it happens on CI for compilers that do not
# support b_sanitize. We have to test with a base option because
# they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '215 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targetting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targetting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targetting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targetting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
of = open(mfile, 'w')
of.write("project('foobar', 'c')\n")
of.close()
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimization to 3 should cause buildtype
# to go to release mode.
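# Rough mapping exercised in this test (only the combinations asserted here):
#   buildtype 'debug'   -> optimization '0', debug True
#   buildtype 'release' -> optimization '3', debug False
#   buildtype 'plain'   -> optimization '0', debug False (after -Ddebug=false)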
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(testdir,
'native_pkgconfig')
self.init(testdir, extra_args=['-Dstart_native=false'])
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'])
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
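# e.g. if the running meson is '0.50.1' this stores '0.50.2' -- same
# major/minor, bumped patch -- which should not trigger a wipe from scratch.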
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '162 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
# Path is not a chdir context manager, so change into the build directory
# explicitly to make sure --wipe really runs from inside it.
self.addCleanup(os.chdir, os.getcwd())
os.chdir(self.builddir)
self.init(testdir, extra_args=['--wipe'])
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
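# Sketch of the id layout checked below (names here are just the test inputs):
#   <short hash of the subdir path>@@<target name><suffix>
# e.g. 'some/obscure/subdir' + 'target-id' + '@suffix' -> '5e002d3@@target-id@suffix'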
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '36 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '44 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '47 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '103 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '46 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '79 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '79 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '53 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '56 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
dependencies_typelist = [
('name', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '60 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '61 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, extra_args=None, langs=None, meson_version=None, options=None):
'''
Assert that running meson configure on the specified @contents raises
an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
# Force tracebacks so we can detect them properly
os.environ['MESON_FORCE_BACKTRACE'] = '1'
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args, inprocess=True)
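# Typical usage, taken from the dependency tests further down, e.g.:
#   self.assertMesonRaises("dependency('zlibfail')", self.dnf)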
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
os.environ['BOOST_ROOT'] = 'relative/path'
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf))
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is output when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is output when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
os.environ['MESON_UNIT_TEST'] = '1'
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False)
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# Check the exact set so that people have to update this test whenever the list of ignored libs changes
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '153 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
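# For reference, the second line of `otool -L` output looks roughly like this
# (exact paths vary, this is only an illustrative example):
#   /usr/lib/libfoo.7.dylib (compatibility version 7.0.0, current version 7.0.0)
# from which the regex above captures ('7.0.0', '7.0.0').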
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# cause an error during installation
os.environ["LDFLAGS"] = "-Wl,-rpath,/foo/bar"
self.init(testdir)
self.build()
self.install()
del os.environ["LDFLAGS"]
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
os.environ['PKG_CONFIG_LIBDIR'] = privatedir1
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen', 'dependencies')
self.init(testdir)
privatedir2 = self.privatedir
os.environ['PKG_CONFIG_LIBDIR'] = os.pathsep.join([privatedir1, privatedir2])
self._run(['pkg-config', 'dependency-test', '--validate'])
# pkg-config strips some duplicated flags so we have to parse the
# generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires']).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private']).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, ['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, ['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, ['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
Oflag = '-O3'
os.environ['CFLAGS'] = os.environ['CXXFLAGS'] = Oflag
testdir = os.path.join(self.common_test_dir, '40 has function')
self.init(testdir)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '40 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p):
lang_std = p + '_std'
# Check that all the listed -std=xxx options for this compiler work
# just fine when used
for v in compiler.get_options()[lang_std].choices:
if (compiler.get_id() == 'clang' and '17' in v and
(version_compare(compiler.version, '<5.0.0') or
(compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):
continue
if (compiler.get_id() == 'clang' and '2a' in v and
(version_compare(compiler.version, '<6.0.0') or
(compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):
continue
if (compiler.get_id() == 'gcc' and '2a' in v and version_compare(compiler.version, '<8.0.0')):
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, ['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
env_flags = p.upper() + 'FLAGS'
os.environ[env_flags] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options into errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '46 subproject')
self.init(testdir, extra_args='--unity=subprojects')
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '63 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '196 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask in effect when the worktree
was checked out or when the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
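# Illustrative note: with the default install umask of 022, the installed mode
# is the file's nominal mode with the umask bits cleared, e.g.
#   0o777 & ~0o022 == 0o755  ->  stat.filemode() ends in 'rwxr-xr-x'
#   0o666 & ~0o022 == 0o644  ->  stat.filemode() ends in 'rw-r--r--'
# which matches the want_mode values asserted above.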
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
os.environ['PKG_CONFIG_PATH'] = testdir
self.init(testdir)
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, ['-Db_sanitize=address'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, ['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write('''[binaries]
c = '/usr/bin/cc'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
'''.format(os.path.join(testdir, 'some_cross_tool.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, ['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
os.environ['MESON_UNIT_TEST_PRETEND_GLIB_OLD'] = "1"
mesonbuild.modules.gnome.native_glib_version = '2.20'
self.init(testdir, inprocess=True)
self.build()
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, ['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
os.environ['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'])
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'])
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2)
self.build()
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '58 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, '')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Test that a static library installed together with its pkg-config file can
be consumed by another project via PKG_CONFIG_PATH.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
os.environ['PKG_CONFIG_PATH'] = pkg_dir
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'))
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On Windows, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '52 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '202 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
as LD_LIBRARY_PATH, etc, so this test is skipped.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
os.environ['LIBRARY_PATH'] = os.path.join(installdir, self.libdir)
os.environ['PKG_CONFIG_PATH'] = os.path.join(installdir, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
# test uninstalled
self.run_tests()
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir)
self.build()
# test running after installation
self.install()
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
os.chdir(subdir)
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
os.chdir(curdir)
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('63 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('196 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('196 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '51 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '55 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library appears exactly once on this line,
# i.e. that duplicate link arguments were deduplicated.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '55 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def test_std_remains(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '50 std remains')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '58 identity cross')
crossfile = tempfile.NamedTemporaryFile(mode='w')
os.environ['CC'] = '"' + os.path.join(testdir, 'build_wrapper.py') + '"'
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
os.environ['CFLAGS'] = '-DBUILD_ENVIRONMENT_ONLY'
self.init(testdir)
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '50 std remains')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks or compiler checks to fail, but does cause configure to fail if
running a cross-built executable is required (custom_target or run_target),
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
os.environ['MESON_FORCE_BACKTRACE'] = '1'
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false', inprocess=True)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false', inprocess=True)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'])
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '55 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, ['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine; if it is not,
# or if the python headers can't be found, the test will raise
# MESON_SKIP_TEST. We could check beforehand what version of python is
# available, but that's a bit of a chicken-and-egg situation, since that
# is the job of the module itself, so we just ask for forgiveness rather
# than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, ['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, ['-Dpython=not-python'])
self.wipe()
# While 'dir' is a valid command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, ['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
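# Illustrative note: the rewriter accepts either a path to a JSON file (as in
# most tests below) or the JSON text itself, e.g. a command list such as
#   [{"type": "target", "target": "trivialprog1",
#     "operation": "src_add", "sources": ["a1.cpp"]}]
# and reports the resulting state as JSON on stderr, which rewrite_raw() parses.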
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
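# Illustrative note: for values = {'binaries': {'bash': '/path/to/wrapper.py'}}
# (a hypothetical path) the helper above writes a file containing
#   [binaries]
#   bash='/path/to/wrapper.py'
# i.e. the same INI-style format meson expects for --native-file arguments.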
def helper_create_binary_wrapper(self, binary, dir_=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in kwargs:
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in kwargs.items():
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
if mesonbuild.environment.detect_msys2_arch():
f.write(r'@python3 {} %*'.format(filename))
else:
f.write('@py -3 {} %*'.format(filename))
return batfile
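# Illustrative note: the generated wrapper forwards all arguments to the real
# binary, except that each keyword passed to helper_create_binary_wrapper()
# (e.g. version='12345') turns the matching flag (--version) into a canned
# reply, which is how the tests below fake compiler and tool version strings.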
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
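# Illustrative note: a cb callback passed to helper_for_compiler() returns a
# (binary, expected_compiler_id) pair, e.g. ('clang', 'clang') when the default
# C compiler is gcc; the helper then injects that binary into
# env.binaries[for_machine] and checks that detection now reports the new id.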
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
self._simple_test('python', 'python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr')
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functioality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
sys.exit(main())
|
rpc_methods.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines the task RPC methods."""
import logging
import os
import sys
import threading
#pylint: disable=relative-import
import common_lib
import process
class RPCMethods(object):
"""Class exposing RPC methods."""
_dotted_whitelist = ['subprocess']
def __init__(self, server):
self._server = server
self.subprocess = process.Process
def _dispatch(self, method, params):
obj = self
if '.' in method:
      # Allow only whitelisted dotted names
name, method = method.split('.')
assert name in self._dotted_whitelist
obj = getattr(self, name)
return getattr(obj, method)(*params)
def Echo(self, message):
"""Simple RPC method to print and return a message."""
logging.info('Echoing %s', message)
return 'echo %s' % str(message)
def AbsPath(self, path):
"""Returns the absolute path."""
return os.path.abspath(path)
def Quit(self):
"""Call _server.shutdown in another thread.
This is needed because server.shutdown waits for the server to actually
quit. However the server cannot shutdown until it completes handling this
call. Calling this in the same thread results in a deadlock.
"""
t = threading.Thread(target=self._server.shutdown)
t.start()
def GetOutputDir(self):
"""Returns the isolated output directory on the task machine."""
return common_lib.GetOutputDir()
def WriteFile(self, path, text, mode='wb+'):
"""Writes a file on the task machine."""
with open(path, mode) as fh:
fh.write(text)
def ReadFile(self, path, mode='rb'):
"""Reads a file from the local task machine."""
with open(path, mode) as fh:
return fh.read()
def PathJoin(self, *parts):
"""Performs an os.path.join on the task machine.
    This is needed because there is no guarantee that os.sep is the same
    across all machines in a particular test. This method joins the path
    parts locally to ensure the correct separator is used.
"""
return os.path.join(*parts)
def ListDir(self, path):
"""Returns the results of os.listdir."""
return os.listdir(path)
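# Hedged usage sketch (not part of the original module): exercises the
# _dispatch routing above. Plain method names resolve on RPCMethods itself;
# dotted names are allowed only when the prefix is in _dotted_whitelist
# (here 'subprocess', which maps to process.Process). The _FakeServer class
# is a placeholder standing in for the real RPC server object.
if __name__ == '__main__':
  class _FakeServer(object):
    def shutdown(self):
      pass
  methods = RPCMethods(_FakeServer())
  # Plain name -> RPCMethods.Echo
  print(methods._dispatch('Echo', ['hello']))  # prints 'echo hello'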
|
routes.py
|
# Copyright (c) 2020 The Blocknet developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import json
import logging
import os
import threading
import requests
from flask import Blueprint, Response, g, jsonify, request
from requests.auth import HTTPDigestAuth
from plugins.ethpassthrough import util
from plugins.ethpassthrough.database.models import db_session, select, Project
from plugins.ethpassthrough.middleware import authenticate
from plugins.ethpassthrough.util.request_handler import RequestHandler
app = Blueprint('eth_passthrough', __name__)
req_handler = RequestHandler()
@app.errorhandler(400)
def bad_request_error(error):
response = jsonify({
        'error': 'Bad Request: ' + str(error)
})
return response
@app.errorhandler(500)
def internal_server_error(error):
response = jsonify({
'error': 'Internal Server Error'
})
return response
@app.errorhandler(401)
def unauthorized_error(error):
response = jsonify({
'error': 'Unauthorized User Access'
})
return response
@app.route('/all_projects', methods=['GET'])
def all_projects():
if not os.environ.get('DEBUG', False):
return Response({}, 401)
results = []
try:
with db_session:
query = select(p for p in Project)
results = [{
'name': p.name,
# 'api_key': p.api_key,
'api_token_count': p.api_token_count,
'used_api_tokens': p.used_api_tokens,
'expires': str(p.expires),
'active': p.active,
} for p in query]
except Exception as e:
logging.error(e)
return jsonify(results)
@app.route('/xrs/eth_passthrough/<project_id>', methods=['POST'])
@authenticate
def handle_request(project_id):
headers = {
'PROJECT-ID': project_id,
'API-TOKENS': g.project.api_token_count,
'API-TOKENS-USED': g.project.used_api_tokens,
'API-TOKENS-REMAINING': g.project.api_token_count - g.project.used_api_tokens
}
data = []
batch = False
try:
req_data = request.get_json()
if not req_data:
return bad_request_error('missing parameters')
# Check if xrouter call (this only has a single request)
if util.is_xrouter_call(req_data):
data.append(util.make_jsonrpc_data(req_data))
else: # Look for multiple requests (list of jsonrpc calls)
if isinstance(req_data, list):
batch = True
for r in req_data:
data.append(util.make_jsonrpc_data(r))
else:
data.append(util.make_jsonrpc_data(req_data))
if not data:
raise ValueError('failed to parse json data')
# Check each json rpc call
for d in data:
method = d['method']
params = d['params']
logging.debug('Received Method: {}, Params: {}'.format(method, params))
env_disallowed_methods = os.environ.get('ETH_HOST_DISALLOWED_METHODS',
'eth_accounts,db_putString,db_getString,db_putHex,db_getHex')
if method in set(env_disallowed_methods.split(',')):
return unauthorized_error(f'disallowed method {method}')
except Exception as e:
logging.debug(e)
return Response(headers=headers, response=json.dumps({
'message': "malformed json post data",
'error': 1000
}))
try:
host = os.environ.get('ETH_HOST', 'http://localhost:8545')
eth_user = os.environ.get('ETH_HOST_USER', '')
eth_pass = os.environ.get('ETH_HOST_PASS', '')
        json_headers = {'content-type': 'application/json'}
        results = []
        # Make multiple requests to geth endpoint and store results
        if eth_user:  # only set auth params if defined
            auth = HTTPDigestAuth(eth_user, eth_pass)
            for d in data:
                response = requests.post(host, headers=json_headers, data=json.dumps(d), auth=auth, timeout=15)
                results.append(response.json())
        else:
            for d in data:
                response = requests.post(host, headers=json_headers, data=json.dumps(d), timeout=15)
                results.append(response.json())
# Update api count in background
update_api_thread = threading.Thread(target=update_api_count, name="update_api_count", args=[project_id])
update_api_thread.start()
# If batch request return list
return Response(headers=headers, response=json.dumps(results if batch or len(results) > 1 else results[0]))
except Exception as e:
logging.debug(e)
response = {
'message': "An error has occurred!",
'error': 1000
}
return Response(headers=headers, response=json.dumps(response), status=400)
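# Illustrative note (assumption, not from the original source): the handler
# above re-reads ETH_HOST_DISALLOWED_METHODS on each request, so the block
# list can be changed at deploy time without code changes, e.g.:
#
#   export ETH_HOST_DISALLOWED_METHODS="eth_accounts,eth_sendTransaction"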
@app.route('/xrs/eth_passthrough', methods=['HEAD', 'GET'])
def eth_passthough_root():
return '''
<h1>eth_passthrough is supported on this host</h1>
<div>
To get started create a project:
curl -X POST -d \'{"id": 1, "method": "request_project", "params": []}\' http://host:port/xrs/eth_passthrough
</div>
'''
@app.route('/xrs/eth_passthrough', methods=['POST'])
def xrouter_call():
try:
json_data = request.get_json(force=True)
except Exception as e:
logging.debug(e)
return bad_request_error('malformed json post data')
if 'method' in json_data and json_data['method'] == 'request_project':
project = req_handler.get_project()
logging.info('Project Requested: {}'.format(project))
return jsonify(project)
# Support XRouter calls to eth_passthrough. XRouter posts an array of parameters.
# The expected format for eth_passthrough is:
# [string, string, string_json_array]
# ["project_id", "method", "[parameters...]"]
if isinstance(json_data, list) and len(json_data) >= 3:
project_id = json_data[0]
        if project_id is None or project_id == '':
return bad_request_error('Invalid project id')
data = util.make_jsonrpc_data(json_data)
if not data:
return bad_request_error('invalid post data')
# Check xrouter requests for api key
api_key = util.get_api_key(json_data)
return req_handler.post_eth_proxy_project(request.host, data, project_id, api_key)
return eth_passthough_root()
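# Illustrative only (assumption, not from the original source): an XRouter-style
# POST body for the handler above might look like the list below -- project id,
# method name, and a JSON-encoded parameter array, matching the format comment
# in xrouter_call. The project id value is a placeholder.
#
#   ["example-project-id", "eth_getBalance",
#    "[\"0x0000000000000000000000000000000000000000\", \"latest\"]"]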
def update_api_count(project_id):
res = req_handler.post_update_api_count(project_id)
logging.debug('update_api_count {} {}'.format(project_id, res))
|
interactive.py
|
# Copyright (C) 2003-2007 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from __future__ import print_function
import socket
import sys
# windows does not have termios...
try:
import termios
import tty
has_termios = True
except ImportError:
has_termios = False
def interactive_shell(chan):
if has_termios:
posix_shell(chan)
else:
windows_shell(chan)
def posix_shell(chan):
import select
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = chan.recv(1024)
if len(x) == 0:
print('\r\n*** EOF\r\n', end=' ')
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(chan):
import threading
sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
def writeall(sock):
while True:
data = sock.recv(256)
if not data:
sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
sys.stdout.flush()
break
sys.stdout.write(data)
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(chan,))
writer.start()
try:
while True:
d = sys.stdin.read(1)
if not d:
break
chan.send(d)
except EOFError:
# user hit ^Z or F6
pass
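# Hedged usage sketch (not part of the original demo): one way to drive
# interactive_shell() with a Paramiko SSHClient. The hostname and credentials
# below are placeholders and must be replaced with real values.
if __name__ == '__main__':
    import paramiko
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('ssh.example.com', username='user', password='secret')
    chan = client.invoke_shell()
    interactive_shell(chan)
    chan.close()
    client.close()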
|
test_state_context.py
|
"""Tests the substate_context
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import threading
import multiprocessing
from six import StringIO
from pyexperiment import state
from pyexperiment.state_context import substate_context
from pyexperiment.state_context import thread_state_context
from pyexperiment.state_context import processing_state_context
from pyexperiment.utils.stdout_redirector import stdout_err_redirector
class TestSubStateContext(unittest.TestCase):
"""Test the substate_context
"""
def tearDown(self):
"""Teardown test fixture
"""
state.reset_instance()
def test_set_get_first_level(self):
"""Test setting, getting sub-state at the lowest level
"""
with substate_context('test'):
state['a'] = 123
self.assertEqual(state['a'], 123)
self.assertEqual(state['test.a'], 123)
self.assertRaises(KeyError, state.__getitem__, 'a')
def test_set_get_higher_levels(self):
"""Test setting, getting sub-state at the higher levels
"""
with substate_context('test'):
state['a.b'] = 123
state['c.d.e'] = 345
self.assertEqual(state['a.b'], 123)
self.assertEqual(state['c.d.e'], 345)
self.assertEqual(state['test.a.b'], 123)
self.assertEqual(state['test.c.d.e'], 345)
self.assertRaises(KeyError, state.__getitem__, 'a.b')
self.assertRaises(KeyError, state.__getitem__, 'c.d.e')
def test_global_state(self):
"""Test setting, getting global state in sub-state context
"""
with substate_context('test'):
state['a.b'] = 123
state['c.d.e'] = 345
state['__foo'] = 42
state['__bar.foo'] = 43
self.assertEqual(state['a.b'], 123)
self.assertEqual(state['c.d.e'], 345)
self.assertEqual(state['__foo'], 42)
self.assertEqual(state['__bar.foo'], 43)
self.assertEqual(state['test.a.b'], 123)
self.assertEqual(state['test.c.d.e'], 345)
self.assertEqual(state['__foo'], 42)
self.assertEqual(state['__bar.foo'], 43)
self.assertRaises(KeyError, state.__getitem__, 'a.b')
self.assertRaises(KeyError, state.__getitem__, 'c.d.e')
def test_get_section(self):
"""Test getting a section of the state
"""
with substate_context('test'):
state['a.a'] = 12
state['a.b'] = 13
self.assertIn('a', state)
self.assertIn('a.a', state)
self.assertIn('a.b', state)
self.assertEqual(state['a.a'], 12)
self.assertEqual(state['a.b'], 13)
def test_get_nonexisting(self):
"""Test getting an item of the state that does not exist
"""
with substate_context('test'):
self.assertRaises(KeyError, lambda: state['bla'])
def test_iterate(self):
"""Test iterating over sub state
"""
with substate_context('test'):
state['a'] = 1
state['b'] = 2
for elem in state:
if elem == 'a':
self.assertEqual(state[elem], 1)
elif elem == 'b':
self.assertEqual(state[elem], 2)
else:
assert False
class TestThreadStateContext(unittest.TestCase):
"""Test the thread_state_context
"""
def tearDown(self):
"""Teardown test fixture
"""
state.reset_instance()
def test_basic_functionality(self):
"""Test setting, getting sub-state in 20 threads
"""
with thread_state_context():
def worker(i):
"""thread worker function"""
state[str(i)] = i
self.assertEqual(state[str(i)], i)
threads = []
for i in range(20):
thread = threading.Thread(target=worker, args=(i,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
for i in range(len(threads)):
self.assertEqual(state[str(i)], i)
def test_delete_nonexisting(self):
"""Test deleting non-existing sub-state in threads
"""
with thread_state_context():
def worker():
"""thread worker function"""
def dell():
"""Test function"""
del state['foo']
self.assertRaises(KeyError, dell)
threads = []
for _ in range(20):
thread = threading.Thread(target=worker)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def test_after_exception(self):
"""Test setting, getting state after exception in threads
"""
state['a'] = 1
buf_out = StringIO()
buf_err = StringIO()
try:
with stdout_err_redirector(buf_out, buf_err):
with thread_state_context():
def worker():
"""thread worker function"""
raise RuntimeError
threads = []
for _ in range(20):
thread = threading.Thread(target=worker)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
raise RuntimeError
except RuntimeError:
pass
self.assertEqual(state['a'], 1)
def worker1(i):
"""Process worker function, needs to be defined at top level"""
state[str(i)] = i
if not state[str(i)] == i:
return False
else:
return True
def worker2(i):
"""Process worker function, needs to be defined at top level"""
state[str(i)] = 'bla'
del state[str(i)]
def worker3(i):
"""Process worker function, needs to be defined at top level"""
try:
_ = state[str(i)]
except KeyError:
return True
return False
def worker4():
"""Process worker function, needs to be defined at top level"""
try:
state[[1, 2, 3]] = 12
except TypeError:
return True
return False
def worker5():
"""Process worker function, needs to be defined at top level"""
try:
del state['bla']
except KeyError:
return True
return False
class TestProcessingStateContext(unittest.TestCase):
"""Test the processing_state_context
"""
def tearDown(self):
"""Teardown test fixture
"""
state.reset_instance()
def test_basic_functionality(self):
"""Test setting, getting state in 4 processes
"""
with processing_state_context():
n_jobs = 2
pool = multiprocessing.Pool(processes=4)
results = []
for i in range(n_jobs):
results.append(pool.apply_async(worker1, (i,)))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
self.assertEqual(state[str(i)], i)
def test_deleting(self):
"""Test deleting state in 4 processes
"""
with processing_state_context():
n_jobs = 2
pool = multiprocessing.Pool(processes=4)
results = []
for i in range(n_jobs):
results.append(pool.apply_async(worker2, (i,)))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertNotIn(str(i), state)
def test_raises_on_getting(self):
"""Test getting non-existing state in 4 processes
"""
with processing_state_context():
n_jobs = 200
pool = multiprocessing.Pool(processes=4)
results = []
for i in range(n_jobs):
results.append(pool.apply_async(worker3, (i,)))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
def test_raises_on_setting(self):
"""Test setting bad state in 4 processes
"""
with processing_state_context():
n_jobs = 200
pool = multiprocessing.Pool(processes=4)
results = []
for _ in range(n_jobs):
results.append(pool.apply_async(worker4))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
def test_raises_on_deleting(self):
"""Test deleting bad state in 4 processes
"""
with processing_state_context():
n_jobs = 200
pool = multiprocessing.Pool(processes=4)
results = []
for _ in range(n_jobs):
results.append(pool.apply_async(worker5))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
def test_after_exception(self):
"""Test deleting bad state in 4 processes
"""
state['a'] = 12
try:
with processing_state_context():
raise RuntimeError
except RuntimeError:
pass
self.assertEqual(state['a'], 12)
if __name__ == '__main__':
unittest.main()
|
dispatch.py
|
"""
File : dispatch.py
Author : ian
Created : 04-21-2017
Last Modified By : ian
Last Modified On : 04-21-2017
***********************************************************************
The MIT License (MIT)
Copyright © 2017 Ian Cooper <ian_hammond_cooper@yahoo.co.uk>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
***********************************************************************
"""
import logging
import time
from enum import Enum
from multiprocessing import Event, Process
from threading import Thread
from typing import Callable, Dict
from brightside.channels import Channel
from brightside.command_processor import CommandProcessor, Request
from brightside.connection import Connection
from brightside.exceptions import ConfigurationException, MessagingException
from brightside.message_factory import create_quit_message
from brightside.message_pump import MessagePump
from brightside.messaging import BrightsideConsumerConfiguration, BrightsideConsumer, BrightsideMessage
class Performer:
def __init__(self,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request],
logger: logging.Logger=None
) -> None:
"""
Each Performer abstracts a process running a message pump.
That process is forked from the parent, as we cannot guarantee a message pump is only I/O bound and thus will
not scale because of the GIL.
The Performer is how the supervisor (the dispatcher) tracks the workers it has created
The Performer needs:
:param channel_name: The name of the channel we want to create a sub-process for
:param connection: The connection to the broker
        :param consumer_factory: We need a user supplied callback to provide us an instance of the consumer for
        the broker we are using. Arame? Something else?
        :param command_processor_factory: We need a user supplied callback to create a command processor with
subscribers, policies, outgoing tasks queues etc.
:param mapper_func: We need a user supplied callback to map on the wire messages to requests
"""
        # TODO: The parameter needs to be a connection, not an ArameConnection as we can't decide to create an Arame Consumer
# here. Where do we make that choice?
self._channel_name = channel_name
self._connection = connection
self._consumer_configuration = consumer_configuration
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
self._logger = logger or logging.getLogger(__name__)
def stop(self) -> None:
self._consumer_configuration.pipeline.put(create_quit_message())
def run(self, started_event: Event) -> Process:
p = Process(target=_sub_process_main, args=(
started_event,
self._channel_name,
self._connection,
self._consumer_configuration,
self._consumer_factory,
self._command_processor_factory,
self._mapper_func))
self._logger.debug("Starting worker process for channel: %s on exchange %s on server %s",
self._channel_name, self._connection.exchange, self._connection.amqp_uri)
p.start()
started_event.wait(timeout=1)
return p
def _sub_process_main(started_event: Event,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
    This is the main method for the sub-process. Everything we need to create the message pump and
    channel must be passed in as parameters that can be pickled, because they will be serialized
    into this process when it runs. The data should be value types, not reference types, as we will receive a copy of the original.
Inter-process communication is signalled by the event - to indicate startup - and the pipeline to facilitate a
sentinel or stop message
:param started_event: Used by the sub-process to signal that it is ready
:param channel_name: The name we want to give the channel to the broker for identification
:param connection: The 'broker' connection
:param consumer_configuration: How to configure our consumer of messages from the channel
:param consumer_factory: Callback to create the consumer. User code as we don't know what consumer library they
want to use. Arame? Something else?
:param command_processor_factory: Callback to register subscribers, policies, and task queues then build command
processor. User code that provides us with their requests and handlers
:param mapper_func: We need to map between messages on the wire and our handlers
:return:
"""
logger = logging.getLogger(__name__)
consumer = consumer_factory(connection, consumer_configuration, logger)
channel = Channel(name=channel_name, consumer=consumer, pipeline=consumer_configuration.pipeline)
# TODO: Fix defaults that need passed in config values
command_processor = command_processor_factory(channel_name)
message_pump = MessagePump(command_processor=command_processor, channel=channel, mapper_func=mapper_func,
timeout=500, unacceptable_message_limit=None, requeue_count=None)
logger.debug("Starting the message pump for %s", channel_name)
message_pump.run(started_event)
class ConsumerConfiguration:
def __init__(self,
connection: Connection,
consumer: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
The configuration parameters for one consumer - can create one or more performers from this, each of which is
a message pump reading from a queue
:param connection: The connection to the broker
:param consumer: The consumer we want to create (routing key, queue etc)
        :param consumer_factory: A factory to create a consumer to read from a broker, for a given implementation i.e. arame
        :param command_processor_factory: A factory that creates a command processor configured for a pipeline
        :param mapper_func: Maps between messages on the queue and requests (commands/events)
"""
self._connection = connection
self._consumer = consumer
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
@property
def connection(self) -> Connection:
return self._connection
@property
def brightside_configuration(self) -> BrightsideConsumerConfiguration:
return self._consumer
@property
def consumer_factory(self) -> Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer]:
return self._consumer_factory
@property
def command_processor_factory(self):
return self._command_processor_factory
@property
def mapper_func(self) -> Callable[[BrightsideMessage], Request]:
return self._mapper_func
class DispatcherState(Enum):
    ds_awaiting = 0
    ds_notready = 1
    ds_running = 2
    ds_stopped = 3
    ds_stopping = 4
class Dispatcher:
"""
The dispatcher orchestrates the creation of consumers, where a consumer is the sub-process that runs a message pump
    to consume messages from a given channel and dispatch them to handlers. The dispatcher can start more than one performer
for a given channel.
The dispatcher also orchestrates the shutdown of consumers. It does this by posting a stop message into each running
consumers queue, thus allowing the current handler to run to completion but killing the consumer before it can
consume another work item from the queue.
As such the dispatcher tracks consumer instances.
In addition, as we must pass a factory method to the sub-process that creates the command processor for that channel
    i.e. handler and policy registration, outgoing queues, the Dispatcher also acts as a registry of those factory methods
for individual channels.
    The dispatcher uses a thread to 'stay running' until end is called. This means that receive is non-blocking. The
supervisor thread yields regularly to avoid spinning the CPU. This means there can be a delay between signalling to
end and the shutdown beginning.
    Shutdown will finish work in progress, as it inserts a quit message in the queue that gets consumed 'next'.
"""
def __init__(self, consumers: Dict[str, ConsumerConfiguration]) -> None:
self._state = DispatcherState.ds_notready
self._consumers = consumers
self._performers = {k: Performer(
k,
v.connection,
v.brightside_configuration,
v.consumer_factory,
v.command_processor_factory,
v.mapper_func)
for k, v in self._consumers.items()}
self._running_performers = {}
self._supervisor = None
self._state = DispatcherState.ds_awaiting
@property
def state(self):
return self._state
def receive(self):
def _receive(dispatcher: Dispatcher, initialized: Event) -> None:
for k, v in self._performers.items():
event = Event()
dispatcher._running_performers[k] = v.run(event)
event.wait(3) # TODO: Do we want to configure this polling interval?
initialized.set()
while self._state == DispatcherState.ds_running:
time.sleep(5) # yield to avoid spinning, between checking for changes to state
if self._state == DispatcherState.ds_awaiting:
initialized = Event()
self._supervisor = Thread(target=_receive, args=(self, initialized))
            self._state = DispatcherState.ds_running
            self._supervisor.start()
            initialized.wait(5)  # TODO: Should this scale with the number of performers and be configurable?
def end(self):
if self._state == DispatcherState.ds_running:
for channel, process in list(self._running_performers.items()):
self._performers[channel].stop()
process.join(10) # TODO: We really want to make this configurable
self._state = DispatcherState.ds_stopping
self._supervisor.join(5)
self._running_performers.clear()
self._supervisor = None
self._state = DispatcherState.ds_stopped
            # Do we want to determine if any processes have failed to complete within the time frame?
def open(self, consumer_name: str) -> None:
# TODO: Build then refactor with receive
# Find the consumer
if consumer_name not in self._consumers:
raise ConfigurationException("The consumer {} could not be found, did you register it?".format(consumer_name))
consumer = self._consumers[consumer_name]
performer = Performer(consumer_name,
consumer.connection,
consumer.brightside_configuration,
consumer.consumer_factory,
consumer.command_processor_factory,
consumer.mapper_func)
self._performers[consumer_name] = performer
# if we have a supervisor thread
if self._state == DispatcherState.ds_running:
# start and add to items monitored by supervisor (running performers)
pass
# else
elif self._state == DispatcherState.ds_stopped:
# start the supervisor with the single consumer
self._state = DispatcherState.ds_awaiting
self.receive()
else:
            raise MessagingException("Dispatcher in an unrecognised state to open new connection; state was {}".format(self._state))
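# Hedged usage sketch (assumption, not from the original module): how the
# classes above are intended to fit together. Every name on the right-hand
# side below is a placeholder the caller must supply for their broker,
# handlers and mapper.
#
#     configuration = ConsumerConfiguration(
#         connection=my_connection,                 # brightside Connection to the broker
#         consumer=my_consumer_configuration,       # BrightsideConsumerConfiguration (queue, routing key, pipeline)
#         consumer_factory=make_consumer,           # returns a BrightsideConsumer for that broker
#         command_processor_factory=make_processor, # builds a CommandProcessor for the channel
#         mapper_func=map_to_request)               # maps a BrightsideMessage to a Request
#     dispatcher = Dispatcher(consumers={"example.channel": configuration})
#     dispatcher.receive()   # non-blocking: the supervisor thread starts one Performer per channel
#     ...
#     dispatcher.end()       # posts a quit message per channel and joins the performers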
|
hdvMissingItems.py
|
print('Importing sources ...')
from .pricesListing import kamasToString, craftPrice
from pyasn1.type.univ import Boolean
from sniffer import protocol
import time, datetime
from colorama import Fore
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from sources.item import gameItems, itemToName, ressourcesId, recipes, prices
print('Sources imported !')
# Print iterations progress
requestCount = 0
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    """
    global requestCount
requestCount += 1
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
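# Minimal usage sketch (illustrative only, not part of the original module):
# printProgressBar is meant to be called once per loop iteration, as the
# spreadsheet upload loops in MissingItemLookup below do.
if __name__ == '__main__':
    total = 10
    printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=40)
    for i in range(1, total + 1):
        time.sleep(0.1)  # stand-in for real work
        printProgressBar(i, total, prefix='Progress:', suffix='Complete', length=40)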
class MissingItemLookup:
    def __init__(self, gui) -> None:
# use creds to create a client to interact with the Google Drive API
scope = ['https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('access/client_secret.json', scope)
self._client = gspread.authorize(creds)
self._alreadyMissingItems = {}
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
self._spreadSheet = self._client.open("Missing Items")
# Extract and print all of the values
infos = self._spreadSheet.worksheet("Infos").get_all_records()
lastSave = infos[0]['Last save']
self._workSheets = self._spreadSheet.worksheets()
self._currentDateFormatted = str(datetime.datetime.today().strftime ('%d-%m-%Y')) # format the date to ddmmyyyy
self._isCurrentDay = (lastSave == self._currentDateFormatted)
if self._isCurrentDay:
self.overWrite = gui.overWrite()
if self.overWrite:
# t = threading.Thread(target=self.moduleInitialization, name="Module Initialization")
# t.start()
self.moduleInitialization()
else:
gui.abortWindow()
else:
self.moduleInitialization()
def moduleInitialization(self):
self._idToType = {
1: 'Amulette',
9: 'Anneau',
10: 'Ceinture',
11: 'Bottes',
82: 'Bouclier',
16: 'Coiffe',
17: 'Cape',
            81: 'Cape', # Backpack
            2: 'Arme', # All weapon types
3: 'Arme',
4: 'Arme',
5: 'Arme',
6: 'Arme',
7: 'Arme',
8: 'Arme',
19: 'Arme',
21: 'Arme',
22: 'Arme',
114: 'Arme',
151: 'Trophée'
}
self._missingItems = {}
for type in self._idToType.values():
try:
self._missingItems[type]
except KeyError:
self._missingItems[type] = {}
countTypes = len(self._missingItems)
print(Fore.YELLOW + "Getting old items" + Fore.RESET)
printProgressBar(0, countTypes, prefix = 'Progress:', suffix = 'Received', length = 50)
ind = 1
for sheetIndex in range(countTypes):
sheet = self._workSheets[sheetIndex]
self._alreadyMissingItems[sheet.title] = sheet.get_all_records()
printProgressBar(ind, countTypes, prefix = 'Progress:', suffix = 'Received', length = 50)
ind += 1
self.abort = False
return False
def _indexOf(self, nameToLookFor, dictList):
index = 0
for item in dictList:
if item['Nom'] == nameToLookFor:
return index
else:
index += 1
return -1
def _idsFromType(self, inputType):
ids = []
for intType, strType in self._idToType.items():
if strType == inputType:
ids.append(intType)
return ids
def packetRead(self, msg):
if msg.id == 7549:
packet = protocol.read(protocol.msg_from_id[msg.id]["name"], msg.data)
if packet['objectType'] not in self._idToType.keys():
return
for item in gameItems[str(packet['objectType'])]:
if item['id'] not in packet['typeDescription'] and item['craftable']:
self._missingItems[self._idToType[packet['objectType']]][item['id']] = item
print("Catégorie " + Fore.CYAN + self._idToType[packet['objectType']] + Fore.RESET + " ajoutée")
def saveMissingItems(self):
global requestCount
# Failsafe
for itemType in self._missingItems.keys():
if len(self._missingItems[itemType]) == 0:
return
typeProgress = 0
requestCount = 0
countTypes = len(self._missingItems)
print(Fore.YELLOW + "Sending new items" + Fore.RESET)
printProgressBar(0, countTypes, prefix = 'Progress:', suffix = 'Sent', length = 50)
for itemType, itemList in self._missingItems.items():
newRows = []
oldRows = {}
updateRows = []
deleteRows = []
# Selecting old items that will be deleted (not missing anymore)
rowToDelete = 1
for itemAlreadyMissing in self._alreadyMissingItems[itemType]:
for id in self._idsFromType(itemType):
for dbItem in gameItems[str(id)]:
if itemToName[dbItem['id']] == itemAlreadyMissing['Nom']:
if not self._isCurrentDay:
dayCount = itemAlreadyMissing['Jours consécutifs'] + 1
else:
dayCount = itemAlreadyMissing['Jours consécutifs']
effects = ', '.join(dbItem['effects'])
price = kamasToString(craftPrice(dbItem['id']))
oldRows[self._indexOf(itemToName[dbItem['id']], self._alreadyMissingItems[itemType])] = [effects, price, dayCount]
break
for item in itemList.values():
# Check if the item is already missing
alreadyMissing = False
for itemAlreadyMissing in self._alreadyMissingItems[itemType]:
if itemAlreadyMissing['Nom'] == itemToName[item['id']]:
alreadyMissing = True
break
if not alreadyMissing:
effects = ', '.join(item['effects'])
price = kamasToString(craftPrice(item['id']))
newRows.append(
[item['level'], itemToName[item['id']], effects, price, 0, ""]
)
progress = 1
for i in range(len(oldRows)):
for itemIndex, itemToUpdate in oldRows.items():
if i == itemIndex:
updateRows.append(itemToUpdate)
break
# for rowToDelete in deleteRows:
# self._spreadSheet.worksheet(itemType).delete_row(rowToDelete)
# printProgressBar((progress/(len(deleteRows) + 3)) + typeProgress, countTypes, prefix = 'Progress:', suffix = 'Sent', length = 50)
# progress += 1
# self._spreadSheet.worksheet(itemType).delete_rows(2, len(self._alreadyMissingItems[itemType]))
self._spreadSheet.worksheet(itemType).update("C2:E" + str(len(updateRows) + 1), updateRows)
printProgressBar(0.5 + typeProgress, countTypes, prefix = 'Progress:', suffix = 'Sent', length = 50)
progress += 1
self._spreadSheet.worksheet(itemType).insert_rows(newRows, row = 2 + len(oldRows))
printProgressBar(1 + typeProgress, countTypes, prefix = 'Progress:', suffix = 'Sent', length = 50)
typeProgress += 1
self._spreadSheet.worksheet("Infos").update_cell(2, 1, self._currentDateFormatted)
printProgressBar(countTypes, countTypes, prefix = 'Progress:', suffix = 'Sent', length = 50)
|
callback.py
|
from utlis.rank import setrank,isrank,remrank,remsudos,setsudo,GPranks,IDrank
from utlis.send import send_msg, BYusers, Sendto, fwdto,Name,Glang,getAge
from utlis.locks import st,getOR,Clang,st_res
from utlis.tg import Bot
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json,datetime,os
import importlib
from os import listdir
from os.path import isfile, join
def updateCallback(client, callback_query,redis):
try:
json.loads(callback_query.data)
except Exception as e:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
if callback_query.inline_message_id:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
userID = callback_query.from_user.id
chatID = callback_query.message.chat.id
userFN = callback_query.from_user.first_name
title = callback_query.message.chat.title
message_id = callback_query.message.message_id
date = json.loads(callback_query.data)
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatID)
lang = Glang(redis,chatID)
moduleCMD = "lang."+lang+"-cmd"
moduleREPLY = "lang."+lang+"-reply"
c = importlib.import_module(moduleCMD)
r = importlib.import_module(moduleREPLY)
if date[0] == "Cordertow":
rank = isrank(redis,userID,chatID)
if (rank is "sudo" or rank is "sudos" or rank is "creator" or rank is "owner"):
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1]):
GetGprank = GPranks(date[1],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[1]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
return False
if date[0] == "delBL":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chat,Hash))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if re.search("del(.*)replys$",date[0]):
t = date[0].replace("del","")
if date[1] != "kb":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,date[1],t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("del(.*)replysBOT",date[0]):
rank = isrank(redis,userID,chatID)
if rank == "sudo":
t = date[0].replace("del","")
t = t.replace("BOT","")
if date[1] != "kb":
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "delfromb":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockSTICKERs":
ID = callback_query.message.sticker.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockphotos":
ID = callback_query.message.photo.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
User_click = int((redis.get("{}Nbot:{}:floodClick".format(BOT_ID,userID)) or 1))
if User_click > 10:
BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID,userFN)
Bot("sendMessage",{"chat_id":chatID,"text":r.banclick.format(BY),"disable_web_page_preview":True,"parse_mode":"html"})
redis.setex("{}Nbot:floodUsers:{}".format(BOT_ID,userID),60*2,"Ban")
redis.delete("{}Nbot:{}:floodClick".format(BOT_ID,userID))
if chatID == userID:
group = True
if group is True and int(date[2]) == userID and not redis.get("{}Nbot:floodUsers:{}".format(BOT_ID,userID)):
if date[0] == "delcheck":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.notcertain, callback_data=json.dumps(["kickcheck","",userID])),InlineKeyboardButton(r.certain, callback_data=json.dumps(["certain","",userID]))]])
random.shuffle(reply_markup.inline_keyboard[0])
Bot("editMessageText",{"chat_id":chatID,"text":r.ucertain,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "certain":
Bot("restrictChatMember",{"chat_id": chatID,"user_id":userID,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
Bot("editMessageText",{"chat_id":chatID,"text":r.unrestricted.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickcheck":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
crid = redis.get("{}Nbot:{}:creator".format(BOT_ID,chatID))
redis.sadd("{}Nbot:{}:bans".format(BOT_ID,chatID),userID)
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Corder, callback_data=json.dumps(["Cordertow",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.bancheck.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "addor":
cpKey = date[1]
kbList = {"vip":{
"cb":"^%(tx)s$|^%(tx)s @(.*)$|%(tx)s [0-9]+$",
"cb2":"^%(tx)s [0-9]+$",
"rp":c.orvip,
},"owner":{
"cb":"^%(tx)s$|^%(tx)s @(.*)$|%(tx)s [0-9]+$",
"cb2":"^%(tx)s [0-9]+$",
"rp":c.orow,
},"admin":{
"cb":"^%(tx)s$|^%(tx)s @(.*)$|%(tx)s [0-9]+$",
"cb2":"^%(tx)s [0-9]+$",
"rp":c.orad,
},"ban":{
"cb":"^%(tx)s$|^%(tx)s @(.*)$|%(tx)s [0-9]+$",
"cb2":"^%(tx)s [0-9]+$",
"rp":c.orban,
},"tk":{
"cb":"^%(tx)s$|^%(tx)s @(.*)$|%(tx)s [0-9]+$",
"cb2":"^%(tx)s [0-9]+$",
"rp":c.ortk,
}
}
cc = re.findall(c.addor,callback_query.message.reply_to_message.text)
F = kbList[cpKey]
cb = F["cb"] % {'tx': cc[0]}
cb2 = F["cb2"] % {'tx': cc[0]}
print(cb,cb2)
redis.hset("{}Nbot:{}or:cb".format(BOT_ID,cpKey),chatID,cb)
redis.hset("{}Nbot:{}or:cb2".format(BOT_ID,cpKey),chatID,cb2)
callback_query.message.edit_text(r.Daddor.format(cc[0],F["rp"]))
if date[0] == "delF":
File = date[1]
os.system("rm ./files/"+File)
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfile.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "delFa":
os.system("rm -rf ./files/*")
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfiles,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "dlf":
File = date[1]
os.system("rm ./files/"+File)
url = "https://raw.githubusercontent.com/NewBotMD/NB-files/master/"+File
out = requests.get(url).text
f = open("./files/"+File,"w+")
f.write(out)
f.close()
Bot("editMessageText",{"chat_id":chatID,"text":r.Dua.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "au":
File = date[1]
if redis.sismember("{}Nbot:botfiles".format(BOT_ID),File):
redis.srem("{}Nbot:botfiles".format(BOT_ID),File)
else:
redis.sadd("{}Nbot:botfiles".format(BOT_ID),File)
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
array = []
for f in onlyfiles:
if f in filesR:
s = r.true
else:
s = r.false
array.append([InlineKeyboardButton(f+" "+s,callback_data=json.dumps(["au",f,userID]))])
kb = InlineKeyboardMarkup(array)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "twostepset":
get = date[1]
if get == "eq":
redis.hset("{}Nbot:bancheck:t".format(BOT_ID),chatID,"two")
tx = r.Ttwo
g= "two"
if get == "two":
redis.hdel("{}Nbot:bancheck:t".format(BOT_ID),chatID)
g= "eq"
tx = r.Teq
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.tset.format(tx),callback_data=json.dumps(["twostepset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "floodset":
get = date[1]
if get == "ban":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"res")
tx = r.Tres
g= "res"
if get == "res":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"ban")
g= "ban"
tx = r.Tban
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.fset.format(tx),callback_data=json.dumps(["floodset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "delmsgclick":
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
Bot("deleteMessage",{"chat_id":chatID,"message_id":callback_query.message.reply_to_message.message_id})
if date[0] == "ckGPs":
rank = isrank(redis,userID,chatID)
if rank == "sudo":
Bot("editMessageText",{"chat_id":chatID,"text":r.ckpr,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
IDS = redis.smembers("{}Nbot:groups".format(BOT_ID))
i = 0
for ID in IDS:
get = Bot("getChat",{"chat_id":ID})
if get["ok"] == False:
redis.srem("{}Nbot:groups".format(BOT_ID),ID)
redis.sadd("{}Nbot:disabledgroups".format(BOT_ID),ID)
NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID),ID,str(NextDay_Date))
i+=1
time.sleep(0.3)
pr = redis.scard("{}Nbot:privates".format(BOT_ID))
gp = redis.scard("{}Nbot:groups".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.showstats.format(gp,pr)+r.Dckg.format(i),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "Chlang":
name = date[1]
redis.srem("{}Nbot:lang:ar".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:arem".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:en".format(BOT_ID),chatID)
redis.sadd("{}Nbot:lang:{}".format(BOT_ID,name),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":Clang(client, callback_query,redis,r)})
if date[0] == "ShowDateUser":
t = IDrank(redis,userID,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),userID) or 0)
rate = int(msgs)*100/20000
age = getAge(userID,r)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(Name(userFN),url="t.me/nbbot")],[InlineKeyboardButton(r.Rrank.format(t),url="t.me/nbbot")],[InlineKeyboardButton(r.Rmsgs.format(msgs),url="t.me/nbbot")],[InlineKeyboardButton(r.Rrate.format(str(rate)+"%"),url="t.me/nbbot")],[InlineKeyboardButton(r.Redits.format(edits),url="t.me/nbbot")],[InlineKeyboardButton(r.Rage.format(age),url="t.me/nbbot")]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("ShowO",date[0]):
T = date[0].replace("ShowO","")
rank = isrank(redis,userID,chatID)
if T == "lock":
reply_markup = getOR(rank,r,userID)
tx = r.LockO
if T == "admin":
reply_markup = getOR(rank,r,userID)
tx = r.AdminO
if T == "owner":
reply_markup = getOR(rank,r,userID)
tx = r.OwnerO
if T == "creator":
reply_markup = getOR(rank,r,userID)
tx = r.CreatorO
if T == "sudos":
reply_markup = getOR(rank,r,userID)
tx = r.SudosO
if T == "sudo":
reply_markup = getOR(rank,r,userID)
tx = r.SudoO
Bot("editMessageText",{"chat_id":chatID,"text":tx,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "sendtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "sendtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "kickme-yes":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
Bot("unbanChatMember",{"chat_id":chatID,"user_id":userID})
Bot("editMessageText",{"chat_id":chatID,"text":r.Dkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickme-no":
Bot("editMessageText",{"chat_id":chatID,"text":r.Nkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "delfromb":
Hash = date[1]
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chatId,TY),ID)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneUNblock,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Blocklist":
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showBlocklist","",userID])),InlineKeyboardButton(c.STgifs,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockanimations")),],[InlineKeyboardButton(c.STphoto,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockphotos")),InlineKeyboardButton(c.STsticker,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockSTICKERs")),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.blocklist2,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylist":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylist","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylist","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylist","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylist,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylistBOT":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylistBOT","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylistBOT","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylistBOT","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylistBot,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "alllist":
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(c.STbanall,callback_data=json.dumps(["showbanall","",userID]))
,InlineKeyboardButton(c.STtkall,callback_data=json.dumps(["showtkall","",userID])),]
])
Bot("editMessageText",{"chat_id":chatID,"text":r.banlist,"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delallban":
redis.delete("{}Nbot:bans".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddelbanall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delalltk":
redis.delete("{}Nbot:restricteds".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddeltkall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "showBlocklist":
li = redis.smembers("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - "+word
i += 1
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.BlocklistRm,callback_data=json.dumps(["delListblockTEXTs","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.BlocklistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showbanall":
arrays = redis.smembers("{}Nbot:bans".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.allbandel,callback_data=json.dumps(["delallban","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.allbanE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showtkall":
arrays = redis.smembers("{}Nbot:restricteds".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.alltkdel,callback_data=json.dumps(["delalltk","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.alltkE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylist":
li = redis.hkeys("{}Nbot:{}:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylist":
li = redis.hkeys("{}Nbot:{}:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylist":
li = redis.hkeys("{}Nbot:{}:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylist":
li = redis.hkeys("{}Nbot:{}:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylistBOT":
li = redis.hkeys("{}Nbot:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylistBOT":
li = redis.hkeys("{}Nbot:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylistBOT":
li = redis.hkeys("{}Nbot:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylistBOT":
li = redis.hkeys("{}Nbot:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "listCH":
if int(date[1]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
if date[0] == "listCH-res":
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[1]))})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[1]))})
if date[0] == 'LU-res':
d = date[1].split("-")
lock = d[0]
lockres = d[0]+":"+d[1]
if redis.sismember("{}Nbot:{}".format(BOT_ID,lockres),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,lockres),chatID)
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,lockres),chatID)
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[3]))})
if date[0] == 'LU':
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
save = redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
else:
save = redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
if int(date[3]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
if date[0] == "delListblockTEXTs":
redis.delete("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListbans":
arrays = redis.smembers("{}Nbot:{}:bans".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":user})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListrestricteds":
arrays = redis.smembers("{}Nbot:{}:restricteds".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": user,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "LandU":
if date[3] == "LtoU":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[3] == "UtoL":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Corder":
if date[1] == "bans":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("kickChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[1] == "restricteds":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 0,"can_send_media_messages": 0,"can_send_other_messages": 0,"can_send_polls": 0,"can_change_info": 0,"can_add_web_page_previews": 0,"can_pin_messages": 0,})
redis.sadd("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delList":
H = date[1]
if H != "sudos" and H != "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "sudos":
redis.delete("{}Nbot:sudos".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id})
elif int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
|
sentry.py
|
from multiprocessing import Process
from threading import Thread
import subprocess, shlex
import yaml, json
import sys, time, os, datetime
LOGFILE = 'status.csv'
class StatusNode(object):
def __init__(self, node):
if isinstance(node, HostNode):
self.id = node.hostname+':' + node.ip
self.name = node.hostname
self.children = [StatusNode(service) for service in node.services]
self.children += [StatusNode(host) for host in node.children]
elif isinstance(node, ServiceNode):
self.id = node.name + '@' + node.ip
self.name = node.name
self.children = []
else:
raise TypeError()
self.data = {'up':node.up}
class HostNode(object):
def __init__(self, configObject):
self.hostname = configObject['hostname']
self.ip = configObject['ip']
self.resolver = configObject['resolver']
self.fqdn = configObject['fqdn']
self.children = [HostNode(child) for child in configObject['children']]
self.services = [ServiceNode(service, self) for service in configObject['services']]
self.up = False
class ServiceNode(object):
def __init__(self, serviceObject, host):
self.ip = host.ip
self.name = serviceObject['name']
self.port = serviceObject['port']
self.check = serviceObject['check']
self.up = False
class Sentry(object):
#constructor, set up scanner
def __init__(self, delay=60):
self.delay = delay
self.root = None
self.loadConfig()
scanner = Thread(target=self)
scanner.daemon = True
scanner.start()
#scanner target, runs as a daemon
def __call__(self):
try:
file_exists = os.path.isfile(LOGFILE)
#if file is empty write header
if not file_exists or os.stat(LOGFILE).st_size == 0:
headers = ['time']
self.getServiceNameList(self.root, headers)
with open(LOGFILE, 'a' if file_exists else 'w') as statuslog:
statuslog.write(', '.join(i for i in headers)+'\n')
#begin scanning for services
while True:
now = datetime.datetime.now()
now = str(now.hour % 12) + ':' + str(now.minute)
statuses = [now]
self.scanHost(self.root, statuses)
with open(LOGFILE,'r') as statuslog:
h = statuslog.readline().split(', ')
if len(statuses) != len(h):
print(h)
print(statuses)
raise ValueError('active status array not the same size as the header line')
with open(LOGFILE,'a') as statuslog:
statuslog.write(', '.join(str(i) for i in statuses)+'\n')
time.sleep(self.delay)
except (KeyboardInterrupt):
pass
def getRecentStatuses(self):
result = {}
with open(LOGFILE, 'r') as statuslog:
result['headers'] = statuslog.readline().replace('\n','').split(', ')
recent = [line.replace('\n','').split(', ') for line in statuslog.readlines()]
if len(recent) <= 10:
result['status'] = recent
else:
result['status'] = recent[-10:]
return json.dumps(result)
def getServiceNameList(self, node, namelist):
for i in node.services:
namelist.append(i.name + '@' + i.ip)
for i in node.children:
self.getServiceNameList(i,namelist)
#scan a single host, and its services
def scanHost(self, host, logList=None, recurse=True):
#ping(host.ip) |> host.up
host.up = not os.system('ping -c 1 -w2 ' + host.ip + ' > /dev/null')
for service in host.services:
self.scanService(service, logList)
if recurse:
for child in host.children:
self.scanHost(child, logList)
#scan a single service
def scanService(self, service,logList=None):
nmapcmd = '/usr/bin/nmap -Pn -p ' + str(service.port) + ' ' + service.ip
service.up = 'open' in str(subprocess.check_output(nmapcmd, shell=True))
checkcmd = service.check
service.responsive = False
if checkcmd != '':
#print(checkcmd)
try:
output = subprocess.check_output(checkcmd, shell=True)
service.responsive = output in ['1', '1\n', b'1', b'1\n']
print(service.responsive)
except PermissionError:
print('PermissionError executing \"' + checkcmd + '\" , service check fails')
except subprocess.CalledProcessError:
print('weird error executing checkcmd, non-zero exit status')
result = 1 if service.responsive else 0
if logList is not None:
    logList.append(str(result))
#get tree representation of the status of the network
def getStatusTree(self):
return json.dumps(StatusNode(self.root),default=lambda o: o.__dict__, sort_keys=True)
#load file
def loadConfig(self):
config = None
try:
with open('config.yaml','r') as config_file:
config = yaml.safe_load(config_file)  # safe_load: yaml.load without an explicit Loader is rejected by newer PyYAML
except FileNotFoundError:
print('Fatal: Config File "config.yaml" not found!')
sys.exit(1)
if config is None:
print('Fatal: failed to load config')
sys.exit(1)
#reverse parent links to be links to children
hosts_by_name = {}
for host in config:
hosts_by_name[host['hostname']] = host
host['children'] = []
for host in config:
if host['parent'] is not None:
hosts_by_name[host['parent']]['children'].append(host)
del host['parent']
#import the config into useful objects
self.root = HostNode(config[0])
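# --- Example config.yaml (sketch) --------------------------------------------
# loadConfig() expects a YAML list of host mappings; the first entry becomes
# the root of the tree.  Field names below are the ones read by HostNode and
# ServiceNode; the concrete hosts, ports and check commands are illustrative
# assumptions, not part of the original project.
#
# - hostname: gateway
#   ip: 192.168.1.1
#   resolver: ''
#   fqdn: gateway.example.lan
#   parent:                # empty/None marks the root host
#   services:
#     - name: ssh
#       port: 22
#       check: ''          # optional shell command that prints 1 when healthy
# - hostname: fileserver
#   ip: 192.168.1.10
#   resolver: ''
#   fqdn: files.example.lan
#   parent: gateway        # parent names are rewired into children links
#   services: []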
|
example3.py
|
# ch05/example3.py
import threading
import requests
import time
def ping(url):
res = requests.get(url)
print(f'{url}: {res.text}')
urls = [
'http://httpstat.us/200',
'http://httpstat.us/400',
'http://httpstat.us/404',
'http://httpstat.us/408',
'http://httpstat.us/500',
'http://httpstat.us/524'
]
start = time.time()
for url in urls:
ping(url)
print(f'Sequential: {time.time() - start : .2f} seconds')
print()
start = time.time()
threads = []
for url in urls:
thread = threading.Thread(target=ping, args=(url,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
print(f'Threading: {time.time() - start : .2f} seconds')
|
process_wrapper.py
|
import time
from multiprocessing import Process, Queue, Event
from sensors.sensor import Sensor
def _inner_read(inner, read_queue, stop_event):
inner.__enter__()
while not stop_event.is_set():
buffer = inner.read()
read_queue.put_nowait(buffer)
time.sleep(0.1)
inner.__exit__(None, None, None)
class ProcessWrapper(Sensor):
def __init__(self, inner):
self.inner = inner
self.last_read = None
self.queue = Queue()
self.stop_event = Event()
def __enter__(self):
proc = Process(target=_inner_read, args=(self.inner, self.queue, self.stop_event))
proc.start()
return self
def __exit__(self, exit_type, value, traceback):
self.stop_event.set()
def read(self):
while not self.queue.empty():
self.last_read = self.queue.get_nowait()
return self.last_read
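# --- Usage sketch -------------------------------------------------------------
# A minimal, hypothetical illustration of how ProcessWrapper is meant to be
# driven.  _FakeSensor is not part of the project; it only assumes the Sensor
# interface that _inner_read actually relies on: context-manager methods plus
# a read() that returns the latest buffer.
class _FakeSensor(Sensor):
    def __enter__(self):
        return self

    def __exit__(self, exit_type, value, traceback):
        pass

    def read(self):
        return time.time()  # stand-in for a real sensor sample


if __name__ == "__main__":
    with ProcessWrapper(_FakeSensor()) as sensor:
        time.sleep(0.5)         # let the child process publish a few samples
        print(sensor.read())    # newest value drained from the queue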
|
Servidor.py
|
# coding: utf-8
# In[ ]:
# -*- coding: utf-8 -*-
import socket
import os
import datetime as dt
import threading as th
import sys
def requestMessage (con,cliente,extensionDict,shareFolder):
print ("aguardando mensagem")
mensagem = con.recv(1048576).decode('utf-8')
msg = mensagem.split("\n")
request = {}  # local dict so concurrent handler threads do not share one global request
request['operation'] = msg[0]
del(msg[0])
print(request['operation'])
#debug variable
cont = 0
for line in msg:
cont = cont+1
print (line)
lineSplit = line.split(': ')
try:
key = lineSplit[0]
valor = lineSplit[1]
request[key] = valor
except:
break
try:
filepath = request['operation'].split()[1]
except:
filepath='servConfig/400.html'
if filepath == '/':
nameFile = request['operation'].split()
file = open(shareFolder + '/Index.html','rb')
fileByte = file.read()
respostaString = 'HTTP/1.1 200 OK\r\n'  # the status line must come first, with no leading newline
resposta = {
"Location" : "http://localhost:7000/",
'date' : str(dt.datetime.now()),
'Server' : 'jaoserver',
'Content-Type' : 'text/html',
'Content-Length' : str(len(fileByte))
}
for key,valor in resposta.items():
respostaString = respostaString + key+': '+ valor + '\r\n'
respostaString = respostaString + '\r\n'
con.send( respostaString.encode('utf-8') + fileByte )
else:
if os.path.isfile(shareFolder + filepath) :
file = open(shareFolder + filepath,'rb')
respostaString = 'HTTP/1.1 200 OK\r\n'
fileByte = file.read()
index = filepath.rfind('.')
keyExtension = filepath[index:]
elif os.path.isdir(shareFolder + filepath):
files = os.listdir(shareFolder + filepath)
createListHtml(filepath,files,shareFolder)
keyExtension = '.isdir'
file = open(shareFolder+'/temp/listDir.html','rb')
fileByte = file.read()
respostaString = 'HTTP/1.1 200 OK\r\n'
else:
file = open('servConfig/404.html','rb')
respostaString = 'HTTP/1.1 404 Not Found\r\n'
fileByte = file.read()
keyExtension = '.html'
resposta = {
"Location" : "http://localhost:7000/",
'date' : str(dt.datetime.now()),
'Server' : 'jaoserver',
'Content-Type' : extensionDict[keyExtension],
'Content-Length' : str(len(fileByte))
}
for key,valor in resposta.items():
respostaString = respostaString + key+': '+ valor + '\r\n'
respostaString = respostaString + '\r\n'
file.close()
con.sendall( respostaString.encode('utf-8') + fileByte )
con.close()
def createListHtml(filePath,files,shareFolder):
file = open(shareFolder + '/temp/listDir.html','w')
file.write('<html>')
file.write('<head><title>listDir</title></head>')
file.write('<body>')
file.write('<h1>MUTHERFUCKER PAGES</H1>')
for fileName in files:
file.write('<a href="' + filePath + '/'+fileName+'">'+fileName+'</a><br>')
file.write('</body>')
file.write('</html>')
file.close()
request = {}
host = 'localhost'
port = int(sys.argv[1])
shareFolder = sys.argv[2]
loadextensions = open('servConfig/extension.txt','r')
extensionDict = {}
for line in loadextensions:
keyValue = line.split('\t')
extensionDict[keyValue[0]] = keyValue[1].strip()  # strip the trailing newline from the MIME type
loadextensions.close()
addr = (host, port)
serv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv_socket.bind(addr)
serv_socket.listen(10)
# declared variables
file = ''
fileByte = ''
cons = set()
cont = 0
while True:
con, cliente = serv_socket.accept()
print('connected')
cons.add(con)
th.Thread(target=requestMessage,args=(con, cliente, extensionDict,shareFolder)).start()
# In[ ]:
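# --- Notes (sketch) -----------------------------------------------------------
# Invocation implied by the sys.argv handling above (port and share folder are
# positional; the values are only illustrative):
#     python Servidor.py 7000 ./public
# servConfig/extension.txt is parsed as one tab-separated pair per line,
# mapping a file extension to the Content-Type sent back; '.isdir' must be
# present because directory listings use it as their key:
#     .html<TAB>text/html
#     .css<TAB>text/css
#     .png<TAB>image/png
#     .isdir<TAB>text/html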
|
nbsp.py
|
# _*_ coding:UTF-8 _*_
import time
from threading import Thread, Event
from six.moves import queue
from .logger import get_logger
LOGGING = get_logger(__name__)
class NonBlockingStreamReader:
def __init__(self, stream, raise_EOF=False, print_output=True, print_new_line=True, name=None):
'''
stream: the stream to read from.
Usually a process' stdout or stderr.
raise_EOF: if True, raise an UnexpectedEndOfStream
when stream is EOF before kill
print_output: if True, print when readline
'''
self._s = stream
self._q = queue.Queue()
self._lastline = None
self.name = name or id(self)
def _populateQueue(stream, queue, kill_event):
'''
Collect lines from 'stream' and put them in 'queue'.
'''
while not kill_event.is_set():
line = stream.readline()
if line:
queue.put(line)
if print_output:
# print only new line
if print_new_line and line == self._lastline:
continue
self._lastline = line
LOGGING.debug("[%s]%s" % (self.name, repr(line.strip())))
elif kill_event.is_set():
break
elif raise_EOF:
raise UnexpectedEndOfStream
else:
print("EndOfStream: %s" % self.name)
break
self._kill_event = Event()
self._t = Thread(target=_populateQueue, args=(self._s, self._q, self._kill_event), name="nbsp_%s"%self.name)
self._t.daemon = True
self._t.start() # start collecting lines from the stream
def readline(self, timeout=None):
try:
return self._q.get(block=timeout is not None, timeout=timeout)
except queue.Empty:
return None
def read(self, timeout=0):
time.sleep(timeout)
lines = []
while True:
line = self.readline()
if line is None:
break
lines.append(line)
return b"".join(lines)
def kill(self):
self._kill_event.set()
class UnexpectedEndOfStream(Exception):
pass
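# --- Usage sketch (kept as comments: the relative import of .logger means this
# module is not meant to be run directly) ---------------------------------------
#   proc = subprocess.Popen(["ping", "-c", "3", "127.0.0.1"],
#                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#   reader = NonBlockingStreamReader(proc.stdout, name="ping")
#   while proc.poll() is None:
#       line = reader.readline(timeout=0.5)   # None when nothing is queued yet
#       if line:
#           print(line.decode(errors="replace").rstrip())
#   reader.kill()
# The command above is only an example; any object with a readline() method
# (a process' stdout/stderr, a socket makefile(), ...) can be wrapped.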
|
diffido.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Diffido - because the F5 key is a terrible thing to waste.
Copyright 2018 Davide Alberani <da@erlug.linux.it>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import json
import pytz
import shutil
import urllib.parse
import smtplib
from email.mime.text import MIMEText
from email.utils import formatdate
import logging
import datetime
import requests
import subprocess
import multiprocessing
from lxml import etree
from xml.etree import ElementTree
from tornado.ioloop import IOLoop
from apscheduler.triggers.cron import CronTrigger
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
from tornado import gen, escape
JOBS_STORE = 'sqlite:///conf/jobs.db'
API_VERSION = '1.0'
SCHEDULES_FILE = 'conf/schedules.json'
DEFAULT_CONF = 'conf/diffido.conf'
EMAIL_FROM = 'diffido@localhost'
SMTP_SETTINGS = {}
GIT_CMD = 'git'
re_commit = re.compile(r'^(?P<id>[0-9a-f]{40}) (?P<message>.*)\n(?: .* '
r'(?P<insertions>\d+) insertion.* (?P<deletions>\d+) deletion.*$)?', re.M)
re_insertion = re.compile(r'(\d+) insertion')
re_deletion = re.compile(r'(\d+) deletion')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def read_schedules():
"""Return the schedules configuration.
:returns: dictionary from the JSON object in conf/schedules.json
:rtype: dict"""
if not os.path.isfile(SCHEDULES_FILE):
return {'schedules': {}}
try:
with open(SCHEDULES_FILE, 'r') as fd:
schedules = json.loads(fd.read())
for id_ in schedules.get('schedules', {}).keys():
schedule = schedules['schedules'][id_]
try:
schedule['last_history'] = get_last_history(id_)
except:
schedule['last_history'] = {}
continue
return schedules
except Exception as e:
logger.error('unable to read %s: %s' % (SCHEDULES_FILE, e))
return {'schedules': {}}
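# A sketch of the conf/schedules.json layout consumed above.  The field names
# are the ones read by run_job() and scheduler_update(); every value is only
# illustrative.
# {
#   "schedules": {
#     "1": {
#       "title": "Example page",
#       "url": "https://example.com/",
#       "enabled": true,
#       "xpath": "//div[@id='content']",
#       "email": "admin@example.com",
#       "minimum_change": "0.05",
#       "trigger": "interval",
#       "interval_minutes": "30"
#     },
#     "2": {
#       "title": "Morning check",
#       "url": "https://example.com/news",
#       "enabled": true,
#       "trigger": "cron",
#       "cron_crontab": "0 8 * * *"
#     }
#   }
# }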
def write_schedules(schedules):
"""Write the schedules configuration.
:param schedules: the schedules to save
:type schedules: dict
:returns: True in case of success
:rtype: bool"""
try:
with open(SCHEDULES_FILE, 'w') as fd:
fd.write(json.dumps(schedules, indent=2))
except Exception as e:
logger.error('unable to write %s: %s' % (SCHEDULES_FILE, e))
return False
return True
def next_id(schedules):
"""Return the next available integer (as a string) in the list of schedules keys (do not fills holes)
:param schedules: the schedules
:type schedules: dict
:returns: the ID of the next schedule
:rtype: str"""
ids = schedules.get('schedules', {}).keys()
if not ids:
return '1'
return str(max([int(i) for i in ids]) + 1)
def get_schedule(id_, add_id=True, add_history=False):
"""Return information about a single schedule
:param id_: ID of the schedule
:type id_: str
:param add_id: if True, add the ID in the dictionary
:type add_id: bool
:returns: the schedule
:rtype: dict"""
try:
schedules = read_schedules()
except Exception:
return {}
data = schedules.get('schedules', {}).get(id_, {})
if add_history and data:
data['last_history'] = get_last_history(id_)
if add_id:
data['id'] = str(id_)
return data
def select_xpath(content, xpath):
"""Select a portion of a HTML document
:param content: the content of the document
:type content: str
:param xpath: the XPath selector
:type xpath: str
:returns: the selected document
:rtype: str"""
tree = etree.HTML(content)
elems = tree.xpath(xpath)
if not elems:
return content
selected_content = []
for elem in elems:
pieces = []
if elem.text:
pieces.append(elem.text)
for sub_el in elem.getchildren():
try:
sub_el_text = ElementTree.tostring(sub_el, method='html').decode('utf-8', 'replace')
except:
continue
if sub_el_text:
pieces.append(sub_el_text)
selected_content.append(''.join(pieces))
content = ''.join(selected_content).strip()
return content
def run_job(id_=None, force=False, *args, **kwargs):
"""Run a job
:param id_: ID of the schedule to run
:type id_: str
:param force: run even if disabled
:type force: bool
:param args: positional arguments
:type args: tuple
:param kwargs: named arguments
:type kwargs: dict
:returns: True in case of success
:rtype: bool"""
schedule = get_schedule(id_, add_id=False)
url = schedule.get('url')
if not url:
return False
logger.debug('running job id:%s title:%s url: %s' % (id_, schedule.get('title', ''), url))
if not schedule.get('enabled') and not force:
logger.info('not running job %s: disabled' % id_)
return True
req = requests.get(url, allow_redirects=True, timeout=(30.10, 240))
content = req.text
xpath = schedule.get('xpath')
if xpath:
try:
content = select_xpath(content, xpath)
except Exception as e:
logger.warn('unable to extract XPath %s: %s' % (xpath, e))
req_path = urllib.parse.urlparse(req.url).path
base_name = os.path.basename(req_path) or 'index.html'
def _commit(id_, filename, content, queue):
try:
os.chdir('storage/%s' % id_)
except Exception as e:
logger.info('unable to move to storage/%s directory: %s; trying to create it...' % (id_, e))
_created = False
try:
_created = git_create_repo(id_)
except Exception as e:
logger.info('unable to move to storage/%s directory: %s; unable to create it' % (id_, e))
if not _created:
return False
current_lines = 0
if os.path.isfile(filename):
with open(filename, 'r') as fd:
for line in fd:
current_lines += 1
with open(filename, 'w') as fd:
fd.write(content)
p = subprocess.Popen([GIT_CMD, 'add', filename])
p.communicate()
p = subprocess.Popen([GIT_CMD, 'commit', '-m', '%s' % datetime.datetime.utcnow(), '--allow-empty'],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
stdout = stdout.decode('utf-8')
insert = re_insertion.findall(stdout)
if insert:
insert = int(insert[0])
else:
insert = 0
delete = re_deletion.findall(stdout)
if delete:
delete = int(delete[0])
else:
delete = 0
queue.put({'insertions': insert, 'deletions': delete, 'previous_lines': current_lines,
'changes': max(insert, delete)})
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=_commit, args=(id_, base_name, content, queue))
p.start()
res = queue.get()
p.join()
email = schedule.get('email')
if not email:
return True
changes = res.get('changes')
if not changes:
return True
min_change = schedule.get('minimum_change')
previous_lines = res.get('previous_lines')
if min_change and previous_lines:
min_change = float(min_change)
change_fraction = res.get('changes') / previous_lines
if change_fraction < min_change:
return True
# send notification
diff = get_diff(id_).get('diff')
if not diff:
return True
send_email(to=email, subject='%s page changed' % schedule.get('title'),
body='changes:\n\n%s' % diff)
return True
def safe_run_job(id_=None, *args, **kwargs):
"""Safely run a job, catching all the exceptions
:param id_: ID of the schedule to run
:type id_: str
:param args: positional arguments
:type args: tuple
:param kwargs: named arguments
:type kwargs: dict
:returns: True in case of success
:rtype: bool"""
try:
run_job(id_, *args, **kwargs)
except Exception as e:
# send_email()'s first positional argument is the recipient, so log the failure instead
logger.error('error executing job %s: %s' % (id_, e))
def send_email(to, subject='diffido', body='', from_=None):
"""Send an email
:param to: destination address
:type to: str
:param subject: email subject
:type subject: str
:param body: body of the email
:type body: str
:param from_: sender address
:type from_: str
:returns: True in case of success
:rtype: bool"""
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = from_ or EMAIL_FROM
msg['To'] = to
msg["Date"] = formatdate(localtime=True)
starttls = SMTP_SETTINGS.get('smtp-starttls')
use_ssl = SMTP_SETTINGS.get('smtp-use-ssl')
username = SMTP_SETTINGS.get('smtp-username')
password = SMTP_SETTINGS.get('smtp-password')
args = {}
for key, value in SMTP_SETTINGS.items():
if key in ('smtp-starttls', 'smtp-use-ssl', 'smtp-username', 'smtp-password'):
continue
if key in ('smtp-port',):
value = int(value)
key = key.replace('smtp-', '', 1).replace('-', '_')
args[key] = value
try:
if use_ssl:
logger.debug('SMTP SSL connection with args: %s' % repr(args))
with smtplib.SMTP_SSL(**args) as s:
if username:
logger.debug('SMTP LOGIN for username %s and password of length %d' % (username, len(password)))
s.login(username, password)
s.send_message(msg)
else:
tls_args = {}
for key in ('ssl_keyfile', 'ssl_certfile', 'ssl_context'):
if key in args:
tls_args[key.replace('ssl_', '')] = args[key]
del args[key]
logger.debug('SMTP connection with args: %s' % repr(args))
with smtplib.SMTP(**args) as s:
if starttls:
logger.debug('SMTP STARTTLS connection with args: %s' % repr(tls_args))
s.ehlo_or_helo_if_needed()
s.starttls(**tls_args)
if username:
logger.debug('SMTP LOGIN for username %s and password of length %d' % (username, len(password)))
s.login(username, password)
s.send_message(msg)
except Exception as e:
logger.error('unable to send email to %s: %s' % (to, e))
return False
return True
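# A sketch of the SMTP_SETTINGS dictionary used by send_email() (normally
# populated from the smtp-* command-line options in serve(); the values here
# are illustrative).  Apart from the four credential/TLS flags handled above,
# each "smtp-<name>" key becomes an smtplib.SMTP keyword argument.
# SMTP_SETTINGS = {
#     'smtp-host': 'mail.example.com',
#     'smtp-port': 587,
#     'smtp-starttls': True,
#     'smtp-username': 'diffido',
#     'smtp-password': 'secret',
# }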
def get_history(id_, limit=None, add_info=False):
"""Read the history of a schedule
:param id_: ID of the schedule
:type id_: str
:param limit: number of entries to fetch
:type limit: int
:param add_info: add information about the schedule itself
:type add_info: int
:returns: information about the schedule and its history
:rtype: dict"""
def _history(id_, limit, queue):
try:
os.chdir('storage/%s' % id_)
except Exception as e:
logger.info('unable to move to storage/%s directory: %s' % (id_, e))
return queue.put(b'')
cmd = [GIT_CMD, 'log', '--pretty=oneline', '--shortstat']
if limit is not None:
cmd.append('-%s' % limit)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, _ = p.communicate()
queue.put(stdout)
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=_history, args=(id_, limit, queue))
p.start()
res = queue.get().decode('utf-8')
p.join()
history = []
for match in re_commit.finditer(res):
info = match.groupdict()
info['insertions'] = int(info['insertions'] or 0)
info['deletions'] = int(info['deletions'] or 0)
info['changes'] = max(info['insertions'], info['deletions'])
history.append(info)
last_id = None
if history and 'id' in history[0]:
last_id = history[0]['id']
for idx, item in enumerate(history):
item['seq'] = idx + 1
data = {'history': history, 'last_id': last_id}
if add_info:
data['schedule'] = get_schedule(id_)
return data
def get_last_history(id_):
"""Read the last history entry of a schedule
:param id_: ID of the schedule
:type id_: str
:returns: information about the schedule and its history
:rtype: dict"""
history = get_history(id_, limit=1)
hist = history.get('history') or [{}]
return hist[0]
def get_diff(id_, commit_id='HEAD', old_commit_id=None):
"""Return the diff between commits of a schedule
:param id_: ID of the schedule
:type id_: str
:param commit_id: the most recent commit ID; HEAD by default
:type commit_id: str
:param old_commit_id: the older commit ID; if None, the previous commit is used
:type old_commit_id: str
:returns: information about the schedule and the diff between commits
:rtype: dict"""
def _history(id_, commit_id, old_commit_id, queue):
try:
os.chdir('storage/%s' % id_)
except Exception as e:
logger.info('unable to move to storage/%s directory: %s' % (id_, e))
return queue.put(b'')
p = subprocess.Popen([GIT_CMD, 'diff', old_commit_id or '%s~' % commit_id, commit_id],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
queue.put(stdout)
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=_history, args=(id_, commit_id, old_commit_id, queue))
p.start()
res = queue.get().decode('utf-8')
p.join()
schedule = get_schedule(id_)
return {'diff': res, 'schedule': schedule}
def scheduler_update(scheduler, id_):
"""Update a scheduler job, using information from the JSON object
:param scheduler: the TornadoScheduler instance to modify
:type scheduler: TornadoScheduler
:param id_: ID of the schedule that must be updated
:type id_: str
:returns: True in case of success
:rtype: bool"""
schedule = get_schedule(id_, add_id=False)
if not schedule:
logger.warn('unable to update empty schedule %s' % id_)
return False
trigger = schedule.get('trigger')
if trigger not in ('interval', 'cron'):
logger.warn('unable to update empty schedule %s: trigger not in ("cron", "interval")' % id_)
return False
args = {}
if trigger == 'interval':
args['trigger'] = 'interval'
for unit in 'weeks', 'days', 'hours', 'minutes', 'seconds':
if 'interval_%s' % unit not in schedule:
continue
try:
val = schedule['interval_%s' % unit]
if not val:
continue
args[unit] = int(val)
except Exception:
logger.warn('invalid argument on schedule %s: %s parameter %s is not an integer' %
(id_, 'interval_%s' % unit, schedule['interval_%s' % unit]))
if len(args) == 1:
logger.error('no valid interval specified, skipping schedule %s' % id_)
return False
elif trigger == 'cron':
try:
cron_trigger = CronTrigger.from_crontab(schedule['cron_crontab'])
args['trigger'] = cron_trigger
except Exception:
logger.warn('invalid argument on schedule %s: cron_tab parameter %s is not a valid crontab' %
(id_, schedule.get('cron_crontab')))
return False
git_create_repo(id_)
try:
scheduler.add_job(safe_run_job, id=id_, replace_existing=True, kwargs={'id_': id_}, **args)
except Exception as e:
logger.warn('unable to update job %s: %s' % (id_, e))
return False
return True
def scheduler_delete(scheduler, id_):
"""Update a scheduler job, using information from the JSON object
:param scheduler: the TornadoScheduler instance to modify
:type scheduler: TornadoScheduler
:param id_: ID of the schedule
:type id_: str
:returns: True in case of success
:rtype: bool"""
try:
scheduler.remove_job(job_id=id_)
except Exception as e:
logger.warn('unable to delete job %s: %s' % (id_, e))
return False
return git_delete_repo(id_)
def reset_from_schedules(scheduler):
""""Reset all scheduler jobs, using information from the JSON object
:param scheduler: the TornadoScheduler instance to modify
:type scheduler: TornadoScheduler
:returns: True in case of success
:rtype: bool"""
ret = False
try:
scheduler.remove_all_jobs()
for key in read_schedules().get('schedules', {}).keys():
ret |= scheduler_update(scheduler, id_=key)
except Exception as e:
logger.warn('unable to reset all jobs: %s' % e)
return False
return ret
def git_init():
"""Initialize Git global settings"""
p = subprocess.Popen([GIT_CMD, 'config', '--global', 'user.email', '"%s"' % EMAIL_FROM])
p.communicate()
p = subprocess.Popen([GIT_CMD, 'config', '--global', 'user.name', '"Diffido"'])
p.communicate()
def git_create_repo(id_):
"""Create a Git repository
:param id_: ID of the schedule
:type id_: str
:returns: True in case of success
:rtype: bool"""
repo_dir = 'storage/%s' % id_
if os.path.isdir(repo_dir):
return True
p = subprocess.Popen([GIT_CMD, 'init', repo_dir])
p.communicate()
return p.returncode == 0
def git_delete_repo(id_):
"""Delete a Git repository
:param id_: ID of the schedule
:type id_: str
:returns: True in case of success
:rtype: bool"""
repo_dir = 'storage/%s' % id_
if not os.path.isdir(repo_dir):
return False
try:
shutil.rmtree(repo_dir)
except Exception as e:
logger.warn('unable to delete Git repository %s: %s' % (id_, e))
return False
return True
class DiffidoBaseException(Exception):
"""Base class for diffido custom exceptions.
:param message: text message
:type message: str
:param status: numeric http status code
:type status: int"""
def __init__(self, message, status=400):
super(DiffidoBaseException, self).__init__(message)
self.message = message
self.status = status
class BaseHandler(tornado.web.RequestHandler):
"""Base class for request handlers."""
# A property to access the first value of each argument.
arguments = property(lambda self: dict([(k, v[0].decode('utf-8'))
for k, v in self.request.arguments.items()]))
@property
def clean_body(self):
"""Return a clean dictionary from a JSON body, suitable for a query on MongoDB.
:returns: a clean copy of the body arguments
:rtype: dict"""
return escape.json_decode(self.request.body or '{}')
def write_error(self, status_code, **kwargs):
"""Default error handler."""
if isinstance(kwargs.get('exc_info', (None, None))[1], DiffidoBaseException):
exc = kwargs['exc_info'][1]
status_code = exc.status
message = exc.message
else:
message = 'internal error'
self.build_error(message, status=status_code)
def initialize(self, **kwargs):
"""Add every passed (key, value) as attributes of the instance."""
for key, value in kwargs.items():
setattr(self, key, value)
def build_error(self, message='', status=400):
"""Build and write an error message.
:param message: textual message
:type message: str
:param status: HTTP status code
:type status: int
"""
self.set_status(status)
self.write({'error': True, 'message': message})
def build_success(self, message='', status=200):
"""Build and write a success message.
:param message: textual message
:type message: str
:param status: HTTP status code
:type status: int
"""
self.set_status(status)
self.write({'error': False, 'message': message})
class SchedulesHandler(BaseHandler):
"""Schedules handler."""
@gen.coroutine
def get(self, id_=None, *args, **kwargs):
"""Get a schedule."""
if id_ is not None:
return self.write({'schedule': get_schedule(id_, add_history=True)})
schedules = read_schedules()
self.write(schedules)
@gen.coroutine
def put(self, id_=None, *args, **kwargs):
"""Update a schedule."""
if id_ is None:
return self.build_error(message='update action requires an ID')
data = self.clean_body
schedules = read_schedules()
if id_ not in schedules.get('schedules', {}):
return self.build_error(message='schedule %s not found' % id_)
schedules['schedules'][id_] = data
write_schedules(schedules)
scheduler_update(scheduler=self.scheduler, id_=id_)
self.write(get_schedule(id_=id_))
@gen.coroutine
def post(self, *args, **kwargs):
"""Add a schedule."""
data = self.clean_body
schedules = read_schedules()
id_ = next_id(schedules)
schedules['schedules'][id_] = data
write_schedules(schedules)
scheduler_update(scheduler=self.scheduler, id_=id_)
self.write(get_schedule(id_=id_))
@gen.coroutine
def delete(self, id_=None, *args, **kwargs):
"""Delete a schedule."""
if id_ is None:
return self.build_error(message='an ID must be specified')
schedules = read_schedules()
if id_ in schedules.get('schedules', {}):
del schedules['schedules'][id_]
write_schedules(schedules)
scheduler_delete(scheduler=self.scheduler, id_=id_)
self.build_success(message='removed schedule %s' % id_)
class RunScheduleHandler(BaseHandler):
"""Reset schedules handler."""
@gen.coroutine
def post(self, id_, *args, **kwargs):
if run_job(id_, force=True):
return self.build_success('job run')
self.build_error('job not run')
class ResetSchedulesHandler(BaseHandler):
"""Reset schedules handler."""
@gen.coroutine
def post(self, *args, **kwargs):
reset_from_schedules(self.scheduler)
class HistoryHandler(BaseHandler):
"""History handler."""
@gen.coroutine
def get(self, id_, *args, **kwargs):
self.write(get_history(id_, add_info=True))
class DiffHandler(BaseHandler):
"""Diff handler."""
@gen.coroutine
def get(self, id_, commit_id, old_commit_id=None, *args, **kwargs):
self.write(get_diff(id_, commit_id, old_commit_id))
class TemplateHandler(BaseHandler):
"""Handler for the template files in the / path."""
@gen.coroutine
def get(self, *args, **kwargs):
"""Get a template file."""
page = 'index.html'
if args and args[0]:
page = args[0].strip('/')
arguments = self.arguments
self.render(page, **arguments)
def serve():
"""Read configuration and start the server."""
global EMAIL_FROM, SMTP_SETTINGS
jobstores = {'default': SQLAlchemyJobStore(url=JOBS_STORE)}
scheduler = TornadoScheduler(jobstores=jobstores, timezone=pytz.utc)
scheduler.start()
define('port', default=3210, help='run on the given port', type=int)
define('address', default='', help='bind the server at the given address', type=str)
define('ssl_cert', default=os.path.join(os.path.dirname(__file__), 'ssl', 'diffido_cert.pem'),
help='specify the SSL certificate to use for secure connections')
define('ssl_key', default=os.path.join(os.path.dirname(__file__), 'ssl', 'diffido_key.pem'),
help='specify the SSL private key to use for secure connections')
define('admin-email', default='', help='email address of the site administrator', type=str)
define('smtp-host', default='localhost', help='SMTP server address', type=str)
define('smtp-port', default=0, help='SMTP server port', type=int)
define('smtp-local-hostname', default=None, help='SMTP local hostname', type=str)
define('smtp-use-ssl', default=False, help='Use SSL to connect to the SMTP server', type=bool)
define('smtp-starttls', default=False, help='Use STARTTLS to connect to the SMTP server', type=bool)
define('smtp-ssl-keyfile', default=None, help='SMTP SSL key file', type=str)
define('smtp-ssl-certfile', default=None, help='SMTP SSL cert file', type=str)
define('smtp-ssl-context', default=None, help='SMTP SSL context', type=str)
define('smtp-username', default='', help='SMTP username', type=str)
define('smtp-password', default='', help='SMTP password', type=str)
define('debug', default=False, help='run in debug mode', type=bool)
define('config', help='read configuration file',
callback=lambda path: tornado.options.parse_config_file(path, final=False))
if not options.config and os.path.isfile(DEFAULT_CONF):
tornado.options.parse_config_file(DEFAULT_CONF, final=False)
tornado.options.parse_command_line()
if options.admin_email:
EMAIL_FROM = options.admin_email
for key, value in options.as_dict().items():
if key.startswith('smtp-'):
SMTP_SETTINGS[key] = value
if options.debug:
logger.setLevel(logging.DEBUG)
ssl_options = {}
if os.path.isfile(options.ssl_key) and os.path.isfile(options.ssl_cert):
ssl_options = dict(certfile=options.ssl_cert, keyfile=options.ssl_key)
init_params = dict(listen_port=options.port, logger=logger, ssl_options=ssl_options,
scheduler=scheduler)
git_init()
_reset_schedules_path = r'schedules/reset'
_schedule_run_path = r'schedules/(?P<id_>\d+)/run'
_schedules_path = r'schedules/?(?P<id_>\d+)?'
_history_path = r'schedules/?(?P<id_>\d+)/history'
_diff_path = r'schedules/(?P<id_>\d+)/diff/(?P<commit_id>[0-9a-f]+)/?(?P<old_commit_id>[0-9a-f]+)?/?'
application = tornado.web.Application([
(r'/api/%s' % _reset_schedules_path, ResetSchedulesHandler, init_params),
(r'/api/v%s/%s' % (API_VERSION, _reset_schedules_path), ResetSchedulesHandler, init_params),
(r'/api/%s' % _schedule_run_path, RunScheduleHandler, init_params),
(r'/api/v%s/%s' % (API_VERSION, _schedule_run_path), RunScheduleHandler, init_params),
(r'/api/%s' % _history_path, HistoryHandler, init_params),
(r'/api/v%s/%s' % (API_VERSION, _history_path), HistoryHandler, init_params),
(r'/api/%s' % _diff_path, DiffHandler, init_params),
(r'/api/v%s/%s' % (API_VERSION, _diff_path), DiffHandler, init_params),
(r'/api/%s' % _schedules_path, SchedulesHandler, init_params),
(r'/api/v%s/%s' % (API_VERSION, _schedules_path), SchedulesHandler, init_params),
(r'/?(.*)', TemplateHandler, init_params),
],
static_path=os.path.join(os.path.dirname(__file__), 'dist/static'),
template_path=os.path.join(os.path.dirname(__file__), 'dist/'),
debug=options.debug)
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options or None)
logger.info('Start serving on %s://%s:%d', 'https' if ssl_options else 'http',
options.address if options.address else '127.0.0.1',
options.port)
http_server.listen(options.port, options.address)
try:
IOLoop.instance().start()
except (KeyboardInterrupt, SystemExit):
pass
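# Hedged client sketch (not part of diffido): given the routes registered in
# serve(), the schedules API might be exercised roughly as below.  It assumes
# the server runs without SSL on the default port; the JSON fields are
# illustrative, since SchedulesHandler stores the request body verbatim.
def example_api_calls(base='http://127.0.0.1:3210/api/schedules'):
    import json
    import urllib.request
    body = json.dumps({'title': 'example', 'url': 'https://example.com'}).encode('utf-8')
    req = urllib.request.Request(base, data=body, method='POST')
    with urllib.request.urlopen(req) as resp:
        created = json.loads(resp.read().decode('utf-8'))  # the newly created schedule
    with urllib.request.urlopen(base) as resp:
        all_schedules = json.loads(resp.read().decode('utf-8'))  # full schedules dict
    return created, all_schedules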
if __name__ == '__main__':
serve()
|
jabberChat.py
|
#!/usr/bin/python
"""
__version__ = "$Revision: 1.34 $"
__date__ = "$Date: 2005/12/13 11:13:23 $"
"""
from PythonCard import configuration, dialog, model, sound, timer
import threading
import Queue
import ConfigParser
import wx
import os
import jabber
import time
import shutil
from chatWindow import ChatWindow
from groupChatWindow import GroupChatWindow
from connection import JabberConnection
import conferenceDialog
CONFIG_FILE = 'jabberChat.ini'
class Chat(model.Background):
def on_initialize(self, event):
self.initSizers()
self.loadConfig()
self.displayOfflineUsers = 0
self.roster = {}
self.chatWindows = {}
self.msgQueue = Queue.Queue()
self.rosterQueue = Queue.Queue()
self.jabberConnection = JabberConnection(self, self.account)
self.thread = threading.Thread(target = self.jabberConnection.spinMyWheels)
self.thread.setDaemon(1)
self.thread.start()
self.idleTimer = timer.Timer(self.components.listRoster, -1)
self.idleTimer.start(1000) # 1 second
self.doResetIdle()
# the Available and Do Not Disturb strings
# should probably be settable as resource strings
# the connection module references them as menu item labels
# when the user starts the program, just default to Available
self.statusBar.text = "Available"
def initSizers(self):
sizer1 = wx.BoxSizer(wx.VERTICAL)
sizer1.Add(self.components.listRoster, 1, wx.EXPAND)
sizer1.Fit(self)
sizer1.SetSizeHints(self)
self.panel.SetSizer(sizer1)
self.panel.SetAutoLayout(1)
self.panel.Layout()
def setChatWindowFont(self):
for win in self.chatWindows.itervalues():
win.setFonts(self.config['font'])
def on_doSetFont_command(self, event):
result = dialog.fontDialog(self, self.components.fldDocument.font)
if result.accepted:
self.config['font'] = result.font
self.setChatWindowFont()
def loadConfig(self):
self.configPath = os.path.join(configuration.homedir, 'jabberchat')
if not os.path.exists(self.configPath):
os.mkdir(self.configPath)
basePath = self.application.applicationDirectory
configPath = os.path.join(self.configPath, CONFIG_FILE)
if not os.path.exists(configPath):
shutil.copy2(os.path.join(basePath, CONFIG_FILE), configPath)
namesPath = os.path.join(self.configPath, 'names.txt')
if not os.path.exists(namesPath):
shutil.copy2(os.path.join(basePath, 'names.txt'), namesPath)
parser = ConfigParser.ConfigParser()
parser.read(configPath)
self.account = {}
self.account['server'] = parser.get('Account', 'server')
self.account['username'] = parser.get('Account', 'username')
self.account['password'] = parser.get('Account', 'password')
self.account['resource'] = parser.get('Account', 'resource')
self.config = {}
        # this needs to be made safe instead of using eval (see the safeParseOption sketch after this method)
try:
self.config['font'] = eval(parser.get('ChatWindow', 'font', None))
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
self.config['font'] = None
try:
self.config['playsound'] = eval(parser.get('Options', 'playsound'))
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
self.config['playsound'] = 0
try:
self.config['idletime'] = eval(parser.get('Options', 'idletime')) * 60
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
self.config['idletime'] = 0
self.displayNames = {}
try:
f = open(namesPath)
data = f.readlines()
f.close()
for line in data:
jid, name = line.rstrip().split(',')
self.displayNames[jid] = name
except IOError:
pass
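    # Hedged sketch, not part of the original jabberChat code: the eval() calls
    # above could be replaced with ast.literal_eval, which only accepts Python
    # literals.  This helper is illustrative and is not wired into loadConfig.
    def safeParseOption(self, parser, section, option, default=None):
        import ast
        try:
            return ast.literal_eval(parser.get(section, option))
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError,
                ValueError, SyntaxError):
            return default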
# when user selects "Available"
# we need to reset
def doResetIdle(self):
self.checkForIdle = 1
self.userIsIdle = 0
self.startIdle = time.time()
self.lastPosition = wx.GetMousePosition()
def on_idle(self, event):
# handle incoming Jabber messages
if not self.msgQueue.empty():
msg = self.msgQueue.get()
self.doDisplayMsgReceived(msg)
event.RequestMore()
# handle roster changes
if not self.rosterQueue.empty():
jid, roster = self.rosterQueue.get()
if jid is None:
self.updateRosterDisplay(roster)
else:
self.updateGroupChatRosterDisplay(jid, roster)
event.RequestMore()
# KEA 2002-11-17
# updates to how the roster displays
# I would like to move this into a MultiColumnList
# with icons for online/offline...
# 2002-11-30
# moved from JabberConnection class
# I'm still not sure that thread conflicts won't occur
# but this at least seems safer
# one thing that probably still needs to be added is a way
# of always using the same display order
def updateRosterDisplay(self, roster):
items = []
# use this instead of displayed list since the
# display may have a name or jid and the roster
# itself is a dictionary that doesn't match one to one
# with self._parent.components.listRoster.items
self.displayedRosterList = []
# KEA 2003-06-04
# roster dictionary might change in size
# due to other thread, so make a list of keys
rosterList = roster.keys()
for key in rosterList:
status = roster[key][0]
show = roster[key][1]
if show:
show = " (%s)" % roster[key][1]
else:
show = ''
name = self.displayNames.get(key, key)
if status == 'online':
items.append('* ' + name + show)
self.displayedRosterList.append(key)
elif self.displayOfflineUsers:
items.append(' ' + name)
self.displayedRosterList.append(key)
self.components.listRoster.items = items
self.roster = roster
def updateGroupChatRosterDisplay(self, jid, roster):
listRoster = self.chatWindows[jid].components.listRoster
items = []
for key in roster:
status = roster[key][0]
if status:
items.append("%s (%s)" % (key, str(status)))
else:
items.append(key)
items.sort()
listRoster.items = items
def on_listRoster_timer(self, event):
# check for user idle if user is currently Available
# but don't bother if Do Not Disturb... are set instead
if self.checkForIdle and (self.config['idletime'] != 0):
position = wx.GetMousePosition()
if position == self.lastPosition:
if self.userIsIdle == 0:
# check whether we've been idle too long
if (time.time() - self.startIdle) > self.config['idletime']:
self.userIsIdle = 1
self.jabberConnection.sendPresence("Idle")
self.statusBar.text = "Idle"
#print "***user is idle***"
else:
if self.userIsIdle:
#print "***user is no longer idle"
#print "there was no activity for", round((time.time() - self.startIdle) / 60), "minutes"
self.userIsIdle = 0
self.jabberConnection.sendPresence("Available")
self.statusBar.text = "Available"
self.startIdle = time.time()
self.lastPosition = position
def createChatWindow(self, jid):
# the jid should be in the form of username@domain
win = model.childWindow(self, ChatWindow)
win.setFonts(self.config['font'])
# override resource position
#win.SetPosition((425, -1))
# just in case, convert to string, will this clear up Unicode issues?
jid = str(jid)
toName = self.displayNames.get(jid, jid)
win.setToJID(jid, toName)
win.visible = True
self.chatWindows[jid] = win
def createGroupChatWindow(self, jid, nickname=None):
# the jid should be in the form of username@domain
win = model.childWindow(self, GroupChatWindow)
win.setFonts(self.config['font'])
# override resource position
#win.SetPosition((425, -1))
# just in case, convert to string, will this clear up Unicode issues?
jid = str(jid)
toName = self.displayNames.get(jid, jid)
if nickname is None:
nickname = self.jabberConnection.username
win.nickname = nickname
win.setToJID(jid, toName)
win.visible = True
self.chatWindows[jid] = win
self.jabberConnection.joinGroupChat(jid, nickname)
def playIncomingSound(self):
if self.config['playsound']:
try:
filename = os.path.join(self.application.applicationDirectory, 'incoming.wav')
snd = sound.Sound(filename)
snd.play(1, 0)
except IOError:
pass
def doDisplayMsgReceived(self, data):
if data is not None:
jid, txt = data
jid = str(jid)
try:
jid, resource = jid.split('/')
except ValueError:
resource = "default"
if jid not in self.chatWindows:
self.createChatWindow(jid)
self.playIncomingSound()
#self.components.fldTranscript.appendText(data + '\n')
self.chatWindows[jid].appendMessage(jid + "/" + resource, txt)
else:
pass
# this code is dependent on the format
# of the text in the list
# so a change to the updateRosterDisplay
# method in the connection module must be
# reflected here until the jids are stored
# outside the list
def on_listRoster_mouseDoubleClick(self, event):
jid = self.displayedRosterList[event.target.selection]
if jid not in self.chatWindows:
self.createChatWindow(jid)
else:
self.chatWindows[jid].visible = True
# make sure the chat window is in front
# and isn't minimized (iconized)
if self.chatWindows[jid].IsIconized():
self.chatWindows[jid].Iconize(0)
wx.CallAfter(self.chatWindows[jid].Raise)
def on_close(self, event):
self.jabberConnection.keepRunning = 0
self.jabberConnection.disconnect()
self.idleTimer.stop()
event.skip()
def on_changeStatus_command(self, event):
self.jabberConnection.sendPresence(event.target.label)
self.statusBar.text = event.target.label
if event.target.label == "Available":
self.doResetIdle()
else:
self.checkForIdle = 0
def on_menuOptionsDisplayOfflineUsers_select(self, event):
self.displayOfflineUsers = event.IsChecked()
self.updateRosterDisplay(self.roster)
def on_menuFileJoinConference_select(self, event):
room = ''
server = None
nickname = self.jabberConnection.username
result = conferenceDialog.conferenceDialog(self, '', server, nickname)
if result.accepted:
room = result.room
server = result.server
nickname = result.nickname
jid = room + '@' + server
self.createGroupChatWindow(jid, nickname)
if __name__ == '__main__':
app = model.Application(Chat)
app.MainLoop()
|
influence.py
|
import networkx as nx
import numpy as np
from icm import sample_live_icm, indicator, make_multilinear_objective_samples
from utils import greedy
from multiprocessing import Process, Manager
PROP_PROBAB = 0.1
BUDGET = 10
PROCESSORS = 8
SAMPLES = 100
def multi_to_set(f,g):
'''
Takes as input a function defined on indicator vectors of sets, and returns
a version of the function which directly accepts sets
'''
def f_set(S):
return f(indicator(S, len(g)))
return f_set
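# Hedged usage sketch (not part of the original module): a toy objective defined
# on indicator vectors, wrapped so it can be called on a plain set of nodes.
# It assumes indicator(S, n) returns a length-n 0/1 vector, as the docstring
# above implies.
def _example_multi_to_set():
    g = nx.path_graph(5)
    f_multi = lambda x: float(sum(x))  # counts the selected nodes
    f_set = multi_to_set(f_multi, g)
    return f_set({0, 2})  # expected to be 2.0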
def influence(graph, full_graph, samples=SAMPLES):
for u,v in graph.edges():
graph[u][v]['p']=PROP_PROBAB
def genoptfunction(graph, samples=1000):
live_graphs = sample_live_icm(graph, samples)
f_multi = make_multilinear_objective_samples(live_graphs, list(graph.nodes()), list(graph.nodes()), np.ones(len(graph)))
f_set = multi_to_set(f_multi, graph)
return f_set
f_set = genoptfunction(graph, samples)
S, obj = greedy(list(range(len(graph))), BUDGET, f_set)
f_set1 = genoptfunction(full_graph, samples)
opt_obj = f_set1(S)
return opt_obj, obj, S
def parallel_influence(graph, full_graph, times, samples=SAMPLES, influence=influence):
def influence_wrapper(l,g,fg,s,influence=influence):
ans = influence(g,fg,s)
l.append(ans[0])
l = Manager().list()
processes = [Process(target=influence_wrapper, args=(l, graph, full_graph, samples)) for _ in range(times)]
i=0
while i<len(processes):
        j = min(i + PROCESSORS, len(processes))
ps = processes[i:j]
for p in ps:
p.start()
for p in ps:
p.join()
i+= PROCESSORS
l = list(l)
return np.mean(l)
if __name__ == "__main__":
g = nx.erdos_renyi_graph(100,0.5)
for u,v in g.edges():
g[u][v]['p'] = 0.1
import time
start = time.time()
print(parallel_influence(g,g,10, 1000))
end1 = time.time()
print('Parallel took', end1-start, 'seconds')
start = time.time()
ls = [influence(g,g,100)[0] for _ in range(10)]
print(np.mean(ls))
end1 = time.time()
print('Seq took', end1-start, 'seconds')
|
object_detections_EAM.py
|
import cv2
import socket
import struct
import threading
import json
import time
#import netifaces as ni
import numpy as np
import os
from utils.eucl_tracker import EuclideanDistTracker
from modules.detector.predictor import COCODemo
from kafka import KafkaProducer
from utils.objects import Person
from utils.functions import *
from utils.frame_message import Frame
#import multiprocessing
import multiprocessing as mp
def display_frame(det_q):
while True:
if not det_q.empty():
frame=det_q.get()
image=frame[0]
CameraId= str(frame[1].hex())
# Display RGB frame (for test purposes)
cv2.namedWindow('RGB from: '+CameraId,cv2.WINDOW_AUTOSIZE)
cv2.imshow('RGB from: '+CameraId,image)
key= cv2.waitKey(1)
if key & 0xFF == ord('q') or key==27:
cv2.destroyAllWindows()
break
def handle_client(Client, address,q):
# Read messages
while True:
data = bytearray()
new_message = True
payload = 18
while True:
msg = Client.recv(payload-len(data))
if new_message:
if msg[:1].hex()!='a5' or msg[1:2].hex()!='5a':
continue
payload = struct.unpack('l',msg[2:10])[0] + 18
data.extend(msg)
new_message= False
continue
data.extend(msg)
if len(data)>=payload:
break
# Create frame from messages
current_frame = Frame(bytes(data))
# Push to queue
q.put(current_frame)
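# Hedged sketch (not part of this module): handle_client expects an 18-byte
# header that starts with the magic bytes a5 5a and carries the body length as
# a native long at offset 2.  A sender written against that reading of the
# parsing code above might frame a payload like this; the meaning of the
# remaining 8 header bytes is unknown here and they are left as zeros.
def example_pack_frame(payload):
    # 2 magic bytes + 8-byte native long (on 64-bit platforms) + 8 reserved bytes
    header = b'\xa5\x5a' + struct.pack('l', len(payload)) + bytes(8)
    return header + payload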
def run_detections(frame_q,det_q):
#Initializations
detector = COCODemo(min_image_size=640, confidence_threshold=0.9)
#tracker = EuclideanDistTracker()
#producer = KafkaProducer(bootstrap_servers=['195.251.117.126:9091'],
# value_serializer=lambda x:
# json.dumps(x).encode('utf-8'))
producer = KafkaProducer(bootstrap_servers=[str(os.environ['IP_KAFKA']) + ':' + str(os.environ['PORT_KAFKA'])],
value_serializer=lambda x:
json.dumps(x).encode('utf-8'))
start_time = time.time()
unique_dets=[]
counter_uniq_object_id=0
last_uniq_object_id=0
while True:
if not frame_q.empty():
frame = frame_q.get()
result, result_labels, result_scores, result_bboxes = detector.run_on_opencv_image(frame.image)
detections = []
if result_labels!=[]:
for x in range(len(result_labels)):
# Calculate depth of area around object center
xc = int(round(result_bboxes[x][0]+result_bboxes[x][2])//2)
yc = int(round(result_bboxes[x][1]+result_bboxes[x][3])//2)
object_width = round(result_bboxes[x][2]-result_bboxes[x][0])
object_height = round(result_bboxes[x][3]-result_bboxes[x][1])
z = get_depth_of_center(xc,yc,object_width, object_height, frame.depth)
                    if z==0: # skip objects that are too close to the camera
continue
# Create Detection object
obj = Person(result_labels[x],result_scores[x],result_bboxes[x],x,z)
isVictim(int(result_bboxes[x][0]),int(result_bboxes[x][1]),int(result_bboxes[x][2]),int(result_bboxes[x][3]), frame.depth, obj, z)
if result_labels[x]=="person":
detections.append(obj)
if detections!=[]:
if unique_dets!=[]:
for det in detections:
det.draw_detection(frame.image)
exists = False
for uniq in unique_dets:
if calculate_spatial_distance(det,uniq) < 0.2:
exists = True
break
if not exists:
det.update_id(counter_uniq_object_id)
counter_uniq_object_id +=1
unique_dets.append(det)
else:
for det in detections:
det.draw_detection(frame.image)
det.update_id(counter_uniq_object_id)
counter_uniq_object_id +=1
unique_dets.append(det)
det_q.put([frame.image,frame.CameraId])
#print("Pushed detection to queue")
if (time.time() - start_time) > 9:
if unique_dets!=[]:
# Send new detections over Kafka
if last_uniq_object_id==0:
                        kafka_thread = threading.Thread(name='non-daemon', target=generates_msg, args=(unique_dets, producer))
kafka_thread.start()
last_uniq_object_id= counter_uniq_object_id
else:
if unique_dets[last_uniq_object_id:]!=[]:
                            kafka_thread = threading.Thread(name='non-daemon', target=generates_msg, args=(unique_dets[last_uniq_object_id:], producer))
kafka_thread.start()
last_uniq_object_id= counter_uniq_object_id
#unique_dets = [] #Set list of detection object as empty eath time send object to kafka
start_time = time.time()
## Track detections
#tracked_detections = tracker.update(detections)
#
#
#if tracked_detections!=[]:
# for t in tracked_detections:
# # Display detections
# t.draw_detection(frame.image)
#
# if t.obj_id not in unique_ids:
# #time.sleep(1)
# # Send new detections over Kafka
# kafka_thread = threading.Thread(name='non-daemon', target=generates_msg(t,producer))
#
# kafka_thread.start()
# #print("Sent detection message to kafka.")
# unique_ids.append(t.obj_id)
#det_q.put(frame.image)
def main():
# Initialize TCP server
ServerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#host= ni.ifaddresses('eno1')[ni.AF_INET][0]['addr'] # Return the IP of server
#port = 4567
host= str(os.environ['IP_EAM']) #Get IP of EAM from .env file
port = int(os.environ['PORT_EAM']) #Get port of EAM from .env file
try:
ServerSocket.bind((host, port))
except socket.error as e:
print(str(e))
# Initialize queues and processes
frame_q = mp.Queue()
det_q = mp.Queue()
detector_process = mp.Process(target=run_detections, args=(frame_q, det_q))
display_process=mp.Process(target=display_frame, args=(det_q,))
detector_process.start()
display_process.start()
# Listen for connections
ServerSocket.listen()
try:
while True:
Client, address = ServerSocket.accept()
client_process = mp.Process(target=handle_client, args=(Client, address,frame_q))
client_process.start()
except KeyboardInterrupt:
client_process.join()
detector_process.join()
display_process.join()
cv2.destroyAllWindows()
if __name__=="__main__":
main()
|
jetson_infer_op.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import math
import argparse
from threading import Thread
# some particular ops
black_list = [
# special op
'test_custom_relu_op_setup',
'test_custom_relu_op_jit',
'test_python_operator_overriding',
'test_c_comm_init_all_op',
'test_c_embedding_op',
# train op
'test_imperative_optimizer',
'test_imperative_optimizer_v2',
'test_momentum_op',
'test_sgd_op',
'test_sgd_op_bf16',
'test_warpctc_op',
# sync op
'test_sync_batch_norm_op',
# case too large
'test_reduce_op',
'test_transpose_op'
]
op_diff_list = [
# diff<1E-7,it's right
'test_elementwise_mul_op'
]
def parse_arguments():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('--shell_name',
type=str,
default='get_op_list.sh',
help='please input right name')
parser.add_argument('--op_list_file',
type=str,
default='list_op.txt',
help='please input right name')
return parser.parse_args()
def search_file(file_name, path, file_path):
"""
:param file_name:target
:param path: to search this path
:param file_path: result
:return:
"""
for item in os.listdir(path):
if os.path.isdir(os.path.join(path, item)):
search_file(file_name, os.path.join(path, item), file_path)
else:
if file_name in item:
file_path.append(os.path.join(path, file_name))
def get_prefix(line, end_char='d'):
"""
:param line: string_demo
:param end_char: copy the prefix of string_demo until end_char
:return: prefix
"""
i = 0
prefix = ''
while (line[i] != end_char):
prefix += line[i]
i += 1
return prefix
def add_import_skip_return(file, pattern_import, pattern_skip, pattern_return):
"""
:param file: the file need to be changed
:param pattern_import: import skip
:param pattern_skip: @skip
:param pattern_return: add return
:return:
"""
pattern_1 = re.compile(pattern_import)
pattern_2 = re.compile(pattern_skip)
pattern_3 = re.compile(pattern_return)
file_data = ""
# change file
with open(file, "r", encoding="utf-8") as f:
for line in f:
# add import skip_check_grad_ci
match_obj = pattern_1.search(line)
if match_obj is not None:
line = line[:-1] + ", skip_check_grad_ci\n"
print("### add import skip_check_grad_ci ####")
# add @skip_check_grad_ci
match_obj = pattern_2.search(line)
if match_obj is not None:
file_data += "@skip_check_grad_ci(reason='jetson do n0t neeed this !')\n"
print("### add @skip_check_grad_ci ####")
# delete test_grad_output
match_obj = pattern_3.search(line)
if match_obj is not None:
file_data += line
file_data += get_prefix(line)
file_data += " return\n"
print("### add return for function ####")
continue
file_data += line
# save file
with open(file, "w", encoding="utf-8") as f:
f.write(file_data)
def get_op_list(op_list_file='list_op.txt'):
"""
:param op_list_file: op list file
:return: list of op
"""
op_list = []
with open(op_list_file, "r", encoding="utf-8") as f:
for line in f:
            # strip the trailing newline before comparing / storing
            stripped = line.strip()
            if not stripped or stripped in black_list:
                continue
            op_list.append(stripped)
return op_list
def set_diff_value(file, atol="1e-5", inplace_atol="1e-7"):
"""
:param file: refer to op_test.py
:param atol: refer to op_test.py
:param inplace_atol:
:return:
"""
os.system("sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
",inplace_atol=" + inplace_atol + ",/g\' " + file)
def change_op_file(start=0, end=0, op_list_file='list_op.txt', path='.'):
"""
:param start:
:param end:
:param op_list_file: op_list
:param path: just the file in this path
:return:
"""
test_op_list = get_op_list(op_list_file)
file_path = []
for id in range(start, end):
item = test_op_list[id]
print(id, ":", item)
search_file(item + '.py', os.path.abspath(path), file_path)
if len(file_path) == 0:
print("'", item, "' is not a python file!")
continue
file_with_path = file_path[0]
# pattern
pattern_import = ".*import OpTest.*"
pattern_skip = "^class .*\(OpTest\):$"
pattern_return = "def test.*grad.*\):$"
# change file
add_import_skip_return(file_with_path, pattern_import, pattern_skip,
pattern_return)
# op_diff
if item in op_diff_list:
set_diff_value(file_with_path)
file_path.clear()
def run_multi_thread(list_file, thread_num=4):
"""
:param list_file:
:param thread_num:
:return:
"""
length = len(get_op_list(list_file))
thread_list = []
start = 0
end = 0
for item in range(thread_num):
# set argument
start = math.floor(item / thread_num * length)
end = math.floor((item + 1) / thread_num * length)
print("thread num-", item, ":", start, end)
thread = Thread(target=change_op_file, args=(start, end))
thread_list.append(thread)
# start a thread
thread.start()
# wait all thread
for item in thread_list:
item.join()
# add a flag
with open("flag_change_file.txt", "w", encoding="utf-8") as f:
f.write("change successfully!")
print("------change successfully!-------")
def transform_list_to_str(list_op):
"""
:param list_op:
:return:
"""
res = ""
for item in list_op:
tmp = "^" + item + "$|"
res += tmp
return res[:-1]
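# Hedged illustration (not part of the tooling above): transform_list_to_str
# builds the anchored alternation that "ctest -R" expects.
def _example_transform_list_to_str():
    pattern = transform_list_to_str(['test_abs_op', 'test_sign_op'])
    assert pattern == '^test_abs_op$|^test_sign_op$'
    return pattern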
def run_file_change(op_list_file):
"""
if file has changed, the file should not be changed again.
:param op_list_file:
:return:
"""
if (os.path.exists("flag_change_file.txt")):
print(
"-----maybe op_file has changed, so don't need to change again------"
)
else:
run_multi_thread(op_list_file)
def run_test_first(op_list_file):
"""
run all op test.
:param op_list_file:
:return:
"""
old_list = get_op_list(op_list_file)
new_list = filter(lambda x: x not in black_list, old_list)
op_test = transform_list_to_str(new_list)
os.system("ctest -R \"(" + op_test + ")\" >& test_op_log.txt")
def run_test_second():
"""
run failed op again.
:return:
"""
os.system(
"sed -n '/(Failed)$/p' test_op_log.txt | awk '{print $3}' >& rerun_op.txt"
)
rerun_list = get_op_list('rerun_op.txt')
if (len(rerun_list)):
print("-------there are " + str(len(rerun_list)) +
" op(s) need to rerun!!!-------")
for failed_op in rerun_list:
os.system("ctest -R \"(" + failed_op + ")\" ")
else:
print("-------all op passed successfully!!!-------")
if __name__ == '__main__':
arg = parse_arguments()
print("------start get op list!------")
os.system("bash " + arg.shell_name + " " + arg.op_list_file)
print("------start change op file!------")
run_file_change(arg.op_list_file)
print("------start run op test first!------")
run_test_first(arg.op_list_file)
print("------start run failed_op test!------")
run_test_second()
print("------do well!------")
|
__init__.py
|
"""
Create ssh executor system
"""
import base64
import binascii
import copy
import datetime
import getpass
import hashlib
import logging
import multiprocessing
import os
import queue
import re
import subprocess
import sys
import tarfile
import tempfile
import time
import uuid
import salt.client.ssh.shell
import salt.client.ssh.wrapper
import salt.config
import salt.defaults.exitcodes
import salt.exceptions
import salt.fileclient
import salt.loader
import salt.log
import salt.minion
import salt.output
import salt.pillar
import salt.roster
import salt.serializers.yaml
import salt.state
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.json
import salt.utils.network
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
from salt.template import compile_template
from salt.utils.platform import is_junos, is_windows
from salt.utils.process import Process
from salt.utils.zeromq import zmq
try:
import saltwinshell
HAS_WINSHELL = True
except ImportError:
HAS_WINSHELL = False
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = "/var/tmp/.%%USER%%_%%FQDNUUID%%_salt"
# RSTR is just a delimiter to distinguish the beginning of salt STDOUT
# and STDERR. There is no special meaning. Messages prior to RSTR in
# stderr and stdout are either from SSH or from the shim.
#
# RSTR on both stdout and stderr:
# no errors in SHIM - output after RSTR is from salt
# No RSTR in stderr, RSTR in stdout:
# no errors in SSH_SH_SHIM, but SHIM commands for salt master are after
# RSTR in stdout
# No RSTR in stderr, no RSTR in stdout:
# Failure in SHIM
# RSTR in stderr, No RSTR in stdout:
# Undefined behavior
RSTR = "_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878"
# The regex to find RSTR in output - Must be on an output line by itself
# NOTE - must use non-grouping match groups or output splitting will fail.
RSTR_RE = r"(?:^|\r?\n)" + RSTR + r"(?:\r?\n|$)"
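# Hedged illustration (not part of Salt's public API): downstream code separates
# SSH/shim noise from the salt payload by splitting captured output on RSTR_RE.
# A minimal sketch of that split:
def _example_split_on_rstr(output):
    parts = re.split(RSTR_RE, output, 1)
    # everything after the first delimiter is treated as salt output
    return parts[1] if len(parts) > 1 else output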
# METHODOLOGY:
#
# 1) Make the _thinnest_ /bin/sh shim (SSH_SH_SHIM) to find the python
# interpreter and get it invoked
# 2) Once a qualified python is found start it with the SSH_PY_SHIM
# 3) The shim is converted to a single semicolon separated line, so
# some constructs are needed to keep it clean.
# NOTE:
# * SSH_SH_SHIM is generic and can be used to load+exec *any* python
# script on the target.
# * SSH_PY_SHIM is in a separate file rather than stuffed in a string
# in salt/client/ssh/__init__.py - this makes testing *easy* because
# it can be invoked directly.
# * SSH_PY_SHIM is base64 encoded and formatted into the SSH_SH_SHIM
# string. This makes the python script "armored" so that it can
# all be passed in the SSH command and will not need special quoting
# (which likely would be impossible to do anyway)
# * The formatted SSH_SH_SHIM with the SSH_PY_SHIM payload is a bit
# big (~7.5k). If this proves problematic for an SSH command we
# might try simply invoking "/bin/sh -s" and passing the formatted
# SSH_SH_SHIM on SSH stdin.
# NOTE: there are two passes of formatting:
# 1) Substitute in static values
# - EX_THIN_PYTHON_INVALID - exit code if a suitable python is not found
# 2) Substitute in instance-specific commands
# - DEBUG - enable shim debugging (any non-zero string enables)
# - SUDO - load python and execute as root (any non-zero string enables)
# - SSH_PY_CODE - base64-encoded python code to execute
# - SSH_PY_ARGS - arguments to pass to python code
# This shim generically loads python code . . . and *no* more.
# - Uses /bin/sh for maximum compatibility - then jumps to
# python for ultra-maximum compatibility.
#
# 1. Identify a suitable python
# 2. Jump to python
# Note the list-comprehension syntax to define SSH_SH_SHIM is needed
# to be able to define the string with indentation for readability but
# still strip the white space for compactness and to avoid issues with
# some multi-line embedded python code having indentation errors
SSH_SH_SHIM = "\n".join(
[
s.strip()
for s in r'''/bin/sh << 'EOF'
set -e
set -u
DEBUG="{{DEBUG}}"
if [ -n "$DEBUG" ]
then set -x
fi
SET_PATH="{{SET_PATH}}"
if [ -n "$SET_PATH" ]
then export PATH={{SET_PATH}}
fi
SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
SUDO_USER="{{SUDO_USER}}"
if [ "$SUDO" ] && [ "$SUDO_USER" ]
then SUDO="sudo -u {{SUDO_USER}}"
elif [ "$SUDO" ] && [ -n "$SUDO_USER" ]
then SUDO="sudo "
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python3 /usr/libexec/platform-python python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
do
if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
then
py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function;import sys; print(sys.executable);'`
cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
.format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
"$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
else
exec $SUDO "$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
fi
exit 0
else
continue
fi
done
echo "ERROR: Unable to locate appropriate python command" >&2
exit $EX_PYTHON_INVALID
EOF'''.format(
EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,
).split(
"\n"
)
]
)
if not is_windows() and not is_junos():
shim_file = os.path.join(os.path.dirname(__file__), "ssh_py_shim.py")
if not os.path.exists(shim_file):
# On esky builds we only have the .pyc file
shim_file += "c"
with salt.utils.files.fopen(shim_file) as ssh_py_shim:
SSH_PY_SHIM = ssh_py_shim.read()
else:
SSH_PY_SHIM = None
log = logging.getLogger(__name__)
class SSH:
"""
Create an SSH execution system
"""
ROSTER_UPDATE_FLAG = "#__needs_update"
def __init__(self, opts):
self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True}
pull_sock = os.path.join(opts["sock_dir"], "master_event_pull.ipc")
if os.path.exists(pull_sock) and zmq:
self.event = salt.utils.event.get_event(
"master", opts["sock_dir"], opts["transport"], opts=opts, listen=False
)
else:
self.event = None
self.opts = opts
if self.opts["regen_thin"]:
self.opts["ssh_wipe"] = True
if not salt.utils.path.which("ssh"):
raise salt.exceptions.SaltSystemExit(
code=-1,
msg=(
"No ssh binary found in path -- ssh must be installed for salt-ssh"
" to run. Exiting."
),
)
self.opts["_ssh_version"] = ssh_version()
self.tgt_type = (
self.opts["selected_target_option"]
if self.opts["selected_target_option"]
else "glob"
)
self._expand_target()
self.roster = salt.roster.Roster(self.opts, self.opts.get("roster", "flat"))
self.targets = self.roster.targets(self.opts["tgt"], self.tgt_type)
if not self.targets:
self._update_targets()
# If we're in a wfunc, we need to get the ssh key location from the
# top level opts, stored in __master_opts__
if "__master_opts__" in self.opts:
if self.opts["__master_opts__"].get("ssh_use_home_key") and os.path.isfile(
os.path.expanduser("~/.ssh/id_rsa")
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts["__master_opts__"].get(
"ssh_priv",
os.path.join(
self.opts["__master_opts__"]["pki_dir"], "ssh", "salt-ssh.rsa"
),
)
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
if priv != "agent-forwarding":
if not os.path.isfile(priv):
try:
salt.client.ssh.shell.gen_key(priv)
except OSError:
raise salt.exceptions.SaltClientError(
"salt-ssh could not be run because it could not generate"
" keys.\n\nYou can probably resolve this by executing this"
" script with increased permissions via sudo or by running as"
" root.\nYou could also use the '-c' option to supply a"
" configuration directory that you have permissions to read and"
" write to."
)
self.defaults = {
"user": self.opts.get(
"ssh_user", salt.config.DEFAULT_MASTER_OPTS["ssh_user"]
),
"port": self.opts.get(
"ssh_port", salt.config.DEFAULT_MASTER_OPTS["ssh_port"]
),
"passwd": self.opts.get(
"ssh_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_passwd"]
),
"priv": priv,
"priv_passwd": self.opts.get(
"ssh_priv_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_priv_passwd"]
),
"timeout": self.opts.get(
"ssh_timeout", salt.config.DEFAULT_MASTER_OPTS["ssh_timeout"]
)
+ self.opts.get("timeout", salt.config.DEFAULT_MASTER_OPTS["timeout"]),
"sudo": self.opts.get(
"ssh_sudo", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo"]
),
"sudo_user": self.opts.get(
"ssh_sudo_user", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo_user"]
),
"identities_only": self.opts.get(
"ssh_identities_only",
salt.config.DEFAULT_MASTER_OPTS["ssh_identities_only"],
),
"remote_port_forwards": self.opts.get("ssh_remote_port_forwards"),
"ssh_options": self.opts.get("ssh_options"),
}
if self.opts.get("rand_thin_dir"):
self.defaults["thin_dir"] = os.path.join(
"/var/tmp", ".{}".format(uuid.uuid4().hex[:6])
)
self.opts["ssh_wipe"] = "True"
self.returners = salt.loader.returners(self.opts, {})
self.fsclient = salt.fileclient.FSClient(self.opts)
self.thin = salt.utils.thin.gen_thin(
self.opts["cachedir"],
extra_mods=self.opts.get("thin_extra_mods"),
overwrite=self.opts["regen_thin"],
extended_cfg=self.opts.get("ssh_ext_alternatives"),
)
self.mods = mod_data(self.fsclient)
@property
def parse_tgt(self):
"""
Method to determine the hostname and user
when bypassing the roster and using
ssh syntax (ex. root@localhost)
"""
if not self.opts.get("ssh_cli_tgt"):
self.opts["ssh_cli_tgt"] = self.opts.get("tgt", "")
hostname = self.opts.get("ssh_cli_tgt", "")
if isinstance(hostname, str) and "@" in hostname:
user, hostname = hostname.split("@", 1)
else:
user = self.opts.get("ssh_user")
return {"hostname": hostname, "user": user}
def _get_roster(self):
"""
Read roster filename as a key to the data.
:return:
"""
roster_file = salt.roster.get_roster_file(self.opts)
if roster_file not in self.__parsed_rosters:
roster_data = compile_template(
roster_file,
salt.loader.render(self.opts, {}),
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
self.__parsed_rosters[roster_file] = roster_data
return roster_file
def _expand_target(self):
"""
Figures out if the target is a reachable host without wildcards, expands if any.
:return:
"""
# TODO: Support -L
hostname = self.parse_tgt["hostname"]
if isinstance(hostname, list):
return
needs_expansion = "*" not in hostname and salt.utils.network.is_reachable_host(
hostname
)
if needs_expansion:
if hostname is None:
# Reverse lookup failed
return
self._get_roster()
for roster_filename in self.__parsed_rosters:
roster_data = self.__parsed_rosters[roster_filename]
if not isinstance(roster_data, bool):
for host_id in roster_data:
if hostname in [host_id, roster_data[host_id].get("host")]:
if hostname != self.opts["tgt"]:
self.opts["tgt"] = hostname
self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
return
def _update_roster(self, hostname=None, user=None):
"""
Update default flat roster with the passed in information.
:return:
"""
roster_file = self._get_roster()
if os.access(roster_file, os.W_OK):
if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
with salt.utils.files.fopen(roster_file, "a") as roster_fp:
roster_fp.write(
'# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n'
" host: {hostname}\n user: {user}\n passwd: {passwd}\n".format(
s_user=getpass.getuser(),
s_time=datetime.datetime.utcnow().isoformat(),
hostname=hostname if hostname else self.opts.get("tgt", ""),
user=user if user else self.opts.get("ssh_user", ""),
passwd=self.opts.get("ssh_passwd", ""),
)
)
log.info(
"The host %s has been added to the roster %s",
self.opts.get("tgt", ""),
roster_file,
)
else:
log.error("Unable to update roster %s: access denied", roster_file)
def _update_targets(self):
"""
        Update targets in case the hostname was passed directly without the roster.
:return:
"""
hosts = self.parse_tgt["hostname"]
user = self.parse_tgt["user"]
if not isinstance(hosts, (list, tuple)):
hosts = list([hosts])
_hosts = list()
for hostname in hosts:
_user = user
if "@" in hostname:
_user, hostname = hostname.split("@", 1)
if hostname == "*":
continue
if salt.utils.network.is_reachable_host(hostname):
_hosts.append(hostname)
self.targets[hostname] = {
"passwd": self.opts.get("ssh_passwd", ""),
"host": hostname,
"user": _user,
}
if self.opts.get("ssh_update_roster"):
self._update_roster(hostname=hostname, user=_user)
if self.tgt_type == "list":
self.opts["tgt"] = _hosts
elif _hosts:
self.opts["tgt"] = _hosts[0]
def get_pubkey(self):
"""
Return the key string for the SSH public key
"""
if (
"__master_opts__" in self.opts
and self.opts["__master_opts__"].get("ssh_use_home_key")
and os.path.isfile(os.path.expanduser("~/.ssh/id_rsa"))
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
pub = "{}.pub".format(priv)
with salt.utils.files.fopen(pub, "r") as fp_:
return "{} rsa root@master".format(fp_.read().split()[1])
def key_deploy(self, host, ret):
"""
Deploy the SSH key if the minions don't auth
"""
if not isinstance(ret[host], dict) or self.opts.get("ssh_key_deploy"):
target = self.targets[host]
if target.get("passwd", False) or self.opts["ssh_passwd"]:
self._key_deploy_run(host, target, False)
return ret
if ret[host].get("stderr", "").count("Permission denied"):
target = self.targets[host]
# permission denied, attempt to auto deploy ssh key
print(
"Permission denied for host {}, do you want to deploy "
"the salt-ssh key? (password required):".format(host)
)
deploy = input("[Y/n] ")
if deploy.startswith(("n", "N")):
return ret
target["passwd"] = getpass.getpass(
"Password for {}@{}: ".format(target["user"], host)
)
return self._key_deploy_run(host, target, True)
return ret
def _key_deploy_run(self, host, target, re_run=True):
"""
The ssh-copy-id routine
"""
argv = [
"ssh.set_auth_key",
target.get("user", "root"),
self.get_pubkey(),
]
single = Single(
self.opts,
argv,
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
if salt.utils.path.which("ssh-copy-id"):
# we have ssh-copy-id, use it!
stdout, stderr, retcode = single.shell.copy_id()
else:
stdout, stderr, retcode = single.run()
if re_run:
target.pop("passwd")
single = Single(
self.opts,
self.opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
stdout, stderr, retcode = single.cmd_block()
try:
data = salt.utils.json.find_json(stdout)
return {host: data.get("local", data)}
except Exception: # pylint: disable=broad-except
if stderr:
return {host: stderr}
return {host: "Bad Return"}
if salt.defaults.exitcodes.EX_OK != retcode:
return {host: stderr}
return {host: stdout}
def handle_routine(self, que, opts, host, target, mine=False):
"""
Run the routine in a "Thread", put a dict on the queue
"""
opts = copy.deepcopy(opts)
single = Single(
opts,
opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
mine=mine,
**target
)
ret = {"id": single.id}
stdout, stderr, retcode = single.run()
# This job is done, yield
try:
data = salt.utils.json.find_json(stdout)
if len(data) < 2 and "local" in data:
ret["ret"] = data["local"]
else:
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
except Exception: # pylint: disable=broad-except
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
que.put(ret)
def handle_ssh(self, mine=False):
"""
Spin up the needed threads or processes and execute the subsequent
routines
"""
que = multiprocessing.Queue()
running = {}
target_iter = self.targets.__iter__()
returned = set()
rets = set()
init = False
while True:
if not self.targets:
log.error("No matching targets found in roster.")
break
if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
try:
host = next(target_iter)
except StopIteration:
init = True
continue
for default in self.defaults:
if default not in self.targets[host]:
self.targets[host][default] = self.defaults[default]
if "host" not in self.targets[host]:
self.targets[host]["host"] = host
if self.targets[host].get("winrm") and not HAS_WINSHELL:
returned.add(host)
rets.add(host)
log_msg = (
"Please contact sales@saltstack.com for access to the"
" enterprise saltwinshell module."
)
log.debug(log_msg)
no_ret = {
"fun_args": [],
"jid": None,
"return": log_msg,
"retcode": 1,
"fun": "",
"id": host,
}
yield {host: no_ret}
continue
args = (
que,
self.opts,
host,
self.targets[host],
mine,
)
routine = Process(target=self.handle_routine, args=args)
routine.start()
running[host] = {"thread": routine}
continue
ret = {}
try:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except queue.Empty:
pass
for host in running:
if not running[host]["thread"].is_alive():
if host not in returned:
# Try to get any returns that came through since we
# last checked
try:
while True:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except queue.Empty:
pass
if host not in returned:
error = (
"Target '{}' did not return any data, "
"probably due to an error.".format(host)
)
ret = {"id": host, "ret": error}
log.error(error)
yield {ret["id"]: ret["ret"]}
running[host]["thread"].join()
rets.add(host)
for host in rets:
if host in running:
running.pop(host)
if len(rets) >= len(self.targets):
break
# Sleep when limit or all threads started
if len(running) >= self.opts.get("ssh_max_procs", 25) or len(
self.targets
) >= len(running):
time.sleep(0.1)
def run_iter(self, mine=False, jid=None):
"""
Execute and yield returns as they come in, do not print to the display
mine
The Single objects will use mine_functions defined in the roster,
pillar, or master config (they will be checked in that order) and
will modify the argv with the arguments from mine_functions
"""
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
for ret in self.handle_ssh(mine=mine):
host = next(iter(ret.keys()))
self.cache_job(jid, host, ret[host], fun)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
yield ret
def cache_job(self, jid, id_, ret, fun):
"""
Cache the job information
"""
self.returners["{}.returner".format(self.opts["master_job_cache"])](
{"jid": jid, "id": id_, "return": ret, "fun": fun}
)
def run(self, jid=None):
"""
Execute the overall routine, print results via outputters
"""
if self.opts.get("list_hosts"):
self._get_roster()
ret = {}
for roster_file in self.__parsed_rosters:
if roster_file.startswith("#"):
continue
ret[roster_file] = {}
for host_id in self.__parsed_rosters[roster_file]:
hostname = self.__parsed_rosters[roster_file][host_id]["host"]
ret[roster_file][host_id] = hostname
salt.output.display_output(ret, "nested", self.opts)
sys.exit()
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
try:
if isinstance(jid, bytes):
jid = jid.decode("utf-8")
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Could not save load with returner %s: %s",
self.opts["master_job_cache"],
exc,
exc_info=True,
)
if self.opts.get("verbose"):
msg = "Executing job with jid {}".format(jid)
print(msg)
print("-" * len(msg) + "\n")
print("")
sret = {}
outputter = self.opts.get("output", "nested")
final_exit = 0
for ret in self.handle_ssh():
host = next(iter(ret.keys()))
if isinstance(ret[host], dict):
host_ret = ret[host].get("retcode", 0)
if host_ret != 0:
final_exit = 1
else:
# Error on host
final_exit = 1
self.cache_job(jid, host, ret[host], fun)
ret = self.key_deploy(host, ret)
if isinstance(ret[host], dict) and (
ret[host].get("stderr") or ""
).startswith("ssh:"):
ret[host] = ret[host]["stderr"]
if not isinstance(ret[host], dict):
p_data = {host: ret[host]}
elif "return" not in ret[host]:
p_data = ret
else:
outputter = ret[host].get("out", self.opts.get("output", "nested"))
p_data = {host: ret[host].get("return", {})}
if self.opts.get("static"):
sret.update(p_data)
else:
salt.output.display_output(p_data, outputter, self.opts)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
if self.event is not None:
self.event.destroy()
if self.opts.get("static"):
salt.output.display_output(sret, outputter, self.opts)
if final_exit:
sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
class Single:
"""
Hold onto a single ssh execution
"""
# 1. Get command ready
# 2. Check if target has salt
# 3. deploy salt-thin
# 4. execute requested command via salt-thin
def __init__(
self,
opts,
argv,
id_,
host,
user=None,
port=None,
passwd=None,
priv=None,
priv_passwd=None,
timeout=30,
sudo=False,
tty=False,
mods=None,
fsclient=None,
thin=None,
mine=False,
minion_opts=None,
identities_only=False,
sudo_user=None,
remote_port_forwards=None,
winrm=False,
ssh_options=None,
**kwargs
):
# Get mine setting and mine_functions if defined in kwargs (from roster)
self.mine = mine
self.mine_functions = kwargs.get("mine_functions")
self.cmd_umask = kwargs.get("cmd_umask", None)
self.winrm = winrm
self.opts = opts
self.tty = tty
if kwargs.get("disable_wipe"):
self.wipe = False
else:
self.wipe = bool(self.opts.get("ssh_wipe"))
if kwargs.get("thin_dir"):
self.thin_dir = kwargs["thin_dir"]
elif self.winrm:
saltwinshell.set_winvars(self)
self.python_env = kwargs.get("ssh_python_env")
else:
if user:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", user)
else:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", "root")
self.thin_dir = thin_dir.replace(
"%%FQDNUUID%%",
uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[
:6
],
)
self.opts["thin_dir"] = self.thin_dir
self.fsclient = fsclient
self.context = {"master_opts": self.opts, "fileclient": self.fsclient}
self.ssh_pre_flight = kwargs.get("ssh_pre_flight", None)
if self.ssh_pre_flight:
self.ssh_pre_file = os.path.basename(self.ssh_pre_flight)
if isinstance(argv, str):
self.argv = [argv]
else:
self.argv = argv
self.fun, self.args, self.kwargs = self.__arg_comps()
self.id = id_
self.set_path = kwargs.get("set_path", "")
self.mods = mods if isinstance(mods, dict) else {}
args = {
"host": host,
"user": user,
"port": port,
"passwd": passwd,
"priv": priv,
"priv_passwd": priv_passwd,
"timeout": timeout,
"sudo": sudo,
"tty": tty,
"mods": self.mods,
"identities_only": identities_only,
"sudo_user": sudo_user,
"remote_port_forwards": remote_port_forwards,
"winrm": winrm,
"ssh_options": ssh_options,
}
# Pre apply changeable defaults
self.minion_opts = {
"grains_cache": True,
"log_file": "salt-call.log",
}
self.minion_opts.update(opts.get("ssh_minion_opts", {}))
if minion_opts is not None:
self.minion_opts.update(minion_opts)
# Post apply system needed defaults
self.minion_opts.update(
{
"root_dir": os.path.join(self.thin_dir, "running_data"),
"id": self.id,
"sock_dir": "/",
"fileserver_list_cache_time": 3,
}
)
self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
self.target = kwargs
self.target.update(args)
self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
if self.winrm:
# Determine if Windows client is x86 or AMD64
arch, _, _ = self.shell.exec_cmd("powershell $ENV:PROCESSOR_ARCHITECTURE")
self.arch = arch.strip()
self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"])
def __arg_comps(self):
"""
Return the function name and the arg list
"""
fun = self.argv[0] if self.argv else ""
parsed = salt.utils.args.parse_input(
self.argv[1:], condition=False, no_parse=self.opts.get("no_parse", [])
)
args = parsed[0]
kws = parsed[1]
return fun, args, kws
def _escape_arg(self, arg):
"""
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
"""
if self.winrm:
return arg
return "".join(["\\" + char if re.match(r"\W", char) else char for char in arg])
def run_ssh_pre_flight(self):
"""
Run our pre_flight script before running any ssh commands
"""
script = os.path.join(tempfile.gettempdir(), self.ssh_pre_file)
self.shell.send(self.ssh_pre_flight, script)
return self.execute_script(script)
def check_thin_dir(self):
"""
check if the thindir exists on the remote machine
"""
stdout, stderr, retcode = self.shell.exec_cmd(
"test -d {}".format(self.thin_dir)
)
if retcode != 0:
return False
return True
def deploy(self):
"""
Deploy salt-thin
"""
self.shell.send(
self.thin,
os.path.join(self.thin_dir, "salt-thin.tgz"),
)
self.deploy_ext()
return True
def deploy_ext(self):
"""
Deploy the ext_mods tarball
"""
if self.mods.get("file"):
self.shell.send(
self.mods["file"],
os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
)
return True
def run(self, deploy_attempted=False):
"""
Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode)
"""
stdout = stderr = retcode = None
if self.ssh_pre_flight:
if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
log.info(
"%s thin dir already exists. Not running ssh_pre_flight script",
self.thin_dir,
)
elif not os.path.exists(self.ssh_pre_flight):
log.error(
"The ssh_pre_flight script %s does not exist", self.ssh_pre_flight
)
else:
stdout, stderr, retcode = self.run_ssh_pre_flight()
if retcode != 0:
log.error(
"Error running ssh_pre_flight script %s", self.ssh_pre_file
)
return stdout, stderr, retcode
log.info(
"Successfully ran the ssh_pre_flight script: %s", self.ssh_pre_file
)
if self.opts.get("raw_shell", False):
cmd_str = " ".join([self._escape_arg(arg) for arg in self.argv])
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
elif self.fun in self.wfuncs or self.mine:
stdout, retcode = self.run_wfunc()
else:
stdout, stderr, retcode = self.cmd_block()
return stdout, stderr, retcode
def run_wfunc(self):
"""
Execute a wrapper function
Returns tuple of (json_data, retcode)
"""
# Ensure that opts/grains are up to date
# Execute routine
data_cache = False
data = None
cdir = os.path.join(self.opts["cachedir"], "minions", self.id)
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, "ssh_data.p")
refresh = False
if not os.path.isfile(datap):
refresh = True
else:
passed_time = (time.time() - os.stat(datap).st_mtime) / 60
if passed_time > self.opts.get("cache_life", 60):
refresh = True
if self.opts.get("refresh_cache"):
refresh = True
conf_grains = {}
# Save conf file grains before they get clobbered
if "ssh_grains" in self.opts:
conf_grains = self.opts["ssh_grains"]
if not data_cache:
refresh = True
if refresh:
# Make the datap
# TODO: Auto expire the datap
pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(
self.opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
opts_pkg = pre_wrapper["test.opts_pkg"]() # pylint: disable=E1102
if "_error" in opts_pkg:
# Refresh failed
retcode = opts_pkg["retcode"]
ret = salt.utils.json.dumps({"local": opts_pkg})
return ret, retcode
opts_pkg["file_roots"] = self.opts["file_roots"]
opts_pkg["pillar_roots"] = self.opts["pillar_roots"]
opts_pkg["ext_pillar"] = self.opts["ext_pillar"]
opts_pkg["extension_modules"] = self.opts["extension_modules"]
opts_pkg["module_dirs"] = self.opts["module_dirs"]
opts_pkg["_ssh_version"] = self.opts["_ssh_version"]
opts_pkg["thin_dir"] = self.opts["thin_dir"]
opts_pkg["master_tops"] = self.opts["master_tops"]
opts_pkg["extra_filerefs"] = self.opts.get("extra_filerefs", "")
opts_pkg["__master_opts__"] = self.context["master_opts"]
if "known_hosts_file" in self.opts:
opts_pkg["known_hosts_file"] = self.opts["known_hosts_file"]
if "_caller_cachedir" in self.opts:
opts_pkg["_caller_cachedir"] = self.opts["_caller_cachedir"]
else:
opts_pkg["_caller_cachedir"] = self.opts["cachedir"]
# Use the ID defined in the roster file
opts_pkg["id"] = self.id
retcode = 0
# Restore master grains
for grain in conf_grains:
opts_pkg["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts_pkg["grains"][grain] = self.target["grains"][grain]
popts = {}
popts.update(opts_pkg["__master_opts__"])
popts.update(opts_pkg)
pillar = salt.pillar.Pillar(
popts,
opts_pkg["grains"],
opts_pkg["id"],
opts_pkg.get("saltenv", "base"),
)
pillar_data = pillar.compile_pillar()
# TODO: cache minion opts in datap in master.py
data = {
"opts": opts_pkg,
"grains": opts_pkg["grains"],
"pillar": pillar_data,
}
if data_cache:
with salt.utils.files.fopen(datap, "w+b") as fp_:
fp_.write(salt.payload.dumps(data))
if not data and data_cache:
with salt.utils.files.fopen(datap, "rb") as fp_:
data = salt.payload.load(fp_)
opts = data.get("opts", {})
opts["grains"] = data.get("grains")
# Restore master grains
for grain in conf_grains:
opts["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts["grains"][grain] = self.target["grains"][grain]
opts["pillar"] = data.get("pillar")
wrapper = salt.client.ssh.wrapper.FunctionWrapper(
opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
wrapper.fsclient.opts["cachedir"] = opts["cachedir"]
self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
wrapper.wfuncs = self.wfuncs
# We're running in the mine, need to fetch the arguments from the
# roster, pillar, master config (in that order)
if self.mine:
mine_args = None
mine_fun_data = None
mine_fun = self.fun
if self.mine_functions and self.fun in self.mine_functions:
mine_fun_data = self.mine_functions[self.fun]
elif opts["pillar"] and self.fun in opts["pillar"].get(
"mine_functions", {}
):
mine_fun_data = opts["pillar"]["mine_functions"][self.fun]
elif self.fun in self.context["master_opts"].get("mine_functions", {}):
mine_fun_data = self.context["master_opts"]["mine_functions"][self.fun]
if isinstance(mine_fun_data, dict):
mine_fun = mine_fun_data.pop("mine_function", mine_fun)
mine_args = mine_fun_data
elif isinstance(mine_fun_data, list):
for item in mine_fun_data[:]:
if isinstance(item, dict) and "mine_function" in item:
mine_fun = item["mine_function"]
mine_fun_data.pop(mine_fun_data.index(item))
mine_args = mine_fun_data
else:
mine_args = mine_fun_data
# If we found mine_args, replace our command's args
if isinstance(mine_args, dict):
self.args = []
self.kwargs = mine_args
elif isinstance(mine_args, list):
self.args = mine_args
self.kwargs = {}
try:
if self.mine:
result = wrapper[mine_fun](*self.args, **self.kwargs)
else:
result = self.wfuncs[self.fun](*self.args, **self.kwargs)
except TypeError as exc:
result = "TypeError encountered executing {}: {}".format(self.fun, exc)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
except Exception as exc: # pylint: disable=broad-except
result = "An Exception occurred while executing {}: {}".format(
self.fun, exc
)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
# Mimic the json data-structure that "salt-call --local" will
# emit (as seen in ssh_py_shim.py)
if isinstance(result, dict) and "local" in result:
ret = salt.utils.json.dumps({"local": result["local"]})
else:
ret = salt.utils.json.dumps({"local": {"return": result}})
return ret, retcode
def _cmd_str(self):
"""
Prepare the command string
"""
sudo = "sudo" if self.target["sudo"] else ""
sudo_user = self.target["sudo_user"]
if "_caller_cachedir" in self.opts:
cachedir = self.opts["_caller_cachedir"]
else:
cachedir = self.opts["cachedir"]
thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1")
debug = ""
if not self.opts.get("log_level"):
self.opts["log_level"] = "info"
if (
salt.log.LOG_LEVELS["debug"]
>= salt.log.LOG_LEVELS[self.opts.get("log_level", "info")]
):
debug = "1"
arg_str = '''
OPTIONS.config = \
"""
{config}
"""
OPTIONS.delimiter = '{delimeter}'
OPTIONS.saltdir = '{saltdir}'
OPTIONS.checksum = '{checksum}'
OPTIONS.hashfunc = '{hashfunc}'
OPTIONS.version = '{version}'
OPTIONS.ext_mods = '{ext_mods}'
OPTIONS.wipe = {wipe}
OPTIONS.tty = {tty}
OPTIONS.cmd_umask = {cmd_umask}
OPTIONS.code_checksum = {code_checksum}
ARGS = {arguments}\n'''.format(
config=self.minion_config,
delimeter=RSTR,
saltdir=self.thin_dir,
checksum=thin_sum,
hashfunc="sha1",
version=salt.version.__version__,
ext_mods=self.mods.get("version", ""),
wipe=self.wipe,
tty=self.tty,
cmd_umask=self.cmd_umask,
code_checksum=thin_code_digest,
arguments=self.argv,
)
py_code = SSH_PY_SHIM.replace("#%%OPTS", arg_str)
py_code_enc = base64.encodebytes(py_code.encode("utf-8")).decode("utf-8")
if not self.winrm:
cmd = SSH_SH_SHIM.format(
DEBUG=debug,
SUDO=sudo,
SUDO_USER=sudo_user,
SSH_PY_CODE=py_code_enc,
HOST_PY_MAJOR=sys.version_info[0],
SET_PATH=self.set_path,
)
else:
cmd = saltwinshell.gen_shim(py_code_enc)
return cmd
def execute_script(self, script, extension="py", pre_dir=""):
"""
Execute a script on the minion, then delete it
"""
if extension == "ps1":
ret = self.shell.exec_cmd('"powershell {}"'.format(script))
else:
if not self.winrm:
ret = self.shell.exec_cmd("/bin/sh '{}{}'".format(pre_dir, script))
else:
ret = saltwinshell.call_python(self, script)
# Remove file from target system
if not self.winrm:
self.shell.exec_cmd("rm '{}{}'".format(pre_dir, script))
else:
self.shell.exec_cmd("del {}".format(script))
return ret
def shim_cmd(self, cmd_str, extension="py"):
"""
Run a shim command.
If tty is enabled, we must scp the shim to the target system and
execute it there
"""
if not self.tty and not self.winrm:
return self.shell.exec_cmd(cmd_str)
# Write the shim to a temporary file in the default temp directory
with tempfile.NamedTemporaryFile(
mode="w+b", prefix="shim_", delete=False
) as shim_tmp_file:
shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
# Copy shim to target system, under $HOME/.<randomized name>
target_shim_file = ".{}.{}".format(
binascii.hexlify(os.urandom(6)).decode("ascii"), extension
)
if self.winrm:
target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
# Remove our shim file
try:
os.remove(shim_tmp_file.name)
except OSError:
pass
ret = self.execute_script(script=target_shim_file, extension=extension)
return ret
def cmd_block(self, is_retry=False):
"""
Prepare the pre-check command to send to the subsystem
1. execute SHIM + command
2. check if SHIM returns a master request or if it completed
3. handle any master request
4. re-execute SHIM + command
5. split SHIM results from command results
6. return command results
"""
self.argv = _convert_args(self.argv)
log.debug(
"Performing shimmed, blocking command as follows:\n%s",
" ".join([str(arg) for arg in self.argv]),
)
cmd_str = self._cmd_str()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
log.trace("STDOUT %s\n%s", self.target["host"], stdout)
log.trace("STDERR %s\n%s", self.target["host"], stderr)
log.debug("RETCODE %s: %s", self.target["host"], retcode)
error = self.categorize_shim_errors(stdout, stderr, retcode)
if error:
if error == "Python environment not found on Windows system":
saltwinshell.deploy_python(self)
stdout, stderr, retcode = self.shim_cmd(cmd_str)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif error == "Undefined SHIM state":
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying thin, undefined state: {}".format(
stdout
),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
return "ERROR: {}".format(error), stderr, retcode
# FIXME: this discards output from ssh_shim if the shim succeeds. It should
# always save the shim output regardless of shim success or failure.
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if re.search(RSTR_RE, stderr):
# Found RSTR in stderr, which means the SHIM completed and the
# remaining output is only from salt.
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
# RSTR was found in stdout but not stderr - which means there
# is a SHIM command for the master.
shim_command = re.split(r"\r?\n", stdout, 1)[0].strip()
log.debug("SHIM retcode(%s) and command: %s", retcode, shim_command)
if (
"deploy" == shim_command
and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY
):
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
if not self.tty:
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
return self.cmd_block()
elif not re.search(RSTR_RE, stdout):
# If RSTR is not seen in stdout with tty, then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if self.tty:
stderr = ""
else:
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif "ext_mods" == shim_command:
self.deploy_ext()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying ext_mods: {}".format(stdout),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
return stdout, stderr, retcode
def categorize_shim_errors(self, stdout_bytes, stderr_bytes, retcode):
stdout = salt.utils.stringutils.to_unicode(stdout_bytes)
stderr = salt.utils.stringutils.to_unicode(stderr_bytes)
if re.search(RSTR_RE, stdout) and stdout != RSTR + "\n":
# RSTR was found in stdout which means that the shim
# functioned without *errors* . . . but there may be shim
# commands, unless the only thing we found is RSTR
return None
if re.search(RSTR_RE, stderr):
# Undefined state
return "Undefined SHIM state"
if stderr.startswith("Permission denied"):
# SHIM was not even reached
return None
perm_error_fmt = (
"Permissions problem, target user may need to be root or use sudo:\n {0}"
)
errors = [
(
(),
"sudo: no tty present and no askpass program specified",
"sudo expected a password, NOPASSWD required",
),
(
(salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,),
"Python interpreter is too old",
"Python version error. Recommendation(s) follow:\n"
"- Install Python 3 on the target machine(s)\n"
"- You can use ssh_pre_flight or raw shell (-r) to install Python 3",
),
(
(salt.defaults.exitcodes.EX_THIN_CHECKSUM,),
"checksum mismatched",
"The salt thin transfer was corrupted",
),
(
(salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
"scp not found",
"No scp binary. openssh-clients package required",
),
(
(salt.defaults.exitcodes.EX_CANTCREAT,),
"salt path .* exists but is not a directory",
"A necessary path for salt thin unexpectedly exists:\n " + stderr,
),
(
(),
"sudo: sorry, you must have a tty to run sudo",
"sudo is configured with requiretty",
),
((), "Failed to open log file", perm_error_fmt.format(stderr)),
((), "Permission denied:.*/salt", perm_error_fmt.format(stderr)),
(
(),
"Failed to create directory path.*/salt",
perm_error_fmt.format(stderr),
),
(
(salt.defaults.exitcodes.EX_SOFTWARE,),
"exists but is not",
"An internal error occurred with the shim, please investigate:\n "
+ stderr,
),
(
(),
"The system cannot find the path specified",
"Python environment not found on Windows system",
),
(
(),
"is not recognized",
"Python environment not found on Windows system",
),
]
for error in errors:
if retcode in error[0] or re.search(error[1], stderr):
return error[2]
return None
def check_refresh(self, data, ret):
"""
Stub out check_refresh
"""
return
def module_refresh(self):
"""
Module refresh is not needed, stub it out
"""
return
def lowstate_file_refs(chunks):
"""
Create a list of file ref objects to reconcile
"""
refs = {}
for chunk in chunks:
saltenv = "base"
crefs = []
for state in chunk:
if state == "__env__":
saltenv = chunk[state]
elif state == "saltenv":
saltenv = chunk[state]
elif state.startswith("__"):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
return refs
def salt_refs(data):
"""
Pull salt file references out of the states
"""
proto = "salt://"
ret = []
if isinstance(data, str):
if data.startswith(proto):
return [data]
if isinstance(data, list):
for comp in data:
if isinstance(comp, str):
if comp.startswith(proto):
ret.append(comp)
return ret
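# Illustration (assumed behavior, not part of the original source):
#   salt_refs('salt://top.sls')                    -> ['salt://top.sls']
#   salt_refs(['salt://a.sls', '/srv/other', 42])  -> ['salt://a.sls']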
def mod_data(fsclient):
"""
Generate the module arguments for the shim data
"""
# TODO, change out for a fileserver backend
sync_refs = [
"modules",
"states",
"grains",
"renderers",
"returners",
]
ret = {}
envs = fsclient.envs()
ver_base = ""
for env in envs:
files = fsclient.file_list(env)
for ref in sync_refs:
mods_data = {}
pref = "_{}".format(ref)
for fn_ in sorted(files):
if fn_.startswith(pref):
if fn_.endswith((".py", ".so", ".pyx")):
full = salt.utils.url.create(fn_)
mod_path = fsclient.cache_file(full, env)
if not os.path.isfile(mod_path):
continue
mods_data[os.path.basename(fn_)] = mod_path
chunk = salt.utils.hashutils.get_hash(mod_path)
ver_base += chunk
if mods_data:
if ref in ret:
ret[ref].update(mods_data)
else:
ret[ref] = mods_data
if not ret:
return {}
ver_base = salt.utils.stringutils.to_bytes(ver_base)
ver = hashlib.sha1(ver_base).hexdigest()
ext_tar_path = os.path.join(
fsclient.opts["cachedir"], "ext_mods.{}.tgz".format(ver)
)
mods = {"version": ver, "file": ext_tar_path}
if os.path.isfile(ext_tar_path):
return mods
tfp = tarfile.open(ext_tar_path, "w:gz")
verfile = os.path.join(fsclient.opts["cachedir"], "ext_mods.ver")
with salt.utils.files.fopen(verfile, "w+") as fp_:
fp_.write(ver)
tfp.add(verfile, "ext_version")
for ref in ret:
for fn_ in ret[ref]:
tfp.add(ret[ref][fn_], os.path.join(ref, fn_))
tfp.close()
return mods
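# Illustration (assumed, not part of the original source): when extension modules are
# found on the fileserver, mod_data() returns something like
#   {'version': '<sha1 over all module hashes>',
#    'file': '<cachedir>/ext_mods.<sha1>.tgz'}
# and the tarball contains an 'ext_version' file plus one entry per synced module.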
def ssh_version():
"""
Returns the version of the installed ssh command
"""
# This function needs more granular checks and to be validated against
# older versions of ssh
ret = subprocess.Popen(
["ssh", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
try:
version_parts = ret[1].split(b",")[0].split(b"_")[1]
parts = []
for part in version_parts:
try:
parts.append(int(part))
except ValueError:
return tuple(parts)
return tuple(parts)
except IndexError:
return (2, 0)
def _convert_args(args):
"""
Take a list of args, and convert any dicts inside the list to keyword
args in the form of `key=value`, ready to be passed to salt-ssh
"""
converted = []
for arg in args:
if isinstance(arg, dict):
for key in list(arg.keys()):
if key == "__kwarg__":
continue
converted.append("{}={}".format(key, arg[key]))
else:
converted.append(arg)
return converted
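# Illustration (assumed, not part of the original source):
#   _convert_args(['state.apply', {'test': True, '__kwarg__': True}])
#   -> ['state.apply', 'test=True']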
|
thunk.py
|
# py->binary (.so/.dylib/.dll) thunk
#
# this is separate from __init__.py to allow testing without Binary Ninja
import os
import binascii
import platform
import threading
import ctypes
def doit_worker(dll, shellcode):
dynamic_type = {'Darwin':'dylib', 'Windows':'dll', 'Linux':'so'}[platform.system()]
ccp = ctypes.c_char_p(shellcode)
scaddr = ctypes.cast(ccp, ctypes.c_void_p).value
print('THUNK: calling to %s.doit(0x%X, 0x%X)' % (dynamic_type, scaddr, len(shellcode)))
rc = dll.doit(ccp, len(shellcode))
print('THUNK: %s returned %d' % (dynamic_type, rc))
def doit(shellcode, use_thread=True):
shellcode_str = binascii.hexlify(shellcode).decode('utf-8')
#shellcode_str = ' '.join([shellcode_str[x:x+2] for x in range(0,len(shellcode_str),2)])
print('THUNK: running', shellcode_str)
# resolve path to dll
fpath = os.path.abspath(__file__)
fpath = os.path.dirname(fpath)
fpath = os.path.join(fpath, 'callbuf')
system = platform.system()
if system == 'Darwin':
fpath = os.path.join(fpath, 'callbuf.dylib')
elif system == 'Windows':
fpath = os.path.join(fpath, 'callbuf.dll')
elif system == 'Linux':
fpath = os.path.join(fpath, 'callbuf.so')
else:
raise Exception('unknown platform/system: %s' % system)
# load dll
print('THUNK: loading', fpath)
dll = ctypes.CDLL(fpath)
# call into dll
if use_thread:
print('THUNK: creating thread')
threading.Thread(target=doit_worker, args=(dll, shellcode)).start()
else:
doit_worker(dll, shellcode)
print('THUNK: returning')
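# Example usage (a minimal sketch, not part of the original source; the byte string
# below is a placeholder, not real shellcode from this project):
#   import thunk
#   thunk.doit(b'\xc3')                      # hypothetical: a single x86 RET instruction
#   thunk.doit(b'\xc3', use_thread=False)    # run synchronously on the calling thread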
|
mb2mqtt.py
|
from pyModbusTCP.client import ModbusClient
import paho.mqtt.client as mqtt
from time import sleep
from threading import Thread
import sys
class ClienteMODBUS():
"""
MODBUS client class
"""
def __init__(self,server_addr,porta,device_id,broker_addr,broker_port,scan_time=0.2):
"""
Constructor
"""
self._scan_time = scan_time
self._server_ip = server_addr
self._device_id = device_id
self._port = porta
self._cliente = ModbusClient(host=server_addr, port=porta, unit_id=device_id)
self._broker_addrs = broker_addr
self._broker_port = broker_port
self._client_mqtt = mqtt.Client()
self._status_conn_mqtt = False
self._threadread = None
self._readingthread = False
def atendimento(self):
"""
Serve the interactive user menu
"""
try:
print('\n--> Testing Modbus Connection.. ', end='')
self._cliente.open()
print('OK')
print('--> Testing MQTT BROKER Connection.. ', end='')
sleep(1)
try:
if self._client_mqtt.connect(self._broker_addrs, self._broker_port, 60) != 0:
print("Unable to establish connection with MQTT Broker!")
sys.exit(-1)
else:
print('--> OK')
self._status_conn_mqtt = True
except Exception as e:
print('ERROR: ', e.args)
print("\nUnable to establish connection with MQTT Broker!\nCheck if the IPv4 Address is OK and try again...")
print('Continuing without connection to the MQTT Broker..')
self._client_mqtt.disconnect()
except Exception as e:
print('ERROR: ', e.args)
try:
atendimento = True
while atendimento:
print('-' * 100)
print('ModbusTCP/MQTT Client'.center(100))
print('-' * 100)
sel = input("Available services: \n1- Start a read \n2- Stop a read \n3- Write a value \n4- Configuration \n5- Exit \nService N°: ")
if sel == '1':
print('\nAvailable Function Codes:')
print("1- Coil Status \n2- Input Status \n3- Holding Register \n4- Input Register")
while True:
tipo = int(input("Function Code: "))
if tipo > 4 or tipo < 1:
print('Enter a valid type..')
sleep(0.5)
else:
break
if tipo == 3 or tipo == 4:
while True:
#val = int(input("\nAvailable types of display:\n1- Decimal \n2- Floating Point \n3- Float Swapped \nDisplay: "))
val = 1
if val > 3:
print('Enter a valid type..')
sleep(0.5)
else:
break
if tipo == 3:
func = "F03-HoldingRegister"
else:
func = "F04-InputRegister"
if val == 1: # decimal values
addr = int(input(f'\nModbus Starting Address: '))
leng = int(input(f'Quantity of Registers: '))
print('\nReading has started and data is being published to the specified topic...\n')
sleep(0.5)
try:
self._readingthread = True
self._threadread = Thread(target=self.readThread, args=(tipo, 1, addr, leng, func,))
self._threadread.start()
except Exception as e:
print('ERROR: ', e.args)
try:
sleep(0.5)
print('\nRetrying..')
if not self._cliente.is_open():
self._cliente.open()
sleep(0.5)
for i in range(0, int(nvezes)):
print(f'Read {i + 1}:', end='')
modbusValues = self.lerDado(int(tipo),int(addr),leng)
print(modbusValues)
self.mqttPublish(topic="test/status", msg=f"Read {i+1} {func}: {modbusValues}")
sleep(self._scan_time)
print('\nModbus reads have been published to broker via topic "test/status" successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args)
print('\nClient was unable to receive a response.. \nBack to menu..\n\n')
sleep(1.5)
elif val == 2: # FLOAT values
addr = int(input(f'\nModbus Starting Address: '))
leng = int(input(f'Quantity of Registers: '))
print('\nReading has started and data is being published to the specified topic...\n')
sleep(0.5)
try:
self._readingthread = True
self._threadread = Thread(target=self.readThread, args=(tipo, 2, addr, leng, func,))
self._threadread.start()
except Exception as e:
print('ERROR: ', e.args)
try:
sleep(0.5)
print('\nRetrying..')
if not self._cliente.is_open():
self._cliente.open()
sleep(0.5)
for i in range(0, int(nvezes)):
print(f'Read {i + 1}:', end='')
modbusValues = self.lerDadoFloat(int(tipo),int(addr),leng)
print(modbusValues)
self.mqttPublish(topic="test/status", msg=f"Read {i+1} {func}: {modbusValues}")
sleep(self._scan_time)
print('\nModbus reads have been published to the broker via topic /test/status/ \nand inserted into the local database successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args)
print('\nThe client was unable to receive a response.. \nBack to menu..\n\n')
sleep(1.5)
elif val == 3: # FLOAT SWAPPED values
addr = int(input(f'\nModbus Starting Address: '))
leng = int(input(f'Quantity of Registers: '))
nvezes = input('Number of reads: ')
print('\nReading has started and data is being published to the specified topic...\n')
sleep(0.5)
try:
for i in range(0, int(nvezes)):
print(f'Read {i + 1}:', end='')
modbusValues = self.lerDadoFloatSwapped(int(tipo), int(addr), leng)
print(modbusValues)
self.mqttPublish(topic="test/status", msg=f"Read {i+1} {func}: {modbusValues}")
sleep(self._scan_time)
print('\nModbus reads have been published to the broker via topic /test/status/ \nand inserted into the local database successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args, '\n')
print('The client was unable to receive a response.. \nBack to menu..\n\n')
sleep(1.5)
else:
print('Not found..\n')
sleep(0.7)
else:
addr = int(input(f'\nModbus Starting Address: '))
leng = int(input(f'Quantity of Registers: '))
nvezes = input('Number of reads: ')
print('\nReading has started and data is being published to the specified topic...\n')
sleep(0.3)
if tipo == 1:
func = "F01-CoilStatus"
else:
func = "F02-InputStatus"
try:
for i in range(0, int(nvezes)):
print(f'Read {i + 1}:', end='')
modbusValues = self.lerDado(int(tipo),int(addr),leng)
print(modbusValues)
self.mqttPublish(topic="test/status", msg=f"Read {i+1} {func}: {modbusValues}")
sleep(self._scan_time)
print('\nModbus reads have been published to the broker via topic /test/status/ \nand inserted into the local database successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args, '\n')
print('The client was unable to receive a response.. \nBack to menu..\n\n')
sleep(1.5)
elif sel == '2':
try:
self._readingthread = False
print("\nStopping reading..\n")
except Exception as e:
print('ERROR: ', e.args)
print('\nThe client was unable to write.. \nBack to menu..\n\n')
sleep(1.5)
elif sel == '3':
print('\nWhich data type do you want to write? \n1- Coil Status \n2- Holding Register')
while True:
tipo = int(input("Type: "))
if tipo > 2:
print('Enter a valid type..')
sleep(0.5)
else:
break
addr = input(f'Enter the address: ')
valor = int(input(f'Enter the value you want to write: '))
try:
print('\nWriting..')
sleep(0.5)
self.escreveDado(int(tipo), int(addr), valor)
except Exception as e:
print('ERROR: ', e.args)
print('\nThe client was unable to write.. \nBack to menu..\n\n')
sleep(1.5)
elif sel == '4':
print('\nSettings: ')
print('1- Modbus Connection Settings \n2- MQTT Broker Settings \n3- Exit')
while True:
tpconfig = input("Set up: ")
if tpconfig not in '123':
print('Enter a valid configuration type.. (1, 2 or 3)')
sleep(0.5)
else:
break
if int(tpconfig) == 1:
print('')
print('-' * 100)
print('Modbus Communication Settings'.center(100))
print(f'\n-> Current settings: - IP Addrs: {self._server_ip} - TCP Port: {self._port} - Device ID: {self._device_id} - Scan_Time: {self._scan_time}s')
print('\nSettings: \n1- IP Address \n2- TCP Port \n3- Device ID \n4- Scan Time \n5- Exit')
while True:
config = input("Set up: ")
if config not in '12345':
print('Enter a valid configuration type.. (1, 2, 3, 4 or 5)')
sleep(0.5)
else:
break
if int(config) == 1:
ipserv = str(input(' New IP address: '))
try:
self._cliente.close()
self._server_ip = ipserv
self._cliente = ModbusClient(host=self._server_ip)
self._cliente.open()
print(f'\nServer IP changed to {ipserv} successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args)
print('\nCould not change the IP address.. \nBack to menu..\n\n')
sleep(0.5)
elif int(config) == 2:
porttcp = input(' New TCP port: ')
try:
self._cliente.close()
self._port = int(porttcp)
self._cliente = ModbusClient(port=self._port)
self._cliente.open()
print(f'\nTCP port changed to {porttcp} successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args)
print('\nCould not change the port.. \nBack to menu..\n\n')
sleep(0.5)
elif int(config) == 3:
while True:
iddevice = input(' New device ID: ')
if 0 <= int(iddevice) < 256:
break
else:
print('Device ID must be an integer between 0 and 255.', end='')
sleep(0.5)
try:
self._cliente.close()
self._device_id = int(iddevice)
self._cliente = ModbusClient(unit_id=self._device_id)
self._cliente.open()
print(f'\nDevice ID changed to {iddevice} successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args)
print('\nCould not change the device ID.. \nBack to menu..\n\n')
sleep(0.5)
elif int(config) == 4:
scant = input(' New scan time [s]: ')
try:
self._scan_time = float(scant)
print(f'\nScan_time changed to {scant}s successfully!!\n')
except Exception as e:
print('ERROR: ', e.args)
print('\nCould not change the scan time.. \nBack to menu..\n\n')
sleep(0.5)
elif int(config) == 5:
print('\nGetting back...\n')
sleep(0.5)
else:
print('Not found..\n')
sleep(0.7)
elif int(tpconfig) == 2:
print('')
print('-' * 100)
print('MQTT Broker Settings'.center(100))
print(f'\n-> Current settings: - IP Addrs: {self._broker_addrs} - Port: {self._broker_port}')
print('\nSettings: \n1- IP Address \n2- Port \n3- Exit')
while True:
config = input("Set up: ")
if config not in '123':
print('Enter a valid configuration type.. (1, 2 or 3)')
sleep(0.5)
else:
break
if int(config) == 1:
ipserv = str(input(' New broker IP address: '))
try:
self._broker_addrs = ipserv
print('\n--> Testing communication with the MQTT Broker.. ', end='')
sleep(0.5)
try:
if self._client_mqtt.connect(self._broker_addrs, self._broker_port, 60) != 0:
print("Unable to establish connection with MQTT Broker!")
sys.exit(-1)
else:
print(' --> All OK')
print(f'Broker IP changed to {ipserv} successfully!!\n')
self._status_conn_mqtt = True
sleep(0.2)
except Exception as e:
print('ERROR: ', e.args)
print("\nUnable to establish connection with MQTT Broker!\nCheck if the IPv4 Address is OK and try again..")
print('Continuing without connection to the MQTT Broker..')
self._status_conn_mqtt = False
self._client_mqtt.disconnect()
except Exception as e:
print('ERROR: ', e.args)
print('\nCould not change the IP address.. \nBack to menu..\n\n')
sleep(0.5)
elif int(config) == 2:
portbroker = input(' New port: ')
try:
self._broker_port = int(portbroker)
print(f'\nPort changed to {portbroker} successfully!!\n')
sleep(0.5)
except Exception as e:
print('ERROR: ', e.args)
print('\nCould not change the port.. \nBack to menu..\n\n')
sleep(0.5)
elif int(config) == 3:
print('\nGetting back...\n')
sleep(0.5)
else:
print('Not found..\n')
sleep(0.7)
else:
print('\nGetting back...\n')
sleep(0.5)
elif sel == '5':
confirm_close = input('\nType "YES" to confirm you want to exit the app: ').capitalize()[0]
if confirm_close == 'Y':
sleep(0.2)
print('\nShutting down...\n')
sleep(1)
self._cliente.close()
atendimento = False
else:
print('\nGetting back..')
else:
print('Not found..\n')
sleep(0.7)
except Exception as e:
print('ERROR: ', e.args)
def lerDado(self, tipo, addr, leng=1):
"""
MODBUS read method
"""
if tipo == 1:
co = self._cliente.read_coils(addr - 1, leng)
tipo = "F01-CoilStatus"
disp = "Decimal"
return co
elif tipo == 2:
di = self._cliente.read_discrete_inputs(addr - 1, leng)
tipo = "F02-InputStatus"
disp = "Decimal"
return di
elif tipo == 3:
hr = self._cliente.read_holding_registers(addr - 1, leng)
tipo = "F03-HoldingRegister"
disp = "Decimal"
return hr
elif tipo == 4:
ir = self._cliente.read_input_registers(addr - 1, leng)
tipo = "F04-InputRegister"
disp = "Decimal"
return ir
else:
print('Invalid read type..')
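# Illustration (assumed, not part of the original source):
#   lerDado(3, 100, 2) calls read_holding_registers(99, 2) (addresses are 1-based in the
#   menu, 0-based on the wire) and returns a list such as [1234, 5678], or None on error.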
def lerDadoFloat(self, tipo, addr, leng):
"""
MODBUS FLOAT read method
"""
i = 0
g = 0
e1 = []
listfloat = []
while i < leng:
if tipo == 3:
i1 = self._cliente.read_holding_registers(addr - 1 + g, 2)
tipore = "F03-HoldingRegister"
elif tipo == 4:
i1 = self._cliente.read_input_registers(addr - 1 + g, 2)
tipore = "F04-InputRegister"
else:
print('Invalid type..')
for x in i1:
x = bin(x).lstrip("0b")
e1.insert(0 + g, x)
i += 1
g += 2
e = 0
while e <= leng:
e2 = ''
for x in e1:
e2 = str(f'{e2}{x.rjust(16, "0")} ')
e += 1
b2 = str(f'{e2}')
e3 = b2.split()
y = 0
while y < len(e3):
ieee = f'{e3[0+y]}{e3[1+y]}'
sign = int(ieee[0])
expo = str(ieee[1:9])
expodec = 0
expopot = 7
for i in range(8):
expodec = expodec + (int(expo[i]) * (2**expopot))
expopot -= 1
mant = str(ieee[9:])
mantdec = 0
mantpot = -1
for i in range(23):
mantdec = mantdec + (int(mant[i]) * (2 ** mantpot))
mantpot -= 1
value = ((-1)**sign)*(1+mantdec)*2**(expodec-127)
listfloat.append(round(value, 3))
y += 2
tipo = tipore
disp = "Floating Point"
return listfloat
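# Note (a sketch, not part of the original program): the bit-by-bit IEEE-754 decoding
# above can also be done with the standard library, choosing the word order to match
# the device (word_hi/word_lo below are the two 16-bit registers; names assumed):
#   import struct
#   value = struct.unpack('>f', struct.pack('>HH', word_hi, word_lo))[0]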
def lerDadoFloatSwapped(self, tipo, addr, leng):
"""
MODBUS FLOAT SWAPPED read method
"""
i = 0
g = 0
e1 = []
listfloatsp = []
while i < leng:
if tipo == 3:
i1 = self._cliente.read_holding_registers(addr - 1 + g, 2)
tipore = "F03-HoldingRegister"
elif tipo == 4:
i1 = self._cliente.read_input_registers(addr - 1 + g, 2)
tipore = "F04-InputRegister"
else:
print('Invalid type..')
i2 = i1[::-1]
for x in i2:
x = bin(x).lstrip("0b")
e1.insert(0 + g, x)
i += 1
g += 2
e = 0
while e <= leng:
e2 = ''
for x in e1:
e2 = str(f'{e2}{x.rjust(16, "0")} ')
e += 1
b2 = str(f'{e2}')
e3 = b2.split()
y = 0
while y < len(e3):
ieee = f'{e3[0+y]}{e3[1+y]}'
sign = int(ieee[0])
expo = str(ieee[1:9])
expodec = 0
expopot = 7
for i in range(8):
expodec = expodec + (int(expo[i]) * (2**expopot))
expopot -= 1
mant = str(ieee[9:])
mantdec = 0
mantpot = -1
for i in range(23):
mantdec = mantdec + (int(mant[i]) * (2 ** mantpot))
mantpot -= 1
value = ((-1)**sign)*(1+mantdec)*2**(expodec-127)
listfloatsp.append(round(value, 3))
y += 2
tipo = tipore
disp = "Float (Swapped)"
return listfloatsp
def escreveDado(self, tipo, addr, valor):
"""
MODBUS write method
"""
try:
if tipo == 1:
print(f'Value {valor} written to address {addr}\n')
return self._cliente.write_single_coil(addr - 1, valor)
elif tipo == 2:
print(f'Value {valor} written to address {addr}\n')
return self._cliente.write_single_register(addr - 1, valor)
else:
print('Invalid write type..\n')
except Exception as e:
print('ERROR: ', e.args)
def mqttPublish(self, topic, msg):
"""
Publish a message to the MQTT broker
"""
try:
if self._client_mqtt.connect(self._broker_addrs, self._broker_port, 60) != 0:
print("Unable to establish connection with MQTT Broker!")
sys.exit(-1)
self._client_mqtt.publish(topic, msg)
sleep(0.2)
self._client_mqtt.disconnect()
except Exception as e:
print('ERROR: ', e.args, end='')
print('Error while trying to publish to the broker, check its IP address and port..')
self._status_conn_mqtt = False
def readThread(self, tipo, display, addr, leng, func):
"""
Continuous read thread (display: 1=decimal, 2=float, 3=float swapped)
"""
if display == 1:
try:
i = 0
while self._readingthread:
modbusValues = self.lerDado(int(tipo),int(addr),leng)
if self._status_conn_mqtt:
self.mqttPublish(topic="test/status", msg=f"Read {i+1} - ({addr}:{leng}) {func}: {modbusValues}")
sleep(self._scan_time)
i += 1
sleep(0.2)
except Exception as e:
print('ERROR: ', e.args, end='')
print('Error while trying to publish to the broker, check its IP address and port..')
elif display == 2:
try:
i = 0
while self._readingthread:
modbusValues = self.lerDadoFloat(int(tipo),int(addr),leng)
if self._status_conn_mqtt:
self.mqttPublish(topic="test/status", msg=f"Read {i+1} - F03.HR (350-369): {modbusValues}")
sleep(self._scan_time)
i += 1
sleep(0.2)
except Exception as e:
print('ERROR: ', e.args, end='')
print('Error while trying to publish to the broker, check its IP address and port..')
elif display == 3:
try:
i = 0
while self._readingthread:
modbusValues = self.lerDadoFloatSwapped(int(tipo),int(addr),leng)
if self._status_conn_mqtt:
self.mqttPublish(topic="test/status", msg=f"Read {i+1} - {func}: {modbusValues}")
sleep(self._scan_time)
i += 1
sleep(0.2)
except Exception as e:
print('ERROR: ', e.args, end='')
print('Error while trying to publish to the broker, check its IP address and port..')
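# Example usage (a minimal sketch, not part of the original source; host, ports and
# device id are placeholders):
#   cliente = ClienteMODBUS('192.168.0.10', 502, 1, 'localhost', 1883, scan_time=0.5)
#   cliente.atendimento()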
|
etcd_rendezvous.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import sys
import threading
import time
from typing import Optional
import etcd # type: ignore[import]
from torch.distributed.elastic.rendezvous import (
RendezvousClosedError,
RendezvousError,
RendezvousHandler,
RendezvousParameters,
RendezvousTimeoutError,
)
from .utils import _parse_rendezvous_endpoint
from .etcd_store import EtcdStore, cas_delay
_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
_log_handler = logging.StreamHandler(sys.stderr)
_log_handler.setFormatter(_log_fmt)
log = logging.getLogger(__name__)
log.propagate = False
log.setLevel(logging.INFO)
log.addHandler(_log_handler)
# Retryable failure exception means that we were too late to make
# a desired state transition (e.g. because of a race condition),
# and should now restart from the beginning.
# A small delay is recommended to avoid spamming Etcd.
class EtcdRendezvousRetryableFailure(Exception):
pass
# Similar to retryable failure, but the new state we observed suggests we
# can re-try immediately, i.e. without a need for "safety delay".
class EtcdRendezvousRetryImmediately(Exception):
pass
# Default timeout for the rendezvous.
_DEFAULT_TIMEOUT: int = 600 # 10 minutes
# Additional waiting time after reaching the minimum number of nodes
# in case the rendezvous is elastic (min != max).
_DEFAULT_LAST_CALL_TIMEOUT: int = 30 # 30 seconds
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within this directory.
# Its only role is to clean up rendezvous data from old runs (for the case when
# the etcd server is persistent), and it has no effect on correctness, but it should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
class EtcdRendezvousHandler(RendezvousHandler):
"""
Implements a :py:class:`torchelastic.rendezvous.RendezvousHandler`
interface backed by
:py:class:`torchelastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
Torchelastic uses a URL to configure the type of rendezvous to use and
to pass implementation specific configurations to the rendezvous module.
The basic etcd rendezvous configuration URL looks like the following
::
etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers> # noqa W605
-- example --
etcd://localhost:2379/1234?min_workers=1&max_workers=3
The URL above is interpreted as follows:
1. Use the rendezvous handler that is registered with the ``etcd``
scheme
2. The ``etcd`` endpoint to use is ``localhost:2379``
3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to
share a common etcd server for multiple jobs so long as the
``job_ids`` are guaranteed to be unique). Note that the job id can be
any string (e.g. does not need to be a number) as long as it is
unique.
4. ``min_workers=1`` and ``max_workers=3`` specifies a range for
membership size - torchelastic starts running the job as long as the
cluster size is greater than or equal to ``min_workers`` and admits
up to ``max_workers`` into the cluster.
Below is a full list of the parameters that can be passed to etcd
rendezvous:
+--------------------------------------------+--------------------------+
| Parameter | Description |
+============================================+==========================+
| min_workers | minimum number of |
| | workers for the |
| | rendezvous to be valid |
+--------------------------------------------+--------------------------+
| max_workers | maximum number of |
| | workers to admit |
+--------------------------------------------+--------------------------+
| timeout | total timeout within |
| | which next_rendezvous is |
| | expected to succeed |
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
| | (“last call”) after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
+--------------------------------------------+--------------------------+
| etcd_prefix | path prefix (from etcd |
| | root), inside which all |
| | etcd nodes will be |
| | created (defaults to |
| | ``/torchelastic/p2p``) |
+--------------------------------------------+--------------------------+
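Example (a sketch added for illustration, assumptions marked): with
torchrun / torch.distributed.run, an equivalent etcd rendezvous is
typically selected via the launcher's standard rendezvous flags, e.g.
::
torchrun --rdzv_backend=etcd --rdzv_endpoint=localhost:2379 --rdzv_id=1234 --nnodes=1:3 train_script.py
(flag spellings assumed from torchrun's rendezvous options;
``train_script.py`` is a placeholder)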
"""
def __init__(self, rdzv_impl):
self._rdzv_impl = rdzv_impl
def __del__(self):
# TODO: look into using weakref here instead.
del self._rdzv_impl
def get_backend(self) -> str:
return "etcd"
def next_rendezvous(self):
rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier()
log.info("Creating EtcdStore as the c10d::Store implementation")
store = self._rdzv_impl.setup_kv_store(rdzv_version)
return store, rank, world_size
def is_closed(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
return state["status"] == "closed"
except etcd.EtcdKeyNotFound:
# No rendezvous state, so it cannot be closed.
return False
def set_closed(self):
self._rdzv_impl.set_closed()
def num_nodes_waiting(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
if state["status"] == "final":
return state["num_workers_waiting"]
except etcd.EtcdKeyNotFound:
pass
return 0
def get_run_id(self) -> str:
return self._rdzv_impl._run_id
def shutdown(self) -> bool:
try:
self.set_closed()
return True
except BaseException as e:
log.warning(f"Shutdown failed. Error occurred: {str(e)}")
return False
# TODO: we should probably handle a few additional errors,
# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are
# only relevant for multi-node Etcd ensemble. A simple retry would work,
# but is verbose to add everywhere. Consider wrapping the client calls
# into auto-retry for these errors?
#
class EtcdRendezvous(object):
"""
A rendezvous implementation that uses `etcd <https://etcd.io/>`__ as
the backend store.
"""
def __init__(
self,
client,
prefix,
run_id,
num_min_workers,
num_max_workers,
timeout,
last_call_timeout,
):
self.client = client
log.info("Etcd machines: " + str(self.client.machines))
self._prefix = prefix
self._run_id = run_id
self._num_min_workers = num_min_workers
self._num_max_workers = num_max_workers
self._timeout = timeout
self._last_call_timeout = last_call_timeout
# For cleaning up TTL refresher threads (for ephemeral keys)
self._lease_run_id_stop = None
self._lease_this_rank_stop = None
if not self._prefix.endswith("/"):
self._prefix += "/"
# Set up a permanent prefix dir, if it didn't exist
if self._prefix != "/":
self.create_path_if_not_exists(self._prefix)
# Lease a "sub-root" node specific to this job instance (run_id)
self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL)
self._lease_run_id_stop = self.setup_lease_renewal(
self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL
)
# Subdir for all rendezvous work
self.create_path_if_not_exists(self.get_path("/rdzv"))
# Create a rendezvous version counter, if it doesn't exist
try:
self.client.write(
key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False
)
except etcd.EtcdAlreadyExist:
pass
def __del__(self):
# TODO: look into using weakref here instead.
if self._lease_run_id_stop is not None:
self._lease_run_id_stop.set()
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
def rendezvous_barrier(self):
"""
Main entry point for next rendezvous.
This method is blocking until rendezvous succeeds or a timeout occurs.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousTimeoutError - timeout waiting for rendezvous
RendezvousClosedError - rendezvous is or was closed while waiting
RendezvousError - other persistent errors that
render the rendezvous non-retryable
"""
self._rendezvous_deadline = time.time() + self._timeout
while True:
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
log.info("Attempting to join next rendezvous")
try:
# Dis-own our lease in the previous rendezvous, if exists
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
return self.init_phase()
except EtcdRendezvousRetryImmediately:
# The type of failure suggests we can retry without delay
pass
except EtcdRendezvousRetryableFailure:
# In case of retryable failure, wait a small delay
# to avoid spamming etcd
time.sleep(1)
except RendezvousTimeoutError:
log.info("Rendezvous timeout occurred in EtcdRendezvousHandler")
raise
except RendezvousClosedError:
log.info(
f"Rendezvous for run_id={self._run_id} was observed to be closed"
)
raise
except RendezvousError:
raise
except Exception as e:
# In case of a general exception, wait a small delay
# to avoid spamming etcd
# FIXME: there are a few things that fall under this like
# etcd.EtcdKeyNotFound, etc, which could be handled more explicitly.
log.info("Rendezvous attempt failed, will retry. Reason: " + str(e))
time.sleep(1)
def init_phase(self):
"""
Initially, the rendezvous state is expected to be one of:
1. empty (non-existent) - in this case we try to create a new one.
2. joinable - we try to join it.
3. final - we announce ourselves as waiting, and go into monitoring mode
Any other state is considered transitional, and will be retried after
a short delay.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousClosedError - current rendezvous was/is closed
EtcdRendezvousRetryableFailure - observed some intermediate
state, which is best handled by retrying later
"""
try:
active_version = self.try_create_rendezvous()
state = json.loads(active_version.value)
log.info("New rendezvous state created: " + str(state))
except etcd.EtcdAlreadyExist:
active_version, state = self.get_rdzv_state()
# Note: it is possible for above query to fail (etcd.EtcdKeyNotFound),
# but this is ok for us - just means we'll restart from beginning.
log.info("Observed existing rendezvous state: " + str(state))
if state["status"] == "closed":
raise RendezvousClosedError()
if state["status"] == "joinable":
return self.join_phase(state["version"])
if state["status"] == "final":
self.handle_existing_rendezvous(state["version"])
raise EtcdRendezvousRetryImmediately()
self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)
raise EtcdRendezvousRetryableFailure()
def join_phase(self, expected_version):
"""
We observed a rendezvous state in the 'joinable' state, and attempt to join this
particular version, and then wait for all other peers to join.
"""
# Failure to join will propagate an exception, causing a re-entry.
active_version, this_rank = self.join_rendezvous(expected_version)
state = json.loads(active_version.value)
log.info(
"Joined rendezvous version {} as rank {}. Full state: {}".format(
state["version"], this_rank, state
)
)
# If this worker was first to reach num_min_workers requirement,
# and rendezvous is still joinable (therefore it is elastic),
# then this worker will be responsible for waiting out the "last call"
# timeout and closing (i.e. transitioning to 'frozen') the rendezvous
# afterwards.
# As a safety against a potential failure of this worker (during the
# last call timeout), the rendezvous state is made ephemeral
# when min_num_workers is reached.
if this_rank == self._num_min_workers - 1 and state["status"] == "joinable":
log.info("Rank {} is responsible for join last call.".format(this_rank))
last_call_deadline = time.time() + self._last_call_timeout
self.handle_join_last_call(expected_version, last_call_deadline)
log.info("Rank {} finished join last call.".format(this_rank))
# Wait for rendezvous state to be frozen, which means a fixed set of peers
log.info("Waiting for remaining peers.")
active_version = self.wait_for_peers(expected_version)
state = json.loads(active_version.value)
assert (
state["version"] == expected_version
), "Logic error: failed to observe version mismatch"
return self.confirm_phase(expected_version, this_rank)
def confirm_phase(self, expected_version, this_rank):
"""
Once the rendezvous state transitions from 'joinable' to 'frozen',
we have every participant confirm their membership and set up per-member
keep-alive TTL keys, and then wait for all other participants to confirm,
which would then successfully conclude this rendezvous.
"""
log.info("All peers arrived. Confirming membership.")
self.confirm_membership(expected_version, this_rank)
log.info("Waiting for confirmations from all peers.")
active_version = self.wait_for_final(expected_version)
state = json.loads(active_version.value)
log.info(
"Rendezvous version {} is complete. Final state: {}".format(
state["version"], state
)
)
# Rendezvous version number; our rank in it; world size
return state["version"], this_rank, len(state["participants"])
def handle_existing_rendezvous(self, expected_version):
"""
Handle the case when there's an existing (state 'final') rendezvous already
in place, and we have to announce ourselves waiting, and wait until
the next rendezvous opportunity.
"""
# If state is 'final' -> increment num_workers_waiting
# Then, observe state changes:
# 1. if it's no longer final -> bail out and re-try
# 2. if keep alives are missing, destroy it and bail out.
active_state = self.announce_self_waiting(expected_version)
log.info(
"Added self to waiting list. Rendezvous full state: {}".format(
active_state.value
)
)
self.wait_for_rendezvous_to_free(expected_version)
log.info("Previously existing rendezvous state changed. Will re-try joining.")
def try_create_rendezvous(self):
"""
Create new rendezvous state or raise an exception that indicates
an unexpected state (e.g. already exists)
Raises:
RendezvousError - on unexpected state
"""
# Initially active_version is ephemeral - this is to handle the
# possibility that we might fail to complete the setup transaction,
# i.e. the transition "setup" -> "joinable".
active_version = self.client.write(
key=self.get_path("/rdzv/active_version"),
value=json.dumps({"status": "setup"}),
prevExist=False,
ttl=CONST_ETCD_SETUP_TTL,
)
try:
version_counter = self.client.get(self.get_path("/rdzv/version_counter"))
version_counter.value = str(int(version_counter.value) + 1)
self.client.update(version_counter)
except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed):
raise RendezvousError(
"Unexpected state of EtcdRendezvousHandler, worker needs to die."
)
# Any failure below results in declaring a retryable rendezvous failure.
# The ephemeral /rdzv/active_version will expire and someone can then
# re-try the setup process.
# Create directory node for participant data
self.client.write(
key=self.get_path("/rdzv/v_{}".format(version_counter.value)),
value=None,
dir=True,
prevExist=False,
)
# Publish rendezvous version and signal it is ready-to-be-joined.
# If rendezvous was set closed just before this, a retry will happen,
# where the closed condition will be handled.
return self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(
{
"status": "joinable",
"version": version_counter.value,
"participants": [],
}
),
prev_value=active_version.value,
)
def join_rendezvous(self, expected_version):
"""
Helper method for the join phase.
"""
# Use compare-and-swap to add self to rendezvous state:
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "joinable":
raise EtcdRendezvousRetryableFailure(
"Rendezvous state became non-joinable before we could join. "
"Must join next one."
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
assert (
len(state["participants"]) < self._num_max_workers
), "Logic error: joinable rendezvous should always have space left"
this_rank = len(state["participants"])
state["participants"].append(this_rank)
# When reaching min workers, or changing state to frozen, we'll set
# the active_version node to be ephemeral.
set_ttl: Optional[int] = None
if len(state["participants"]) == self._num_max_workers:
state["status"] = "frozen"
state["keep_alives"] = []
set_ttl = CONST_ETCD_FROZEN_TTL
elif len(state["participants"]) >= self._num_min_workers:
set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL
try:
# Compare-and-swap.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=set_ttl,
)
# We succeeded joining.
return active_version, this_rank
except etcd.EtcdCompareFailed:
log.info("Join rendezvous CAS unsuccessful, retrying")
def wait_for_peers(self, expected_version):
"""
Helper method for the join phase.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Success, all peers arrived.
return active_version
elif state["status"] == "joinable" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def confirm_membership(self, expected_version, this_rank):
"""
Helper method for the confirm phase
"""
# Compare-and-swap loop
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "frozen":
raise EtcdRendezvousRetryImmediately(
"Rendezvous no longer frozen, before we confirmed. "
"Must join next one"
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
this_lease_key = self.get_path(
"/rdzv/v_{}/rank_{}".format(expected_version, this_rank)
)
self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL)
state["keep_alives"].append(this_lease_key)
if len(state["keep_alives"]) == len(state["participants"]):
# Everyone confirmed (this rank is last to do so)
state["status"] = "final"
state["num_workers_waiting"] = 0
finalize = True
else:
finalize = False
try:
# Compare-and-swap. If new state is still frozen, keep it ephemeral.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=None if finalize else CONST_ETCD_FROZEN_TTL,
)
self._lease_this_rank_stop = self.setup_lease_renewal(
this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Confirm membership CAS unsuccessful, retrying")
def wait_for_final(self, expected_version):
"""
Helper method for the confirm phase
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "final" and state["version"] == expected_version:
# Success. This rendezvous is final, and we accept it.
return active_version
elif state["status"] == "frozen" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def announce_self_waiting(self, expected_version):
"""
Announce this worker is waiting (via num_workers_waiting counter) to join next
rendezvous, but only if state and version match.
"""
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "final" or state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately()
# Increment counter to signal an additional waiting worker.
state["num_workers_waiting"] += 1
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Announce self as waiting CAS unsuccessful, retrying")
def wait_for_rendezvous_to_free(self, expected_version):
"""
When there's an existing valid rendezvous in state 'final', we have to
wait until the next opportunity to join.
Such opportunity may come from:
1. rendezvous state changed by someone else, in which case we unblock and retry.
2. rendezvous becomes invalid because at least one member failed to renew their
leased keep_alive node. We detect this, and destroy the rendezvous.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] != "final" or state["version"] != expected_version:
return
# Check if current rendezvous state is valid, in the sense that all
# its members are alive (renewing their lease).
# If not, try destroy this rendezvous, so a new one can be created.
alive_members = self.client.get(
self.get_path("/rdzv/v_{version}".format(version=expected_version))
)
keep_alive_keys = [ch.key for ch in alive_members.children]
for key in state["keep_alives"]:
if key not in keep_alive_keys:
# This participant didn't renew their lease. We'll declare this
# rendezvous version as dead (but only if it hadn't changed)
log.info("Keep-alive key {} is not renewed.".format(key))
log.info(
"Rendevous version {} is incomplete. ".format(expected_version)
)
log.info("Attempting to destroy it.")
# Compare-and-delete operation. Throws if compare failed,
# which means rendezvous was already destroyed/re-created/closed,
# and we can try to re-enter the barrier.
self.client.delete(
key=self.get_path("/rdzv/active_version"),
prevValue=active_version.value,
)
log.info(
"Destroyed rendezvous version {} successfully.".format(
expected_version
)
)
# We can return (and retry) immediately
return
# Existing rendezvous seems valid, no reason to destroy it.
# We just have to wait until something changes and re-check.
try:
overall_timeout = (
max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
)
self.client.watch(
key=self.get_path("/rdzv"),
index=active_version.etcd_index + 1,
recursive=True,
timeout=overall_timeout,
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
active_version, state = self.get_rdzv_state()
def handle_join_last_call(self, expected_version, deadline):
"""
After we reach min number of workers, one particular worker takes on the
responsibility of waiting an additional timeout before closing the join window.
If the worker responsible for this fails, the rendezvous will be destroyed due
to expiring TTL, and the other participants will re-rendezvous.
Here we expect to see state <joinable, expected_version>
Exit gracefully if either:
1. state becomes <frozen, expected_version>
2. timeout happens (reaching deadline), in which case
we try the transition to <frozen, expected_version>
Exit with exception otherwise.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Worker set became frozen before last-call timeout. This is possible
# when num_max_workers is reached before the timeout.
return
if state["status"] != "joinable" or state["version"] != expected_version:
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
# If timeout occurred, attempt a state transition (joinable -> frozen)
if time.time() >= deadline:
state["status"] = "frozen"
state["keep_alives"] = []
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=CONST_ETCD_FROZEN_TTL,
)
# We successfully made this rendezvous frozen.
return
except etcd.EtcdCompareFailed:
log.info("Join last-call transition CAS unsuccessful. Will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
continue
# Timeout did not occur, so we must refresh TTL, and wait for
# further changes. Note: we only want TTL to be refreshed if
# state is still joinable, hence we use CAS for that here,
# even though we don't change any of the data.
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=active_version.value,
prev_value=active_version.value,
ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
)
# Minimize "oversleeping":
timeout = min(
CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
deadline - time.time() + 1.0, # Oversleeping by 1s is ok.
)
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1, timeout=timeout
)
except etcd.EtcdCompareFailed:
log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
def set_closed(self):
"""
Mark rendezvous 'closed' for current run_id, which is used to signal other
participants to not attempt to perform (re-)rendezvous. This is useful
when one of the workers decides the job is complete.
"""
while True:
active_version, state = self.get_rdzv_state()
if state["status"] == "closed":
# Already closed by someone else.
return
state["status"] = "closed"
try:
self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Set closed CAS unsuccessful, retrying")
cas_delay()
def get_rdzv_state(self):
active_version = self.client.get(key=self.get_path("/rdzv/active_version"))
return active_version, json.loads(active_version.value)
def try_wait_for_state_change(self, etcd_index, timeout=None):
# Don't sleep past the overall deadline (by more than about 1s)
overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
timeout = overall_timeout if timeout is None else min(timeout, overall_timeout)
try:
self.client.watch(
self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
# Unfortunately, we have to do another fetch in order to get last etcd_index.
return self.get_rdzv_state()
def get_path(self, path):
if not path.startswith("/"):
path = "/" + path
return "{prefix}run_{run_id}{path}".format(
prefix=self._prefix, run_id=self._run_id, path=path
)
def create_path_if_not_exists(self, full_path, ttl=None):
try:
self.client.write(
key=full_path, value=None, dir=True, prevExist=False, ttl=ttl
)
except etcd.EtcdAlreadyExist:
pass
def setup_lease_renewal(self, full_path, ttl):
# NOTE: For ephemeral key TTL renewal (~lease) to work correctly,
# make sure you don't call any long-blocking methods that do not
# release the Python's GIL! An example of this is calling a pybind11
# extension function that is blocking / long-running, but is not
# doing a scoped release of the GIL.
def lease_worker(client, path, ttl, stop_event):
while True:
try:
client.refresh(path, ttl=ttl)
except etcd.EtcdKeyNotFound:
break
except ConnectionRefusedError:
# This error usually occurs during tests, when the server has already been terminated but the
# Python garbage collector has not yet invoked the __del__ method.
break
if stop_event.wait(timeout=ttl / 2):
break
lease_stop_event = threading.Event()
lease_thread = threading.Thread(
target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event)
)
lease_thread.daemon = True
lease_thread.start()
return lease_stop_event
def store_extra_data(self, rdzv_version, key, value):
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
try:
# If first time we are storing anything:
extra_data = self.client.write(
key=node, value=json.dumps({key: value}), prevExist=False
)
return
except etcd.EtcdAlreadyExist:
pass
# CAS loop, to make sure we don't lose concurrent stores.
while True:
# We never delete extra_data. Failure here should be fatal, no special handling.
extra_data = self.client.get(node)
new_extra_data_value = json.loads(extra_data.value)
new_extra_data_value[key] = value
try:
extra_data = self.client.test_and_set(
key=node,
value=json.dumps(new_extra_data_value),
prev_value=extra_data.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Store extra_data CAS unsuccessful, retrying")
time.sleep(0.1)
def load_extra_data(self, rdzv_version, key, timeout=None):
# 'extra_data' node itself, and the directory it is located in:
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
node_dir = self.get_path("/rdzv/v_{}".format(rdzv_version))
# TODO: implement timeout
# https://github.com/pytorch/elastic/issues/12
while True:
# Combined wait for the node itself, and the key inside it.
root = self.client.get(node_dir)
# Find the extra_data node, if it exists
extra_data = [n for n in root.children if n.key == node]
assert len(extra_data) <= 1
# Node for extra_data exists, check the desired key inside it.
if len(extra_data) == 1:
extra_data_dict = json.loads(extra_data[0].value)
if key in extra_data_dict:
return extra_data_dict[key]
# The 'extra_data' node doesn't exist, or the key isn't published yet.
# Wait for interesting events on the extra_data node and retry.
try:
self.client.watch(node, index=root.etcd_index + 1)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
def setup_kv_store(self, rdzv_version):
store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv")
self.create_path_if_not_exists(store_path)
return EtcdStore(etcd_client=self.client, etcd_store_prefix=store_path)
def _create_etcd_client(params: RendezvousParameters) -> etcd.Client:
"""
Creates a new ``etcd.Client`` from the specified ``RendezvousParameters``.
"""
hostname, port = _parse_rendezvous_endpoint(params.endpoint, 2379)
# The communication protocol
protocol = params.config.get("protocol")
if protocol is None:
protocol = "http"
else:
if protocol != "http" and protocol != "https":
raise ValueError("The etcd protocol must be HTTP or HTTPS.")
# The SSL client certificate
ssl_cert = params.config.get("cert")
if ssl_cert is not None:
cert_key = params.config.get("key")
if cert_key is not None:
# The etcd client expects the certificate key as the second element
# of the `cert` tuple.
ssl_cert = (ssl_cert, cert_key)
# The root certificate
ca_cert = params.config.get("cacert")
return etcd.Client(
hostname,
port,
protocol=protocol,
cert=ssl_cert,
ca_cert=ca_cert,
allow_reconnect=True,
)
# Handler for torch.distributed "static" registration
def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler:
"""
Usage:
::
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8,
timeout=300,
last_call_timeout=30,
etcd_prefix="custom_prefix",
protocol="https",
cacert="/etc/kubernetes/certs/ca.crt",
cert="/etc/kubernetes/certs/client.crt",
key="/etc/kubernetes/certs/client.key")
# -- or --
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8)
etcd_rdzv_handler = create_rdzv_handler(rdzv_params)
Where:
run_id - unique id for this training job instance,
min_nodes - min number of workers expected to join the rendezvous,
max_nodes - max number of workers allowed to join the rendezvous,
defaults to min_nodes if not specified.
timeout - total timeout within which next_rendezvous is expected to
succeed; a RendezvousTimeoutError is raised otherwise;
Default is 600 (10 minutes).
last_call_timeout - additional wait amount ("last call") after
min number of workers has been reached.
Defaults to 30 seconds.
etcd_prefix - path prefix (from etcd root), inside which all
etcd nodes will be created.
Default is "/torchelastic/p2p".
protocol - http (default) or https to access etcd.
cacert - CA cert to access etcd, only makes sense with https.
cert - client cert to access etcd, only makes sense with https.
key - client key to access etcd, only makes sense with https.
"""
client = _create_etcd_client(params)
etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p")
rdzv = EtcdRendezvous(
client=client,
prefix=etcd_prefix,
run_id=params.run_id,
num_min_workers=params.min_nodes,
num_max_workers=params.max_nodes,
timeout=params.get_as_int("timeout", _DEFAULT_TIMEOUT),
last_call_timeout=params.get_as_int("last_call_timeout", _DEFAULT_LAST_CALL_TIMEOUT),
)
return EtcdRendezvousHandler(rdzv_impl=rdzv)
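# Hedged usage sketch (not part of the original module): a minimal driver for
# the handler built above. It assumes a reachable etcd server and uses the
# RendezvousParameters class already imported by this module; the endpoint,
# run_id and node counts are made-up illustration values.
if __name__ == "__main__":
    demo_params = RendezvousParameters(
        backend="etcd",
        endpoint="localhost:2379",  # assumed local etcd server
        run_id="example_job",
        min_nodes=1,
        max_nodes=1,
    )
    demo_handler = create_rdzv_handler(demo_params)
    # Blocks until min_nodes workers have joined the same run_id. In this
    # version of the handler the result unpacks as (store, rank, world_size).
    store, rank, world_size = demo_handler.next_rendezvous()
    print("joined rendezvous: rank={} world_size={}".format(rank, world_size))
    demo_handler.set_closed()  # signal other workers that the job is complete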
|
different.py
|
import numpy as np
from scipy.special import gamma, digamma
import numpy.linalg as la
from scipy import stats
import multiprocessing as mp
from multiprocessing.managers import BaseManager, SyncManager
import itertools
import signal
def entropy(X, method='auto', **kwargs):
if method == 'auto':
d = X.shape[1]
if d <= 5:
return kde_entropy(X, **kwargs)
else:
return knn_entropy(X, **kwargs)
elif method == 'knn':
return knn_entropy(X, **kwargs)
elif method == 'gauss_kde':
return kde_entropy(X, **kwargs)
def knn_entropy(X, **kwargs):
"""
Nearest neighbor entropy estimator
:param X: (n_samples, n_dimension) ndarray of samples
"param subsample: if provided, run estimation on a random N subsample of X
:return: entropy of X
"""
if 'subsample' in kwargs and kwargs['subsample'] > X.shape[0]:
raise Exception("subsample size is larger than number of samples in X")
X = resample(X, kwargs['subsample']) if 'subsample' in kwargs else X
nth = kwargs['nth'] if 'nth' in kwargs else 1
r = kwargs['r'] if 'r' in kwargs else np.inf
trunc_fract = kwargs['trunc_fract'] if 'trunc_fract' in kwargs else 1.0
trunc_n = kwargs['trunc_n'] if 'trunc_n' in kwargs else X.shape[0]
parallel = kwargs['parallel'] if 'parallel' in kwargs else False
d = X.shape[1]
X = X[:trunc_n]
X = X[:int(X.shape[0]*trunc_fract)]
X = X[0::nth]
X = bound(X, r)
neighbors = brute_neighbors(X, parallel=parallel)
d = X.shape[1]
vol = np.pi**(d/2)/gamma(d/2 + 1)
accum = 0
N = neighbors.shape[0]
C = np.log((vol * (N-1))/np.exp(digamma(1)))
return np.sum(d * np.log(neighbors[:, 1]) + C) / N
def kde_entropy(X, **kwargs):
"""
Entropy estimator using Gaussian kernel density estimation
and Monte-Carlo integral estimation
"""
if 'subsample' in kwargs and kwargs['subsample'] > X.shape[0]:
raise Exception("subsample size is larger than number of samples in X")
X = resample(X, kwargs['subsample']) if 'subsample' in kwargs else X
N = kwargs['n'] if 'n' in kwargs else 10000
r = kwargs['r'] if 'r' in kwargs else 10
p = stats.gaussian_kde(X.T)
def entropy_func(x):
return p(x) * np.log(p(x)+0.000001)
samples = np.random.uniform(-r, r, size=(N, p.d))
vals = entropy_func(samples.T)
H = -((2 * r) ** p.d * np.nanmean(vals))
return H
def resample(X, n):
if n > X.shape[0]:
raise Exception("subsample size is larger than number of samples in X")
samples = np.random.choice(
X.shape[0], replace=False, size=n)
X = X[samples, :]
return X
def brute_neighbors(X, parallel=True, n=mp.cpu_count()):
neighbors = np.ndarray(shape=(X.shape[0], 2))
# neighbors = []
if parallel:
neighbors = parallel_neighbors(X, n)
return neighbors
# just need to brute force nearest neighbor search
# because the dimensionality is so high.
# KDTree is not significantly faster
for i in range(X.shape[0]):
nearest = (0, 1000000000)
for j in range(X.shape[0]):
if j == i:
continue
d = la.norm((X[i] - X[j]))
if d < nearest[1]:
if d == 0:
continue
# print()
nearest = (j, d)
# neighbors.append(nearest)
neighbors[i] = nearest
return neighbors
def parallel_neighbors(X, n=mp.cpu_count()):
manager = SyncManager()
manager.start(mgr_init)
state = manager.dict()
Xs = np.array_split(X, n)
procs = []
for i, x in enumerate(Xs):
# print(x.shape)
p = mp.Process(target=par_neighb, args=(x, X, i, state))
procs.append(p)
p.start()
# print(len(procs))
for p in procs:
p.join()
# print(np.vstack(state.values()).shape)
# print(state)
# print([x.shape for x in state.values()])
return np.vstack(state.values())
def par_neighb(Xs, X, idx, state):
# neighbors = []
neighbors = np.ndarray(shape=(Xs.shape[0], 2))
for i in range(Xs.shape[0]):
nearest = (0, 1000000000)
for j in range(X.shape[0]):
if j == i:
continue
d = la.norm((Xs[i] - X[j]))
if d < nearest[1]:
if d == 0:
continue
nearest = (j, d)
# neighbors.append(nearest)
neighbors[i] = nearest
# print(neighbors[:4])
# print()
state[idx] = neighbors
def mgr_init():
# signal.signal(signal.SIGINT, mgr_sig_handler)
# <- OR do this to just ignore the signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
def bound(X, r):
"""
:param X: (n_samples, n_dimensions) array of samples
:param r: the radius to bound by
:return: a filtered X containing only those points
within an r*r n-cube about the origin
"""
d = X.shape[0]
inbnd = np.all(np.logical_and(-r <= X, X <= r), axis=1)
return X[inbnd]
def lddp(X, r=5, **kwargs):
"""
Compute the limiting density of discrete points estimate
of a distribution of empirical samples. m(x) is taken to be
the uniform distribution over the support.
:param X: (n_samples, n_dimensions) array of samples
:param r: the radius to bound by
:param kwargs:
:return: the lddp estimate of the distribution
"""
d = X.shape[1]
H = entropy(X, r=r, **kwargs)
c = np.log((1/(2*r))**d)
return H - c
def support_bounds(X):
bounds = [(np.min(x), np.max(x)) for x in X]
b = np.max(np.abs(bounds))
return b
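# Hedged usage sketch (not part of the original module): sanity-check the kNN
# and KDE estimators against the closed-form differential entropy of a
# standard Gaussian, H = 0.5 * d * log(2 * pi * e). The sample size and the
# subsample value below are arbitrary choices for illustration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    d = 2
    X = rng.standard_normal(size=(2000, d))
    analytic = 0.5 * d * np.log(2 * np.pi * np.e)
    # method='auto' picks the KDE estimator for d <= 5 and the kNN one otherwise.
    print("analytic:", analytic)
    print("gauss_kde:", entropy(X, method='gauss_kde'))
    print("knn:", entropy(X, method='knn', subsample=500))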
|
test_host_connection_pool.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import Mock, NonCallableMagicMock
from threading import Thread, Event, Lock
from cassandra.cluster import Session
from cassandra.connection import Connection
from cassandra.pool import Host, HostConnectionPool, NoConnectionsAvailable
from cassandra.policies import HostDistance, SimpleConvictionPolicy
class HostConnectionPoolTests(unittest.TestCase):
def make_session(self):
session = NonCallableMagicMock(spec=Session, keyspace='foobarkeyspace')
session.cluster.get_core_connections_per_host.return_value = 1
session.cluster.get_max_requests_per_connection.return_value = 1
session.cluster.get_max_connections_per_host.return_value = 1
return session
def test_borrow_and_return(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
c, request_id = pool.borrow_connection(timeout=0.01)
self.assertIs(c, conn)
self.assertEqual(1, conn.in_flight)
conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace')
pool.return_connection(conn)
self.assertEqual(0, conn.in_flight)
self.assertNotIn(conn, pool._trash)
def test_failed_wait_for_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
self.assertEqual(1, conn.in_flight)
conn.in_flight = conn.max_request_id
# we're already at the max number of requests for this connection,
# so this should fail
self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0)
def test_successful_wait_for_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock())
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
self.assertEqual(1, conn.in_flight)
def get_second_conn():
c, request_id = pool.borrow_connection(1.0)
self.assertIs(conn, c)
pool.return_connection(c)
t = Thread(target=get_second_conn)
t.start()
pool.return_connection(conn)
t.join()
self.assertEqual(0, conn.in_flight)
def test_all_connections_trashed(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock())
session.cluster.connection_factory.return_value = conn
session.cluster.get_core_connections_per_host.return_value = 1
# manipulate the core connection setting so that we can
# trash the only connection
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.get_core_connections_per_host.return_value = 0
pool._maybe_trash_connection(conn)
session.cluster.get_core_connections_per_host.return_value = 1
submit_called = Event()
def fire_event(*args, **kwargs):
submit_called.set()
session.submit.side_effect = fire_event
def get_conn():
conn.reset_mock()
c, request_id = pool.borrow_connection(1.0)
self.assertIs(conn, c)
self.assertEqual(1, conn.in_flight)
conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace')
pool.return_connection(c)
t = Thread(target=get_conn)
t.start()
submit_called.wait()
self.assertEqual(1, pool._scheduled_for_creation)
session.submit.assert_called_once_with(pool._create_new_connection)
# now run the create_new_connection call
pool._create_new_connection()
t.join()
self.assertEqual(0, conn.in_flight)
def test_spawn_when_at_max(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100)
conn.max_request_id = 100
session.cluster.connection_factory.return_value = conn
# core conns = 1, max conns = 2
session.cluster.get_max_connections_per_host.return_value = 2
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
self.assertEqual(1, conn.in_flight)
# make this conn full
conn.in_flight = conn.max_request_id
# we don't care about making this borrow_connection call succeed for the
# purposes of this test, as long as it results in a new connection
# creation being scheduled
self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0)
session.submit.assert_called_once_with(pool._create_new_connection)
def test_return_defunct_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False,
max_request_id=100, signaled_error=False)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
conn.is_defunct = True
session.cluster.signal_connection_failure.return_value = False
pool.return_connection(conn)
# the connection should be closed and a new creation scheduled
self.assertTrue(session.submit.call_args)
self.assertFalse(pool.is_shutdown)
def test_return_defunct_connection_on_down_host(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False,
max_request_id=100, signaled_error=False)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
conn.is_defunct = True
session.cluster.signal_connection_failure.return_value = True
pool.return_connection(conn)
# the connection should be closed and a new creation scheduled
self.assertTrue(session.cluster.signal_connection_failure.call_args)
self.assertTrue(conn.close.call_args)
self.assertFalse(session.submit.called)
self.assertTrue(pool.is_shutdown)
def test_return_closed_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True, max_request_id=100, signaled_error=False)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
conn.is_closed = True
session.cluster.signal_connection_failure.return_value = False
pool.return_connection(conn)
# a new creation should be scheduled
self.assertTrue(session.submit.call_args)
self.assertFalse(pool.is_shutdown)
def test_host_instantiations(self):
"""
Ensure Host fails if not initialized properly
"""
self.assertRaises(ValueError, Host, None, None)
self.assertRaises(ValueError, Host, '127.0.0.1', None)
self.assertRaises(ValueError, Host, None, SimpleConvictionPolicy)
def test_host_equality(self):
"""
Test host equality has correct logic
"""
a = Host('127.0.0.1', SimpleConvictionPolicy)
b = Host('127.0.0.1', SimpleConvictionPolicy)
c = Host('127.0.0.2', SimpleConvictionPolicy)
self.assertEqual(a, b, 'Two Host instances should be equal when sharing the same address.')
self.assertNotEqual(a, c, 'Two Host instances should NOT be equal when using two different addresses.')
self.assertNotEqual(b, c, 'Two Host instances should NOT be equal when using two different addresses.')
|
curses_menu.py
|
import curses
import os
import platform
import threading
class CursesMenu(object):
"""
A class that displays a menu and allows the user to select an option
:cvar CursesMenu cls.currently_active_menu: Class variable that holds the currently active menu or None if no menu\
is currently active (E.G. when switching between menus)
"""
currently_active_menu = None
stdscr = None
def __init__(self, title=None, subtitle=None, show_exit_option=True):
"""
:ivar str title: The title of the menu
:ivar str subtitle: The subtitle of the menu
:ivar bool show_exit_option: Whether this menu should show an exit item by default. Can be overridden \
when the menu is started
:ivar items: The list of MenuItems that the menu will display
:vartype items: list[:class:`MenuItem<cursesmenu.items.MenuItem>`]
:ivar CursesMenu parent: The parent of this menu
:ivar CursesMenu previous_active_menu: the previously active menu to be restored into the class's \
currently active menu
:ivar int current_option: The currently highlighted menu option
:ivar MenuItem current_item: The item corresponding to the menu option that is currently highlighted
:ivar int selected_option: The option that the user has most recently selected
:ivar MenuItem selected_item: The item in :attr:`items` that the user most recently selected
:ivar returned_value: The value returned by the most recently selected item
:ivar screen: the curses window associated with this menu
:ivar normal: the normal text color pair for this menu
:ivar highlight: the highlight color pair associated with this window
"""
self.screen = None
self.highlight = None
self.normal = None
self.title = title
self.subtitle = subtitle
self.show_exit_option = show_exit_option
self.items = list()
self.parent = None
self.exit_item = ExitItem(menu=self)
self.current_option = 0
self.selected_option = -1
self.returned_value = None
self.should_exit = False
self.previous_active_menu = None
self._main_thread = None
self._running = threading.Event()
def __repr__(self):
return "%s: %s. %d items" % (self.title, self.subtitle, len(self.items))
@property
def current_item(self):
"""
:rtype: MenuItem|None
"""
if self.items:
return self.items[self.current_option]
else:
return None
@property
def selected_item(self):
"""
:rtype: MenuItem|None
"""
if self.items and self.selected_option != -1:
return self.items[self.current_option]
else:
return None
def append_item(self, item):
"""
Add an item to the end of the menu before the exit item
:param MenuItem item: The item to be added
"""
did_remove = self.remove_exit()
item.menu = self
self.items.append(item)
if did_remove:
self.add_exit()
if self.screen:
max_row, max_cols = self.screen.getmaxyx()
if max_row < 6 + len(self.items):
self.screen.resize(6 + len(self.items), max_cols)
self.draw()
def add_exit(self):
"""
Add the exit item if necessary. Used to make sure there aren't multiple exit items
:return: True if item needed to be added, False otherwise
:rtype: bool
"""
if self.items:
if self.items[-1] is not self.exit_item:
self.items.append(self.exit_item)
return True
return False
def remove_exit(self):
"""
Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else
:return: True if item needed to be removed, False otherwise
:rtype: bool
"""
if self.items:
if self.items[-1] is self.exit_item:
del self.items[-1]
return True
return False
def _wrap_start(self):
if self.parent is None:
curses.wrapper(self._main_loop)
else:
self._main_loop(None)
CursesMenu.currently_active_menu = None
if self.stdscr:
self.stdscr.clear()
else:
self.clear_screen()
os.system('clear')
CursesMenu.currently_active_menu = self.previous_active_menu
def start(self, show_exit_option=None):
"""
Start the menu in a new thread and allow the user to interact with it.
The thread is a daemon, so :meth:`join()<cursesmenu.CursesMenu.join>` should be called if there's a possibility\
that the main thread will exit before the menu is done
:param bool show_exit_option: Whether the exit item should be shown, defaults to\
the value set in the constructor
"""
self.previous_active_menu = CursesMenu.currently_active_menu
CursesMenu.currently_active_menu = None
self.should_exit = False
if show_exit_option is None:
show_exit_option = self.show_exit_option
if show_exit_option:
self.add_exit()
else:
self.remove_exit()
try:
self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)
except TypeError:
self._main_thread = threading.Thread(target=self._wrap_start)
self._main_thread.daemon = True
self._main_thread.start()
def show(self, show_exit_option=None):
"""
Calls start and then immediately joins.
:param bool show_exit_option: Whether the exit item should be shown, defaults to the value set \
in the constructor
"""
self.start(show_exit_option)
self.join()
def _main_loop(self, scr):
if scr is not None:
CursesMenu.stdscr = scr
self.screen = curses.newpad(len(self.items) + 6, CursesMenu.stdscr.getmaxyx()[1])
self._set_up_colors()
curses.curs_set(0)
CursesMenu.stdscr.refresh()
self.draw()
CursesMenu.currently_active_menu = self
self._running.set()
while self._running.wait() is not False and not self.should_exit:
self.process_user_input()
def draw(self):
"""
Redraws the menu and refreshes the screen. Should be called whenever something changes that needs to be redrawn.
"""
self.screen.border(0)
if self.title is not None:
self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)
if self.subtitle is not None:
self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)
for index, item in enumerate(self.items):
if self.current_option == index:
text_style = self.highlight
else:
text_style = self.normal
self.screen.addstr(5 + index, 4, item.show(index), text_style)
screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()
top_row = 0
if 6 + len(self.items) > screen_rows:
if screen_rows + self.current_option < 6 + len(self.items):
top_row = self.current_option
else:
top_row = 6 + len(self.items) - screen_rows
self.screen.refresh(top_row, 0, 0, 0, screen_rows - 1, screen_cols - 1)
def is_running(self):
"""
:return: True if the menu is started and hasn't been paused
"""
return self._running.is_set()
def wait_for_start(self, timeout=None):
"""
Block until the menu is started
:param timeout: How long to wait before timing out
:return: False if timeout is given and operation times out, True otherwise. None before Python 2.7
"""
return self._running.wait(timeout)
def is_alive(self):
"""
:return: True if the thread is still alive, False otherwise
"""
return self._main_thread.is_alive()
def pause(self):
"""
Temporarily pause the menu until resume is called
"""
self._running.clear()
def resume(self):
"""
Sets the currently active menu to this one and resumes it
"""
CursesMenu.currently_active_menu = self
self._running.set()
def join(self, timeout=None):
"""
Should be called at some point after :meth:`start()<cursesmenu.CursesMenu.start>` to block until the menu exits.
:param Number timeout: How long to wait before timing out
"""
self._main_thread.join(timeout=timeout)
def get_input(self):
"""
Can be overridden to change the input method.
Called in :meth:`process_user_input()<cursesmenu.CursesMenu.process_user_input>`
:return: the ordinal value of a single character
:rtype: int
"""
return CursesMenu.stdscr.getch()
def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input()
go_to_max = ord("9") if len(self.items) >= 9 else ord(str(len(self.items)))
if ord('1') <= user_input <= go_to_max:
self.go_to(user_input - ord('0') - 1)
elif user_input == curses.KEY_DOWN:
self.go_down()
elif user_input == curses.KEY_UP:
self.go_up()
elif user_input == ord("\n"):
self.select()
return user_input
def go_to(self, option):
"""
Go to the option entered by the user as a number
:param option: the option to go to
:type option: int
"""
self.current_option = option
self.draw()
def go_down(self):
"""
Go down one, wrap to beginning if necessary
"""
if self.current_option < len(self.items) - 1:
self.current_option += 1
else:
self.current_option = 0
self.draw()
def go_up(self):
"""
Go up one, wrap to end if necessary
"""
if self.current_option > 0:
self.current_option += -1
else:
self.current_option = len(self.items) - 1
self.draw()
def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self.should_exit = self.selected_item.should_exit
if not self.should_exit:
self.draw()
def exit(self):
"""
Signal the menu to exit, then block until it's done cleaning up
"""
self.should_exit = True
self.join()
def _set_up_colors(self):
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.highlight = curses.color_pair(1)
self.normal = curses.A_NORMAL
def clear_screen(self):
"""
Clear the screen belonging to this menu
"""
if self.stdscr:
self.stdscr.clear()
self.stdscr.refresh()
self.screen.clear()
class MenuItem(object):
"""
A generic menu item
"""
def __init__(self, text, menu=None, should_exit=False):
"""
:ivar str text: The text shown for this menu item
:ivar CursesMenu menu: The menu to which this item belongs
:ivar bool should_exit: Whether the menu should exit once this item's action is done
"""
self.text = text
self.menu = menu
self.should_exit = should_exit
def __str__(self):
return "%s %s" % (self.menu.title, self.text)
def show(self, index):
"""
How this item should be displayed in the menu. Can be overridden, but should keep the same signature.
Default is:
1 - Item 1
2 - Another Item
:param int index: The index of the item in the items list of the menu
:return: The representation of the item to be shown in a menu
:rtype: str
"""
return "%d - %s" % (index + 1, self.text)
def set_up(self):
"""
Override to add any setup actions necessary for the item
"""
pass
def action(self):
"""
Override to carry out the main action for this item.
"""
pass
def clean_up(self):
"""
Override to add any cleanup actions necessary for the item
"""
pass
def get_return(self):
"""
Override to change what the item returns.
Otherwise just returns the same value the last selected item did.
"""
return self.menu.returned_value
class ExitItem(MenuItem):
"""
Used to exit the current menu. Handled by :class:`cursesmenu.CursesMenu`
"""
def __init__(self, text="Exit", menu=None):
super(ExitItem, self).__init__(text=text, menu=menu, should_exit=True)
def show(self, index):
"""
This class overrides this method
"""
if self.menu and self.menu.parent:
self.text = "Return to %s menu" % self.menu.parent.title
else:
self.text = "Exit"
return super(ExitItem, self).show(index)
def clear_terminal():
"""
Call the platform specific function to clear the terminal: cls on windows, reset otherwise
"""
if platform.system().lower() == "windows":
os.system('cls')
else:
os.system('reset')
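# Hedged usage sketch (not part of the original module): build a small menu
# from the classes above and block until the user selects Exit. This only
# makes sense when run interactively in a real terminal; the item texts are
# made up for illustration.
if __name__ == "__main__":
    class EchoItem(MenuItem):
        # Minimal MenuItem subclass whose action just records which item ran.
        def action(self):
            self.menu.returned_value = "picked: %s" % self.text

    demo_menu = CursesMenu(title="Demo menu", subtitle="Pick an option")
    demo_menu.append_item(EchoItem("First option"))
    demo_menu.append_item(EchoItem("Second option"))
    # show() is start() followed by join(): the daemon menu thread drives
    # curses while the main thread blocks here until Exit is selected.
    demo_menu.show()
    print(demo_menu.returned_value)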
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 1.19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster'])
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan", "longitudinalPlan"],
"carState": [], "controlsState": [], "radarState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
def replay_process(cfg, lr, fingerprint=None):
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("CommunityFeaturesToggle", True)
os.environ['NO_RADAR_SLEEP'] = "1"
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
os.environ["SIMULATION"] = "1" # Disable submaster alive checks
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(tqdm(pub_msgs, disable=False)):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
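# Hedged usage sketch (not part of the original module): replay a single
# process over a recorded segment. It assumes openpilot's
# tools.lib.logreader.LogReader for reading rlogs; the segment path is a
# placeholder.
if __name__ == "__main__":
    from tools.lib.logreader import LogReader  # assumed openpilot helper

    lr = list(LogReader("/path/to/segment/rlog.bz2"))  # placeholder path
    cfg = next(c for c in CONFIGS if c.proc_name == "radard")
    # Python processes (fake_pubsubmaster=True) run against fake pub/sub
    # masters; C++ processes such as ubloxd use real messaging sockets.
    new_msgs = replay_process(cfg, lr)
    print("replayed {} messages from {}".format(len(new_msgs), cfg.proc_name))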
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
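# A hedged illustration of the pattern described above (not part of the test
# suite): a base test refers to the implementation under test only through
# class attributes, and thin subclasses bind those attributes to the C (io)
# and pure-Python (_pyio) modules. The class and test names here are made up.
#
#   class ExampleBufferTest:
#       # 'ioclass' is injected by the concrete subclasses below.
#       def test_roundtrip(self):
#           buf = self.ioclass(b"data")
#           self.assertEqual(buf.read(), b"data")
#
#   class CExampleBufferTest(ExampleBufferTest, unittest.TestCase):
#       ioclass = io.BytesIO        # C implementation
#
#   class PyExampleBufferTest(ExampleBufferTest, unittest.TestCase):
#       ioclass = pyio.BytesIO      # pure-Python implementation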
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import import_helper
from test.support import os_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support import skip_if_sanitizer
from test.support.os_helper import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
os_helper.unlink(os_helper.TESTFN)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w', encoding="utf-8")
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w', encoding="utf-8")
def test_raw_file_io(self):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(os_helper.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and macOS this test consumes large resources; it takes
        # a long time to build the >2 GiB file and takes >2 GiB of disk space,
        # so the 'largefile' resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(os_helper.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(os_helper.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "a", encoding="utf-8") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with warnings_helper.check_warnings(('', ResourceWarning)):
f = MyFileIO(os_helper.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(os_helper.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'w',
encoding="utf-8", closefd=False)
def test_read_closed(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(os_helper.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, os_helper.TESTFN, "r",
encoding="utf-8", closefd=False)
def test_closefd_attr(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with warnings_helper.check_warnings(('', ResourceWarning)):
f = self.FileIO(os_helper.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
        # Test that the file is closed despite the failed flush,
        # and that flush() is called before the file is closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(os_helper.TESTFN, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(os_helper.TESTFN, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(os_helper.TESTFN, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8", closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", encoding="utf-8", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", buffering=0)
def test_invalid_newline(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(path, "r", encoding="utf-8") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(os_helper.TESTFN))
check_path_succeeds(FakePath(os.fsencode(os_helper.TESTFN)))
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w', encoding="utf-8")
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(os_helper.TESTFN), 'rwxa', encoding="utf-8")
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
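# A rough sketch of the fallback behaviour IOTest.test_RawIOBase_read above
# exercises (an approximation for orientation, not the interpreter's actual
# implementation): RawIOBase.read(n) is built on top of readinto(), and a None
# result from readinto() -- no data available right now -- is passed through.
def _sketch_rawiobase_read(raw, n):
    buf = bytearray(n)
    got = raw.readinto(buf)
    if got is None:
        return None                # non-blocking stream with nothing to read
    return bytes(buf[:got])        # may be shorter than n; b"" at EOF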
class CIOTest(IOTest):
# TODO: RUSTPYTHON, cyclic gc
@unittest.expectedFailure
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
# TODO: RUSTPYTHON, AssertionError: filter ('', ResourceWarning) did not catch any warning
@unittest.expectedFailure
def test_destructor(self):
        super().test_destructor()
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
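# Roughly what support.detect_api_mismatch computes for APIMismatchTest above
# (a simplified approximation, not the helper's exact implementation): the
# names the reference class exposes that the other class lacks, minus an
# explicit ignore list.
def _sketch_detect_api_mismatch(ref_api, other_api, ignore=()):
    return set(dir(ref_api)) - set(dir(other_api)) - set(ignore)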
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
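# A stand-alone illustration of the close() contract that
# CommonBufferedTests.test_flush_error_on_close above checks, using io.BytesIO
# instead of the mocks: even when flush() raises, close() must still mark the
# stream as closed and let the original exception propagate.
def _demo_close_despite_flush_error():
    buffered = io.BufferedWriter(io.BytesIO())
    def bad_flush():
        raise OSError("simulated flush failure")
    buffered.flush = bad_flush
    try:
        buffered.close()           # flush() raises ...
    except OSError:
        pass
    assert buffered.closed         # ... but the stream is closed anyway
    return buffered.closed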
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
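# The arithmetic behind SizeofTest above, shown directly on the C classes
# (CPython-specific, which is why those tests carry @support.cpython_only):
# the internal buffer is included in sys.getsizeof(), so two readers that
# differ only in buffer_size differ in reported size by exactly that amount.
def _demo_buffered_sizeof_delta():
    small = io.BufferedReader(io.BytesIO(), buffer_size=4096)
    large = io.BufferedReader(io.BytesIO(), buffer_size=8192)
    return sys.getsizeof(large) - sys.getsizeof(small)   # 8192 - 4096 == 4096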
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(os_helper.TESTFN, "wb") as f:
f.write(s)
with self.open(os_helper.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
def test_truncate_on_read_only(self):
rawio = self.MockFileIO(b"abc")
bufio = self.tp(rawio)
self.assertFalse(bufio.writable())
self.assertRaises(self.UnsupportedOperation, bufio.truncate)
self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0)
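# A compact usage example of the peek()/read() interplay several
# BufferedReaderTest cases above rely on: peek() may return more bytes than
# requested (it fills the buffer with at most one raw read) and never advances
# the stream position.
def _demo_peek_then_read():
    buffered = io.BufferedReader(io.BytesIO(b"abcdef"), buffer_size=4)
    previewed = buffered.peek(3)   # b"abcd" here: more than asked, position unchanged
    consumed = buffered.read(3)    # b"abc" -- peek() consumed nothing
    return previewed, consumed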
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
def test_bad_readinto_value(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: -1
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsNone(cm.exception.__cause__)
    # TODO: RUSTPYTHON, TypeError: 'bytes' object cannot be interpreted as an integer
@unittest.expectedFailure
def test_bad_readinto_type(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: b''
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsInstance(cm.exception.__cause__, TypeError)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_flush_error_on_close(self):
super().test_flush_error_on_close()
# TODO: RUSTPYTHON, AssertionError: UnsupportedOperation not raised by truncate
@unittest.expectedFailure
def test_truncate_on_read_only(self): # TODO: RUSTPYTHON, remove when this passes
super().test_truncate_on_read_only() # TODO: RUSTPYTHON, remove when this passes
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(os_helper.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After write write_pos and write_end are set to 0
f.read(1)
# read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(os_helper.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
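# A minimal stand-alone version of the buffering behaviour BufferedWriterTest
# above checks: writes smaller than the buffer stay in memory until flush()
# (or a buffer overflow) pushes them to the raw stream.
def _demo_buffered_write_flush():
    raw = io.BytesIO()
    buffered = io.BufferedWriter(raw, buffer_size=8)
    buffered.write(b"abc")
    before = raw.getvalue()        # b"" -- still only in the buffer
    buffered.flush()
    after = raw.getvalue()         # b"abc" -- pushed through to the raw stream
    return before, after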
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_flush_error_on_close(self):
super().test_flush_error_on_close()
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
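# A short usage sketch of BufferedRWPair, the class BufferedRWPairTest above
# targets: it combines an independent reader and writer (think of the two
# directions of a socket) into a single object that is never seekable.
def _demo_rw_pair():
    pair = io.BufferedRWPair(io.BytesIO(b"ping"), io.BytesIO())
    request = pair.read()          # b"ping", taken from the reader side
    pair.write(b"pong")
    pair.flush()                   # pushes the write through to the writer side
    return request, pair.seekable()    # (b"ping", False)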
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream; otherwise peek() may return fewer bytes.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
# writable() returns True, so there's no point to test it over
# a writable stream.
test_truncate_on_read_only = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@skip_if_sanitizer(memory=True, address=True, reason="sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_flush_error_on_close(self):
super().test_flush_error_on_close()
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
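# Illustrative sketch only (the _demo_buffered_random_rewind name is ours):
# BufferedRandom keeps the logical read/write position consistent, so a
# write issued right after a buffered read first rewinds the raw stream
# and overwrites the bytes that follow the current position, as
# BufferedRandomTest.test_write_after_readahead checks above.
def _demo_buffered_random_rewind():
    raw = io.BytesIO(b"A" * 10)
    buffered = io.BufferedRandom(raw, 4)
    buffered.read(1)               # triggers readahead into the buffer
    buffered.write(b"B")           # logically at offset 1
    buffered.flush()
    return raw.getvalue()          # b'ABAAAAAAAA'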
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
# bpo-41919: This method is separated from StatefulIncrementalDecoder to avoid a resource leak
# when registering codecs and cleanup functions.
def lookupTestDecoder(name):
if StatefulIncrementalDecoder.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=StatefulIncrementalDecoder)
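# A small, illustrative sketch (names prefixed _demo_ are ours): feeding
# StatefulIncrementalDecoder one byte at a time shows that output only
# appears once a complete word has been buffered, per the word protocol
# described in its docstring; the expected result matches the
# 'i.o6.x.xyz.toolongtofit.' case in the test table below.
def _demo_stateful_decoder(data=b'i.o6.x.xyz.'):
    decoder = StatefulIncrementalDecoder()
    pieces = [decoder.decode(bytes([byte])) for byte in data]
    pieces.append(decoder.decode(b'', final=True))  # flush the last word
    return ''.join(pieces)          # 'x-----.xyz---.' for the default input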
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
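# Hedged sketch (not used by the suite; _demo_opaque_cookie is our name,
# and it assumes the 'test_decoder' codec provided by lookupTestDecoder):
# TextIOWrapper.tell() returns an opaque cookie that also captures the
# incremental decoder's state, so seek(cookie) can resume decoding
# mid-stream, the behaviour TextIOWrapperTest.test_seek_and_tell
# verifies exhaustively below.
def _demo_opaque_cookie():
    codecs.register(lookupTestDecoder)
    StatefulIncrementalDecoder.codecEnabled = True
    try:
        txt = io.TextIOWrapper(io.BytesIO(b'i.o6.x.xyz.'),
                               encoding='test_decoder')
        first = txt.read(7)        # 'x-----.'
        cookie = txt.tell()
        rest = txt.read()          # 'xyz---.'
        txt.seek(cookie)
        assert txt.read() == rest
        return first, rest
    finally:
        StatefulIncrementalDecoder.codecEnabled = False
        codecs.unregister(lookupTestDecoder)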
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
os_helper.unlink(os_helper.TESTFN)
codecs.register(lookupTestDecoder)
self.addCleanup(codecs.unregister, lookupTestDecoder)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, encoding="utf-8", newline=42)
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO(), encoding="utf-8")
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b, encoding="ascii")
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw, encoding="utf-8")
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b, encoding="locale")
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b, encoding="locale")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio, encoding="utf-8").xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(os_helper.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(os_helper.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(os_helper.TESTFN, "wb") as f:
f.write(line*2)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(os_helper.TESTFN, "wb") as f:
f.write(data)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(os_helper.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(os_helper.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(os_helper.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_multibyte_seek_and_tell(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_with_encoder_state(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable(), encoding="utf-8")
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"), encoding="utf-8")
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO(), encoding="utf-8")
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_bom(self):
# Same test, but when seeking manually
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertEqual(f.errors, "strict")
with self.open(os_helper.TESTFN, "w", encoding="utf-8", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(os_helper.TESTFN, "w", encoding="utf-8", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with threading_helper.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(os_helper.TESTFN, encoding="utf-8") as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata), encoding="utf-8")
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
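# Illustrative sketch (the _demo_reconfigure_write name is ours):
# reconfigure() can switch a TextIOWrapper's encoding mid-stream; pending
# text is flushed with the old encoding and later writes use the new one,
# mirroring TextIOWrapperTest.test_reconfigure_write above.
def _demo_reconfigure_write():
    raw = io.BytesIO()
    txt = io.TextIOWrapper(raw, encoding='latin-1', newline='\n')
    txt.write('abc\xe9\n')             # latin-1: b'abc\xe9\n'
    txt.reconfigure(encoding='utf-8')  # implies a flush
    txt.write('d\xe9f\n')              # utf-8: b'd\xc3\xa9f\n'
    txt.flush()
    return raw.getvalue()              # b'abc\xe9\nd\xc3\xa9f\n'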
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
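# Illustrative sketch (the _demo_memoryview_read name is ours): the helper
# above hands TextIOWrapper a memoryview over array('i'), i.e. a buffer
# whose itemsize is larger than one byte, which the text layer must still
# accept from the underlying read()/read1() (cf. test_read_byteslike).
def _demo_memoryview_read():
    raw = MemviewBytesIO(b'Just some random string\n')
    view = raw.read(8)                 # a memoryview, not bytes
    assert isinstance(view, memoryview) and view.itemsize > 1
    return view.tobytes()              # b'Just som'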
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "LookupError: unknown encoding: ascii"
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor(self):
super().test_constructor()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_detach(self):
super().test_detach()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_encoding_read(self):
super().test_reconfigure_encoding_read()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_line_buffering(self):
super().test_reconfigure_line_buffering()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_basic_io(self):
super().test_basic_io()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_telling(self):
super().test_telling()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_uninitialized(self):
super().test_uninitialized()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_non_text_encoding_codecs_are_rejected(self):
super().test_non_text_encoding_codecs_are_rejected()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repr(self):
super().test_repr()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines(self):
super().test_newlines()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines_input(self):
super().test_newlines_input()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_one_by_one(self):
super().test_read_one_by_one()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_by_chunk(self):
super().test_read_by_chunk()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_1(self):
super().test_issue1395_1()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_2(self):
super().test_issue1395_2()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_3(self):
super().test_issue1395_3()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_4(self):
super().test_issue1395_4()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_5(self):
super().test_issue1395_5()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_through(self):
super().test_reconfigure_write_through()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_fromascii(self):
super().test_reconfigure_write_fromascii()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write(self):
super().test_reconfigure_write()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_non_seekable(self):
super().test_reconfigure_write_non_seekable()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_defaults(self):
super().test_reconfigure_defaults()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_newline(self):
super().test_reconfigure_newline()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(os_helper.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
# The default chunk size is 8192 bytes, so t doesn't write data to buf yet.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines(self):
super().test_newlines()
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
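# Illustrative sketch (the _demo_newline_decoder name is ours): with
# translate=True the IncrementalNewlineDecoder folds \r, \n and \r\n into
# \n while recording which line endings were seen, which is what the
# check_newline_decoding helpers above assert byte-by-byte.
def _demo_newline_decoder():
    dec = io.IncrementalNewlineDecoder(None, translate=True)
    out = dec.decode("a\r\nb\rc\n", final=True)
    # out == 'a\nb\nc\n'; dec.newlines == ('\r', '\n', '\r\n')
    return out, dec.newlines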
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
# XXX RUSTPYTHON: universal mode is deprecated anyway, so I
# feel fine about skipping it
# with warnings_helper.check_warnings(('', DeprecationWarning)):
# f = self.open(os_helper.TESTFN, "U", encoding="utf-8")
# self.assertEqual(f.name, os_helper.TESTFN)
# self.assertEqual(f.buffer.name, os_helper.TESTFN)
# self.assertEqual(f.buffer.raw.name, os_helper.TESTFN)
# self.assertEqual(f.mode, "U")
# self.assertEqual(f.buffer.mode, "rb")
# self.assertEqual(f.buffer.raw.mode, "rb")
# f.close()
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a', encoding="utf-8")
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
f = self.open(os_helper.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
# TODO: RUSTPYTHON, cyclic gc
@unittest.expectedFailure
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(os_helper.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(os_helper.TESTFN, "wb")
self._check_warn_on_dealloc(os_helper.TESTFN, "w", encoding="utf-8")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings_helper.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r", encoding="utf-8")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(os_helper.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
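# (The two callers above exercise both cases: bufsize=1024 stays at or below
# a typical 4096-byte _PIPE_BUF, while bufsize=16*1024 exceeds it and can
# produce partial writes.)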
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(os_helper.TESTFN, 'w', encoding="utf-8"):
pass
self.assertRaises(FileExistsError, self.open, os_helper.TESTFN, 'x', encoding="utf-8")
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(os_helper.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(os_helper.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'rwax+', encoding="utf-8")
def test_check_encoding_errors(self):
# bpo-37388: open() and TextIOWrapper must check encoding and errors
# arguments in dev mode
mod = self.io.__name__
filename = __file__
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
from {mod} import open, TextIOWrapper
try:
open({filename!r}, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
open({filename!r}, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
fp = open({filename!r}, "rb")
with fp:
try:
TextIOWrapper(fp, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
TextIOWrapper(fp, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
# TODO: RUSTPYTHON, AssertionError: 0 != 2
@unittest.expectedFailure
def test_check_encoding_warning(self):
# PEP 597: Raise warning when encoding is not specified
# and sys.flags.warn_default_encoding is set.
mod = self.io.__name__
filename = __file__
code = textwrap.dedent(f'''\
import sys
from {mod} import open, TextIOWrapper
import pathlib
with open({filename!r}) as f: # line 5
pass
pathlib.Path({filename!r}).read_text() # line 8
''')
proc = assert_python_ok('-X', 'warn_default_encoding', '-c', code)
warnings = proc.err.splitlines()
self.assertEqual(len(warnings), 2)
self.assertTrue(
warnings[0].startswith(b"<string>:5: EncodingWarning: "))
self.assertTrue(
warnings[1].startswith(b"<string>:8: EncodingWarning: "))
@support.cpython_only
# Depending on whether OpenWrapper was already created, the warning may or
# may not be emitted. For example, the attribute is already created when this
# test is run multiple times.
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_openwrapper(self):
self.assertIs(self.io.OpenWrapper, self.io.open)
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: _enter_buffered_busy: "
r"could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
# TODO: RUSTPYTHON, AssertionError: 22 != 10 : _PythonRunResult(rc=22, out=b'', err=b'')
@unittest.expectedFailure
def test_check_encoding_errors(self): # TODO: RUSTPYTHON, remove when this passes
super().test_check_encoding_errors() # TODO: RUSTPYTHON, remove when this passes
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
# because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect() # For PyPy or other GCs.
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r", encoding="latin1")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = import_helper.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__# + ["IncrementalNewlineDecoder"] XXX RUSTPYTHON
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# TODO: RUSTPYTHON (need to update io.py, see bpo-43680)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
import importlib.machinery
import importlib.util
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support import import_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = import_helper.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: '
b'PyThreadState_Get: '
b'the function must be called with the GIL held, '
b'but the GIL is released '
b'(the current Python thread state is NULL)'),
err)
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned NULL '
br'without setting an error\n'
br'Python runtime state: initialized\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned a result '
br'with an error set\n'
br'Python runtime state: initialized\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
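# (Rough arithmetic: fitting 2**20 recursive dealloc frames in an 8 MB stack
# would leave only 8 bytes per frame, far less than a real C frame needs, so
# without the trashcan the recursion is guaranteed to overflow.)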
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_heap_ctype_doc_and_text_signature(self):
self.assertEqual(_testcapi.HeapDocCType.__doc__, "somedoc")
self.assertEqual(_testcapi.HeapDocCType.__text_signature__, "(arg1, arg2)")
def test_null_type_doc(self):
self.assertEqual(_testcapi.NullTpDocType.__doc__, None)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_heaptype_with_buffer(self):
inst = _testcapi.HeapCTypeWithBuffer()
b = bytes(inst)
self.assertEqual(b, b"1234")
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
obj = _testcapi.HeapCTypeSetattr()
self.assertEqual(obj.pvalue, 10)
obj.value = 12
self.assertEqual(obj.pvalue, 12)
del obj.value
self.assertEqual(obj.pvalue, 0)
def test_pynumber_tobase(self):
from _testcapi import pynumber_tobase
self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
self.assertEqual(pynumber_tobase(123, 8), '0o173')
self.assertEqual(pynumber_tobase(123, 10), '123')
self.assertEqual(pynumber_tobase(123, 16), '0x7b')
self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
self.assertEqual(pynumber_tobase(-123, 10), '-123')
self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
self.assertRaises(TypeError, pynumber_tobase, '123', 10)
self.assertRaises(SystemError, pynumber_tobase, 123, 0)
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with threading_helper.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_subinterps_recent_language_features(self):
r, w = os.pipe()
code = """if 1:
import pickle
with open({:d}, "wb") as f:
@(lambda x:x) # Py 3.9
def noop(x): return x
a = (b := f'1{{2}}3') + noop('x') # Py 3.8 (:=) / 3.6 (f'')
async def foo(arg): return await arg # Py 3.5
pickle.dump(dict(a=a, b=b), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
@threading_helper.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
# Suppress warning from PyUnicode_FromUnicode().
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_widechar(self):
_testcapi.test_widechar()
class Test_testinternalcapi(unittest.TestCase):
locals().update((name, getattr(_testinternalcapi, name))
for name in dir(_testinternalcapi)
if name.startswith('test_'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: _PyMem_DebugMalloc: '
'Python memory allocator called without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_null_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_null_is_freed')
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
class Test_ModuleStateAccess(unittest.TestCase):
"""Test access to module state (PEP 573)"""
# The C part of the tests lives in _testmultiphase, in a module called
# _testmultiphase_meth_state_access.
# This module has multi-phase initialization, unlike _testcapi.
def setUp(self):
fullname = '_testmultiphase_meth_state_access' # XXX
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
self.module = module
def test_subclass_get_module(self):
"""PyType_GetModule for defining_class"""
class StateAccessType_Subclass(self.module.StateAccessType):
pass
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_subclass_get_module_with_super(self):
class StateAccessType_Subclass(self.module.StateAccessType):
def get_defining_module(self):
return super().get_defining_module()
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_state_access(self):
"""Checks methods defined with and without argument clinic
This tests a no-arg method (get_count) and a method with
both a positional and keyword argument.
"""
a = self.module.StateAccessType()
b = self.module.StateAccessType()
methods = {
'clinic': a.increment_count_clinic,
'noclinic': a.increment_count_noclinic,
}
for name, increment_count in methods.items():
with self.subTest(name):
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
increment_count()
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 1)
increment_count(3)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 4)
increment_count(-2, twice=True)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
with self.assertRaises(TypeError):
increment_count(thrice=3)
with self.assertRaises(TypeError):
increment_count(1, 2, 3)
if __name__ == "__main__":
unittest.main()
|
simulator.py
|
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import random
import threading
import time
import spam_factory
log = logging.getLogger(__name__)
def threader(func):
def wrapper(self, *args, **kwargs):
threading.Thread(target=func, args=(self,)).start()
return wrapper
class Simulator(object):
def __init__(self, name, pipeline, cache, keeper):
"""Create an instance of the `Simulator` class
@param name: Name of the pipeline
@type name: `str`
@param pipeline: Pipeline to be executed
@type pipeline: `dict`
@param cache: Reference to the cache
@type cache: `spamostack.cache.Cache`
@param keeper: Reference to the keeper
@type keeper: `keeper.Keeper`
"""
self.name = name
self.pipeline = pipeline
self.cache = cache
self.keeper = keeper
users = self.keeper.get(
"keystone", "users", "id",
lambda x: x in self.cache["keystone"]["users"])
user = random.choice(users)
self.user = self.cache["users"][user.name]
self.user["auth_url"] = self.cache["api"]["auth_url"]
self.client_factory = spam_factory.SpamFactory(self.cache, self.user,
self.keeper)
@threader
def simulate(self):
"""Simulate actions described by the pipeline."""
def loop(pipe_client, pipe, parent_obj):
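# pipe is expected to be a nested dict mirroring the client's attribute
# tree; leaf values are argument tuples (period, number, count) that get
# unpacked into self.rotate() below.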
for key, value in pipe.iteritems():
attr = getattr(parent_obj, key)
if isinstance(value, dict):
loop(pipe_client, value, attr)
else:
self.rotate(attr, *value)
for pipe_client, pipe in self.pipeline.iteritems():
log.debug("Creating client {}".format(pipe_client))
client = getattr(self.client_factory, "spam_" + pipe_client)()
loop("spam_" + pipe_client, pipe, client.spam)
def rotate(self, func, period, number, count):
"""Execute a method a given number of times within a period,
and repeat that period a given number of times.
@param func: Method to be executed
@type func: `method`
@param period: Length of the period (in seconds) over which the
executions are spread
@type period: `int`
@param number: Number of executions per period
@type number: `int`
@param count: Number of times to repeat the period
@type count: `int`
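Hypothetical example: rotate(client.create, 60, 5, 3) would call
client.create 5 times, sleeping a random 0..12 seconds (period / number)
after each call, and repeat that whole period 3 times.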
"""
for cycle in xrange(count):
for execute in xrange(number):
func()
time.sleep(random.randint(0, period / number))
|
offline.py
|
import os
import eel
from pygame import mixer
from mutagen.mp3 import MP3
import threading
os.system('cls')
print('')
print(r'example:- C:\Users\Admin\Desktop\music')
dir = input('ENTER YOUR MUSIC DIRECTORY (leave empty to load default directory) \n\n=>')
print('')
if dir == '':
with open('config.txt') as f:
Store_Temp_Streaming_Data = f.readline()
Default_Local_Music_Dir = f.readline()
dir = Default_Local_Music_Dir[25:]
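# The second config.txt line presumably begins with a fixed 25-character
# label (e.g. "Default_Local_Music_Dir: "), so the slice keeps only the path.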
eel.init("web") # initialises eel
arr = [] #array keeps track of all songs
i = 0
o = 0 #counter for songs
status = 1 #for play/pause status
vol = 1.0 #controls volume (1.0 = maximum volume)
# adds all songs to array
mylist = os.listdir(dir)
while i != len(mylist):
arr.append(mylist[i])
i += 1
@eel.expose
def play():
# plays music
global status
status = 1
mixer.music.unpause()
return 'play'
@eel.expose
# pauses music
def pause():
global status
status = 0
mixer.music.pause()
return 'pause'
@eel.expose
# increases volume
def vol_up():
global vol
vol += 0.1
if vol > 1.0:
vol = 1.0
mixer.music.set_volume(vol)
return str('volume: ' + str(round(vol * 100)))
@eel.expose
# decreases volume
def vol_down():
global vol
vol -= 0.1
if vol < 0.1:
vol = 0
mixer.music.set_volume(vol)
return str('volume: ' + str(round(vol * 100)))
@eel.expose
def next():
global arr
global o
global status
# if music is not paused
if status == 1:
if o + 1 != len(arr):
# loads and plays next song
try:
o += 1
mixer.music.load(dir + "\\" + arr[o])
except:
return
mixer.music.play()
return [arr[o][:-4], 'next']
# if all songs have been played, it starts playing from the beginning
else:
o = 0
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
return [arr[o][:-4], 'next']
# if music is paused
elif status == 0:
if o + 1 != len(arr):
# loads and plays next song
try:
o += 1
mixer.music.load(dir + "\\" + arr[o])
except:
o += 1
mixer.music.load(dir + "\\" + arr[o])
return
mixer.music.play()
mixer.music.pause()
return [arr[o][:-4], 'next']
# if all songs have been played, it starts playing from the beginning
else:
o = 0
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
return [arr[o][:-4], 'next']
@eel.expose
def previous():
global arr
global o
global status
# if music is not paused
if status == 1:
# loads and plays previous song
try:
o -= 1
mixer.music.load(dir + "\\" + arr[o])
except:
o -= 1
mixer.music.load(dir + "\\" + arr[o])
return
mixer.music.play()
return [arr[o][:-4], 'previous']
# if music is paused
elif status == 0:
# loads and plays previous song
try:
o -= 1
mixer.music.load(dir + "\\" + arr[o])
except:
o -= 1
mixer.music.load(dir + "\\" + arr[o])
return
mixer.music.play()
mixer.music.pause()
return [arr[o][:-4], 'previous']
@eel.expose
def main():
global arr
global o
global status
# updates the HTML header with the currently playing song
eel.name_update(arr[o][:-4])
# gets song length
def length():
length = MP3(dir + "\\" + arr[o]).info.length
return int(length)
# updates song slider bar
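# mixer.music.get_pos() reports the playback position in milliseconds;
# dividing by 1000 gives seconds, dividing by length() gives a 0..1
# fraction, and the 100000000 factor scales it to whatever range the
# JS-side eel.time() slider expects.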
while mixer.music.get_busy() != 0:
os.system('cls')
print('now playing: ', '#' + str(o + 1) , arr[o][:-4])
eel.time(int((((mixer.music.get_pos()) / 1000) / length()) * 100000000))
while status == 0:
os.system('cls')
print('now playing: ', '#' + str(o + 1) , arr[o][:-4])
eel.time(int((((mixer.music.get_pos()) / 1000) / length()) * 100000000))
# plays next song if song has finished
if mixer.music.get_busy() == 0:
o += 1
if o != len(arr):
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
main()
else:
o = 0
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
main()
# Starts the index.html file
def start():
eel.start("index.html", size=(551, 390), position=(0,0))
mixer.init()
mixer.music.load(dir + '\\' + arr[o])
mixer.music.play()
if __name__ == '__main__':
threading.Thread(target = start).start()
main()
|
conftest.py
|
from __future__ import absolute_import
import os
import signal
import pytest
import time
import django_rq
from celery.signals import worker_ready
from django.core.cache import cache
from .celery import celery
CELERY_WORKER_READY = list()
@worker_ready.connect
def on_worker_ready(**kwargs):
"""Called when the Celery worker thread is ready to do work.
This is to avoid race conditions since everything is in one python process.
"""
CELERY_WORKER_READY.append(True)
@pytest.yield_fixture
def cleared_cache(request):
cache.clear()
yield cache
cache.clear()
@pytest.yield_fixture
def celery_worker(request):
"""Fixture starting a celery worker in background"""
from multiprocessing import Process
# Always clear the queue first
celery.control.purge()
celery_args = ['-C', '-q', '-l', 'FATAL', '-c', '1', '-P', 'solo', '--without-gossip']
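# Roughly: -C no colour, -q quiet, -l FATAL log level, -c 1 a single worker
# process, -P solo the in-process pool, --without-gossip no inter-worker
# gossip; i.e. the smallest, quietest worker that can still run tasks.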
proc = Process(target=lambda: celery.worker_main(celery_args))
proc.start()
# Wait for worker to finish initializing to avoid a race condition I've been experiencing.
for i in range(5):
if CELERY_WORKER_READY:
break
time.sleep(1)
yield proc
proc.terminate()
proc.join(10)
try:
os.kill(proc.pid, signal.SIGKILL)
except:
pass
celery.control.purge()
del CELERY_WORKER_READY[:]
@pytest.yield_fixture
def rq_worker(request):
"""Fixture starting a rq worker in background"""
from multiprocessing import Process
# First clear the queue
[queue.empty() for queue in django_rq.get_worker().queues]
def _proc_target(env):
import os
os.environ.update(env)
worker = django_rq.get_worker()
worker.work()
proc = Process(target=_proc_target, kwargs={
'env': {'DJANGO_SETTINGS_MODULE': 'livewatch.tests.settings'}
})
proc.start()
while not proc.is_alive():
time.sleep(1)
yield proc
# Wait for rq to exit, timeout 10 seconds.
proc.terminate()
proc.join(10)
try:
os.kill(proc.pid, signal.SIGKILL)
except:
pass
[queue.empty() for queue in django_rq.get_worker().queues]
|
test_process.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import uuid
import threading
from django.test import TransactionTestCase
from pipeline.eri.models import Process
from pipeline.eri.imp.process import ProcessMixin
class ProcessMixinTestCase(TransactionTestCase):
def setUp(self):
self.mixin = ProcessMixin()
self.process = Process.objects.create(priority=1, queue="queue")
def test_beat(self):
last_heartbeat = self.process.last_heartbeat
self.mixin.beat(self.process.id)
self.process.refresh_from_db()
self.assertTrue(last_heartbeat < self.process.last_heartbeat)
def test_wake_up(self):
self.assertTrue(self.process.asleep)
self.mixin.wake_up(self.process.id)
self.process.refresh_from_db()
self.assertFalse(self.process.asleep)
def test_sleep(self):
self.process.asleep = False
self.process.save()
self.mixin.sleep(self.process.id)
self.process.refresh_from_db()
self.assertTrue(self.process.asleep)
def test_suspend(self):
self.assertFalse(self.process.suspended)
self.assertEqual(self.process.suspended_by, "")
self.mixin.suspend(self.process.id, "123")
self.process.refresh_from_db()
self.assertTrue(self.process.suspended)
self.assertEqual(self.process.suspended_by, "123")
def test_kill(self):
self.process.asleep = False
self.process.save()
self.mixin.kill(self.process.id)
self.process.refresh_from_db()
self.assertTrue(self.process.asleep)
def test_resume(self):
self.mixin.suspend(self.process.id, "123")
self.mixin.resume(self.process.id)
self.process.refresh_from_db()
self.assertFalse(self.process.suspended)
self.assertEqual(self.process.suspended_by, "")
def test_batch_resume(self):
p1 = Process.objects.create(priority=1, queue="queue")
p2 = Process.objects.create(priority=1, queue="queue")
p3 = Process.objects.create(priority=1, queue="queue")
self.mixin.suspend(p1.id, "123")
self.mixin.suspend(p2.id, "123")
self.mixin.suspend(p3.id, "123")
self.mixin.batch_resume([p1.id, p2.id, p3.id])
p1.refresh_from_db()
p2.refresh_from_db()
p3.refresh_from_db()
self.assertFalse(p1.suspended)
self.assertFalse(p2.suspended)
self.assertFalse(p3.suspended)
self.assertEqual(p1.suspended_by, "")
self.assertEqual(p2.suspended_by, "")
self.assertEqual(p3.suspended_by, "")
def test_die(self):
self.assertFalse(self.process.dead)
self.mixin.die(self.process.id)
self.process.refresh_from_db()
self.assertTrue(self.process.dead)
def test_get_process_info(self):
process = Process.objects.create(
priority=1, queue="queue", destination_id="d", root_pipeline_id="r", pipeline_stack="[]", parent_id=2
)
process_info = self.mixin.get_process_info(process.id)
self.assertEqual(process_info.process_id, process.id)
self.assertEqual(process_info.destination_id, process.destination_id)
self.assertEqual(process_info.root_pipeline_id, process.root_pipeline_id)
self.assertEqual(process_info.pipeline_stack, [])
self.assertEqual(process_info.parent_id, process.parent_id)
def test_get_suspended_process_info(self):
p1 = Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex)
p2 = Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex)
p3 = Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex)
self.mixin.suspend(p1.id, "123")
self.mixin.suspend(p2.id, "123")
self.mixin.suspend(p3.id, "123")
spi_list = self.mixin.get_suspended_process_info("123")
actual = [(spi.process_id, spi.current_node) for spi in spi_list]
self.assertEqual(
actual, [(p1.id, p1.current_node_id), (p2.id, p2.current_node_id), (p3.id, p3.current_node_id)]
)
def test_get_sleep_process_with_current_node_id(self):
process = Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex)
self.mixin.sleep(process.id)
self.assertEqual(self.mixin.get_sleep_process_with_current_node_id(process.current_node_id), process.id)
def test_get_sleep_process_with_current_node_id__not_exist(self):
self.assertIsNone(self.mixin.get_sleep_process_with_current_node_id("not_exist"))
def test_get_sleep_process_with_current_node_id__more_than_one(self):
p1 = Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex)
p2 = Process.objects.create(priority=1, queue="queue", current_node_id=p1.current_node_id)
self.mixin.sleep(p1.id)
self.mixin.sleep(p2.id)
self.assertRaises(ValueError, self.mixin.get_sleep_process_with_current_node_id, p1.current_node_id)
def test_get_process_id_with_current_node_id(self):
p1 = Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex)
Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex, dead=True)
self.assertEqual(self.mixin.get_process_id_with_current_node_id(p1.current_node_id), p1.id)
def test_get_process_id_with_current_node_id__not_exist(self):
self.assertIsNone(self.mixin.get_process_id_with_current_node_id("not_exist"))
def test_get_process_id_with_current_node_id_more_than_one(self):
p1 = Process.objects.create(priority=1, queue="queue", current_node_id=uuid.uuid1().hex)
p2 = Process.objects.create(priority=1, queue="queue", current_node_id=p1.current_node_id)
self.mixin.sleep(p1.id)
self.mixin.sleep(p2.id)
self.assertRaises(ValueError, self.mixin.get_process_id_with_current_node_id, p1.current_node_id)
def test_set_current_node(self):
node_id = uuid.uuid1().hex
self.mixin.set_current_node(self.process.id, node_id)
self.process.refresh_from_db()
self.assertEqual(self.process.current_node_id, node_id)
def test_child_process_finish(self):
need_ack = 30
process = Process.objects.create(priority=1, queue="queue", ack_num=0, need_ack=need_ack)
lock = threading.Lock()
res = {False: 0, True: 0}
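        # Fire need_ack concurrent acknowledgements; exactly one caller should see the parent finish.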
def target(parent_id, process_id):
success = self.mixin.child_process_finish(parent_id, process_id)
lock.acquire()
res[success] += 1
lock.release()
threads = [threading.Thread(target=target, args=(process.id, i)) for i in range(need_ack)]
for t in threads:
t.start()
for t in threads:
t.join(1)
process.refresh_from_db()
self.assertEqual(process.ack_num, 0)
self.assertEqual(process.need_ack, -1)
self.assertEqual(res, {True: 1, False: need_ack - 1})
def test_is_frozen(self):
self.assertFalse(self.mixin.is_frozen(self.process.id))
self.process.frozen = True
self.process.save()
self.assertTrue(self.mixin.is_frozen(self.process.id))
def test_freeze(self):
self.assertFalse(self.process.frozen)
self.mixin.freeze(self.process.id)
self.process.refresh_from_db()
self.assertTrue(self.process.frozen)
def test_fork(self):
from_to = {}
for i in range(10):
from_to[str(i)] = str(i + 1)
dps = self.mixin.fork(parent_id=self.process.id, root_pipeline_id="r", pipeline_stack=[1, 2], from_to=from_to)
self.assertEqual(len(dps), 10)
actual = [dp.node_id for dp in dps]
self.assertEqual(actual, [str(i) for i in range(10)])
def test_fork__parent_does_not_exist(self):
self.assertRaises(
Process.DoesNotExist,
self.mixin.fork,
parent_id=self.process.id + 1,
root_pipeline_id="r",
pipeline_stack=[1, 2],
from_to={},
)
def test_join(self):
self.mixin.join(self.process.id, list(range(100)))
self.process.refresh_from_db()
self.assertEqual(self.process.ack_num, 0)
self.assertEqual(self.process.need_ack, 100)
def test_set_pipeline_stack(self):
self.assertEqual(self.process.pipeline_stack, "[]")
self.mixin.set_pipeline_stack(self.process.id, ["1", "2", "3"])
self.process.refresh_from_db()
self.assertEqual(self.process.pipeline_stack, '["1", "2", "3"]')
def test_get_process_info_with_root_pipeline(self):
self.process.root_pipeline_id = "root"
self.process.save()
p = self.mixin.get_process_info_with_root_pipeline("root")
self.assertEqual(1, len(p))
self.assertEqual(p[0].root_pipeline_id, "root")
self.assertEqual(p[0].process_id, self.process.id)
self.assertEqual(p[0].destination_id, self.process.destination_id)
self.assertEqual(p[0].pipeline_stack, [])
self.assertEqual(p[0].parent_id, self.process.parent_id)
p = self.mixin.get_process_info_with_root_pipeline("not_exist")
self.assertEqual(0, len(p))
|
app.py
|
import tkinter
from tkinter import ttk
import tkinter.scrolledtext as tst
import json
import re
import time
from new_product import new_product_alarm as Alarm
import threading
import queue
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
from contextlib import contextmanager
notice_list = []
q = queue.Queue()
class App(object):
def __init__(self):
self.js = jsonManage()
self.root = tkinter.Tk()
self.root.size = '440*440'
self.root.title('新品监控')
self.root.geometry('500x500')
self.root.resizable(False, False)
self.root.columnconfigure(0, weight=1)
self.root.rowconfigure(0, weight=1)
self.tabControl = tkinter.ttk.Notebook(self.root)
self.tab2 = ttk.Frame(self.tabControl)
self.tab2.columnconfigure(0, weight=1)
self.tab2.rowconfigure(0, weight=1)
self.tabControl.add(self.tab2, text='主页面')
self.tabControl.pack()
        # URL input box
self.url = tkinter.Text(self.tab2, width='50', height='2')
self.url.pack()
        # "GO" button that submits the URL
self.button_go = tkinter.Button(self.tab2, text='GO!', command=self.go)
self.button_go.pack()
        # Sender email address and password fields
self.show1 = tkinter.StringVar()
self.sender_email = tkinter.Entry(self.tab2, textvariable=self.show1)
self.show1.set('email')
self.sender_email.pack()
self.show2 = tkinter.StringVar()
self.sender_pass = tkinter.Entry(self.tab2, textvariable=self.show2)
self.show2.set('password')
self.sender_pass.pack()
        # Listboxes showing the configured notification targets
        self.b_mighty = tkinter.LabelFrame(self.tab2)
self.b_mighty.pack(fill=tkinter.BOTH, expand=1)
self.b_listbox1 = tkinter.Listbox(self.b_mighty, width=33, height=10)
self.b_listbox2 = tkinter.Listbox(self.b_mighty, width=33, height=10)
self.b_listbox1.pack(padx=5, side=tkinter.LEFT, anchor=tkinter.NW)
self.b_listbox2.pack(padx=5, side=tkinter.RIGHT, anchor=tkinter.NW)
        # QQ number input
self.b_lable1 = tkinter.Label(self.tab2, text="QQ")
self.b_lable1.pack(padx=0, pady=5, side=tkinter.LEFT)
self.b_qq_input = tst.ScrolledText(self.tab2, width=22, height=2)
self.b_qq_input.pack(padx=0, pady=5, side=tkinter.LEFT)
        # Email address input
self.b_lable2 = tkinter.Label(self.tab2, text="邮箱")
self.b_lable2.pack(padx=0, pady=0, side=tkinter.LEFT)
self.b_email_input = tst.ScrolledText(self.tab2, width=22, height=2)
self.b_email_input.pack(
padx=0, pady=0, side=tkinter.LEFT, anchor=tkinter.S)
        # Add/delete buttons for the QQ and email entries
self.button_add_email = tkinter.Button(
self.tab2, text='增加', command=self.AddKeyword)
self.button_add_email.pack(
padx=0, pady=0, side=tkinter.BOTTOM, anchor=tkinter.NW)
self.button_delete_email = tkinter.Button(
self.tab2, text='删除', command=self.DeleteKeyword)
self.button_delete_email.pack(
padx=0, pady=0, side=tkinter.LEFT, anchor=tkinter.S)
def mainloop(self):
self.root.mainloop()
def AddKeyword(self):
global notice_list
qq = None
email = None
qq = self.b_qq_input.get(1.0, tkinter.END)
email = self.b_email_input.get(1.0, tkinter.END)
if(qq.isspace() is not True):
try:
notice_list.append('qq' + '|' + str(qq))
self.js.writejson('notice.json', notice_list)
finally:
self.b_qq_input.delete(1.0, tkinter.END)
if(email.isspace() is not True):
try:
notice_list.append('email' + '|' + str(email))
self.js.writejson('notice.json', notice_list)
finally:
self.b_email_input.delete(1.0, tkinter.END)
self.ShowKeyWord()
def ShowKeyWord(self):
global notice_list
try:
notice_list = self.js.readjson('notice.json')
notice_list_len = len(notice_list)
self.b_listbox1.delete(0, 'end')
self.b_listbox2.delete(0, 'end')
if(notice_list_len != 0):
for notice in notice_list:
if (str(notice).startswith('qq')):
self.b_listbox1.insert(tkinter.END, 'qq')
self.b_listbox2.insert(
tkinter.END, str(notice).split('|')[1])
elif (str(notice).startswith('email')):
self.b_listbox1.insert(tkinter.END, 'email')
self.b_listbox2.insert(
tkinter.END, str(notice).split('|')[1])
        except Exception:
            # notice.json is missing or unreadable; leave the listboxes empty.
            pass
def DeleteKeyword(self):
global notice_list
for i in range(len(notice_list)):
if(self.b_listbox1.selection_includes(i) is True):
notice_list.pop(i)
self.js.writejson('notice.json', notice_list)
self.ShowKeyWord()
def go(self):
url = self.url.get('1.0', tkinter.END)
q.put(url)
self.thread = threading.Thread(target=self.go_child)
        self.thread.daemon = True
self.thread.start()
self.url.delete(1.0, tkinter.END)
def go_child(self):
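        # Background worker: takes a URL from the queue, re-checks it every 20 seconds, and once
        # the product is available plays a sound and emails every configured recipient.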
global notice_list
        url = q.get()
        while True:
alarm = Alarm()
if(alarm.yes_or_no(url) is True):
try:
alarm.music_notice()
for notice in notice_list:
if (str(notice).startswith('email')):
#print(str(notice).split('|')[1])
self.email_notice(url, str(notice).split('|')[1])
finally:
break
else:
time.sleep(20)
def email_notice(self, url, email):
sender = self.sender_email.get()
sender_pass = self.sender_pass.get()
#print('sender:' + sender + 'email_pass:' + sender_pass + 'email' + email)
        # Skip sending while the sender fields still hold their placeholder values.
        if (sender == 'email') or (sender_pass == 'password'):
print('sender2:' + sender + 'email_pass:' + sender_pass + 'email' + email)
return
else:
with logined(sender, sender_pass) as smtp_serv:
msg = MIMEText('你的丝芙兰商品已到货,网址:' + url, 'plain', 'utf-8')
                # Sender display name and account
                msg['From'] = formataddr(["商品监控", sender])
                # Recipient display name and account
                msg['To'] = formataddr(["收件人", email])
                # Message subject line
                msg['Subject'] = "邮件主题-提醒到货"
                # logined() has already connected and authenticated smtp_serv,
                # so send the message directly instead of opening a second connection.
                smtp_serv.send_message(msg)
@contextmanager
def logined(sender, password, smtp_host='smtp.qq.com', smtp_port=587):
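    """Yield an SMTP connection that has been upgraded to TLS and logged in as the sender."""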
smtp_serv = smtplib.SMTP(smtp_host, smtp_port, timeout=10)
try: # make smtp server and login
smtp_serv.ehlo_or_helo_if_needed()
smtp_serv.starttls()
smtp_serv.ehlo()
smtp_serv.login(sender, password)
yield smtp_serv
    finally:
        # Close the SMTP connection once the caller is done with it.
        smtp_serv.quit()
class jsonManage(object):
def __init__(self):
pass
def writejson(self, filename, data):
with open(filename, 'w', encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False)
def readjson(self, filename):
with open(filename, 'r', encoding="utf-8") as f:
notice_list = json.load(f)
return notice_list
if __name__ == "__main__":
s = App()
s.ShowKeyWord()
s.mainloop()
|
prefork.py
|
import multiprocessing
import select
from typing import Dict, Optional
from qactuar import ASGIApp, Config
from qactuar.processes.prefork import make_child
from qactuar.servers.base import BaseQactuarServer
class PreForkServer(BaseQactuarServer):
def __init__(
self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        app: Optional[ASGIApp] = None,
        config: Optional[Config] = None,
):
super().__init__(host, port, app, config)
self.queues: Dict[int, multiprocessing.Queue] = {}
self.current_process = 0
def serve_forever(self) -> None:
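        # Spawn one daemon child per pool slot, each fed by its own queue, then loop
        # dispatching readable listen-socket events to the children.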
self.start_up()
for i in range(self.config.PROCESS_POOL_SIZE or 1):
self.queues[i] = multiprocessing.Queue()
self.processes[i] = multiprocessing.Process(
target=make_child, args=(self, self.queues[i])
)
self.processes[i].daemon = True
self.processes[i].start()
try:
while True:
self.select_socket()
except KeyboardInterrupt:
self.shut_down()
except Exception as err:
self.exception_log.exception(err)
self.shut_down()
def select_socket(self) -> None:
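        # Block (up to SELECT_SLEEP_TIME) until the listen socket is readable, then signal
        # the next child in round-robin order to accept the connection.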
ready_to_read, _, _ = select.select(
[self.listen_socket], [], [], self.config.SELECT_SLEEP_TIME
)
if ready_to_read:
self.queues[self.current_process].put_nowait(True)
self.next_process()
def next_process(self) -> None:
if self.current_process >= len(self.processes) - 1:
self.current_process = 0
else:
self.current_process += 1
|
manager.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 50
class Manager(object):
def __init__(self, config):
self._config = config
self._relays = {} # (tcprelay, udprelay)
self._loop = eventloop.EventLoop()
self._dns_resolver = asyncdns.DNSResolver()
self._dns_resolver.add_to_loop(self._loop)
self._statistics = collections.defaultdict(int)
self._control_client_addr = None
try:
manager_address = config['manager_address']
if ':' in manager_address:
addr = manager_address.rsplit(':', 1)
addr = addr[0], int(addr[1])
addrs = socket.getaddrinfo(addr[0], addr[1])
if addrs:
family = addrs[0][0]
else:
logging.error('invalid address: %s', manager_address)
exit(1)
else:
addr = manager_address
family = socket.AF_UNIX
self._control_socket = socket.socket(family,
socket.SOCK_DGRAM)
self._control_socket.bind(addr)
self._control_socket.setblocking(False)
except (OSError, IOError) as e:
logging.error(e)
logging.error('can not bind to manager address')
exit(1)
self._loop.add(self._control_socket,
eventloop.POLL_IN, self)
self._loop.add_periodic(self.handle_periodic)
port_password = config['port_password']
del config['port_password']
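        # Start a TCP/UDP relay pair for every configured port, each with its own password.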
for port, password in port_password.items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
self.add_port(a_config)
def add_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.error("server already exists at %s:%d" % (config['server'],
port))
return
logging.info("adding server at %s:%d" % (config['server'], port))
t = tcprelay.TCPRelay(config, self._dns_resolver, False,
self.stat_callback)
u = udprelay.UDPRelay(config, self._dns_resolver, False,
self.stat_callback)
t.add_to_loop(self._loop)
u.add_to_loop(self._loop)
self._relays[port] = (t, u)
def remove_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.info("removing server at %s:%d" % (config['server'], port))
t, u = servers
t.close(next_tick=False)
u.close(next_tick=False)
del self._relays[port]
else:
logging.error("server not exist at %s:%d" % (config['server'],
port))
def handle_event(self, sock, fd, event):
if sock == self._control_socket and event == eventloop.POLL_IN:
data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
parsed = self._parse_command(data)
if parsed:
command, config = parsed
a_config = self._config.copy()
if config:
# let the command override the configuration file
a_config.update(config)
if 'server_port' not in a_config:
logging.error('can not find server_port in config')
else:
if command == 'add':
self.add_port(a_config)
self._send_control_data(b'ok')
elif command == 'remove':
self.remove_port(a_config)
self._send_control_data(b'ok')
elif command == 'ping':
self._send_control_data(b'pong')
else:
logging.error('unknown command %s', command)
def _parse_command(self, data):
# commands:
# add: {"server_port": 8000, "password": "foobar"}
# remove: {"server_port": 8000"}
data = common.to_str(data)
parts = data.split(':', 1)
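        # A bare command with no ":" separator (e.g. "ping") carries no JSON config.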
if len(parts) < 2:
return data, None
command, config_json = parts
try:
config = shell.parse_json_in_str(config_json)
return command, config
except Exception as e:
logging.error(e)
return None
def stat_callback(self, port, data_len):
self._statistics[port] += data_len
def handle_periodic(self):
r = {}
i = 0
def send_data(data_dict):
if data_dict:
# use compact JSON format (without space)
data = common.to_bytes(json.dumps(data_dict,
separators=(',', ':')))
self._send_control_data(b'stat: ' + data)
for k, v in self._statistics.items():
r[k] = v
i += 1
# split the data into segments that fit in UDP packets
if i >= STAT_SEND_LIMIT:
send_data(r)
r.clear()
i = 0
if len(r) > 0:
send_data(r)
self._statistics.clear()
def _send_control_data(self, data):
if self._control_client_addr:
try:
self._control_socket.sendto(data, self._control_client_addr)
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
def run(self):
self._loop.run()
def run(config):
Manager(config).run()
def test():
import time
import threading
import struct
from shadowsocks import encrypt
logging.basicConfig(level=5,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
enc = []
eventloop.TIMEOUT_PRECISION = 1
def run_server():
config = {
'server': '127.0.0.1',
'local_port': 1081,
'port_password': {
'8381': 'foobar1',
'8382': 'foobar2'
},
'method': 'aes-256-cfb',
'manager_address': '127.0.0.1:6001',
'timeout': 60,
'fast_open': False,
'verbose': 2
}
manager = Manager(config)
enc.append(manager)
manager.run()
t = threading.Thread(target=run_server)
t.start()
time.sleep(1)
manager = enc[0]
cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cli.connect(('127.0.0.1', 6001))
# test add and remove
time.sleep(1)
cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
time.sleep(1)
assert 7001 in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
cli.send(b'remove: {"server_port":8381}')
time.sleep(1)
assert 8381 not in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
logging.info('add and remove test passed')
# test statistics for TCP
header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
header + b'GET /\r\n\r\n')
tcp_cli = socket.socket()
tcp_cli.connect(('127.0.0.1', 7001))
tcp_cli.send(data)
tcp_cli.recv(4096)
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = shell.parse_json_in_str(data)
assert '7001' in stats
logging.info('TCP statistics test passed')
# test statistics for UDP
header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
header + b'test')
udp_cli = socket.socket(type=socket.SOCK_DGRAM)
udp_cli.sendto(data, ('127.0.0.1', 8382))
    udp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = json.loads(data)
assert '8382' in stats
logging.info('UDP statistics test passed')
manager._loop.stop()
t.join()
if __name__ == '__main__':
test()
|
learn_shared_memory.py
|
import logging
import multiprocessing
import time
logging.basicConfig(
level=logging.DEBUG,
format='%(processName)s: %(message)s'
)
def f(num, arr):
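    # Runs in each child process; mutates the shared Value and Array in place.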
logging.debug(num)
num.value += 1.0
for i in range(len(arr)):
arr[i] *= 2
if __name__ == "__main__":
    num = multiprocessing.Value('f', 0.0)  # shared float, typecode 'f'
    arr = multiprocessing.Array('i', [1, 2, 3, 4, 5])  # shared array of ints, typecode 'i'
p1 = multiprocessing.Process(target=f, args=(num, arr))
p2 = multiprocessing.Process(target=f, args=(num, arr))
p1.start()
p2.start()
p1.join()
p2.join()
logging.debug(num.value)
logging.debug(arr[:])
|